Diffstat (limited to 'drivers')
-rw-r--r--  drivers/atm/horizon.c | 6
-rw-r--r--  drivers/atm/idt77252.c | 2
-rw-r--r--  drivers/bus/fsl-mc/fsl-mc-bus.c | 4
-rw-r--r--  drivers/bus/mhi/pci_generic.c | 4
-rw-r--r--  drivers/char/pcmcia/synclink_cs.c | 23
-rw-r--r--  drivers/infiniband/hw/mlx5/cq.c | 2
-rw-r--r--  drivers/infiniband/hw/mlx5/devx.c | 7
-rw-r--r--  drivers/infiniband/hw/mlx5/ib_rep.c | 77
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c | 44
-rw-r--r--  drivers/infiniband/hw/mlx5/std_types.c | 10
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c | 8
-rw-r--r--  drivers/net/Kconfig | 17
-rw-r--r--  drivers/net/Makefile | 6
-rw-r--r--  drivers/net/Space.c | 178
-rw-r--r--  drivers/net/appletalk/Kconfig | 4
-rw-r--r--  drivers/net/appletalk/ipddp.c | 16
-rw-r--r--  drivers/net/appletalk/ltpc.c | 7
-rw-r--r--  drivers/net/bonding/bond_3ad.c | 11
-rw-r--r--  drivers/net/bonding/bond_alb.c | 32
-rw-r--r--  drivers/net/bonding/bond_main.c | 591
-rw-r--r--  drivers/net/bonding/bond_netlink.c | 16
-rw-r--r--  drivers/net/bonding/bond_options.c | 27
-rw-r--r--  drivers/net/bonding/bond_procfs.c | 2
-rw-r--r--  drivers/net/bonding/bond_sysfs.c | 25
-rw-r--r--  drivers/net/can/Kconfig | 3
-rw-r--r--  drivers/net/can/at91_can.c | 137
-rw-r--r--  drivers/net/can/c_can/c_can.h | 25
-rw-r--r--  drivers/net/can/c_can/c_can_main.c | 123
-rw-r--r--  drivers/net/can/c_can/c_can_platform.c | 1
-rw-r--r--  drivers/net/can/dev/dev.c | 66
-rw-r--r--  drivers/net/can/dev/netlink.c | 11
-rw-r--r--  drivers/net/can/dev/rx-offload.c | 90
-rw-r--r--  drivers/net/can/flexcan.c | 129
-rw-r--r--  drivers/net/can/janz-ican3.c | 23
-rw-r--r--  drivers/net/can/m_can/m_can.c | 266
-rw-r--r--  drivers/net/can/m_can/m_can.h | 11
-rw-r--r--  drivers/net/can/m_can/m_can_pci.c | 11
-rw-r--r--  drivers/net/can/m_can/m_can_platform.c | 31
-rw-r--r--  drivers/net/can/m_can/tcan4x5x-core.c | 17
-rw-r--r--  drivers/net/can/rcar/rcar_canfd.c | 338
-rw-r--r--  drivers/net/can/sja1000/peak_pci.c | 119
-rw-r--r--  drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c | 30
-rw-r--r--  drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c | 4
-rw-r--r--  drivers/net/can/spi/mcp251xfd/mcp251xfd.h | 2
-rw-r--r--  drivers/net/can/ti_hecc.c | 2
-rw-r--r--  drivers/net/can/usb/esd_usb2.c | 12
-rw-r--r--  drivers/net/can/usb/etas_es58x/es581_4.c | 5
-rw-r--r--  drivers/net/can/usb/etas_es58x/es58x_core.c | 82
-rw-r--r--  drivers/net/can/usb/etas_es58x/es58x_core.h | 2
-rw-r--r--  drivers/net/can/usb/etas_es58x/es58x_fd.c | 19
-rw-r--r--  drivers/net/can/usb/etas_es58x/es58x_fd.h | 23
-rw-r--r--  drivers/net/can/usb/peak_usb/pcan_usb.c | 228
-rw-r--r--  drivers/net/dsa/b53/b53_common.c | 10
-rw-r--r--  drivers/net/dsa/b53/b53_priv.h | 2
-rw-r--r--  drivers/net/dsa/bcm_sf2.c | 1
-rw-r--r--  drivers/net/dsa/mt7530.c | 148
-rw-r--r--  drivers/net/dsa/mt7530.h | 21
-rw-r--r--  drivers/net/dsa/mv88e6xxx/Kconfig | 1
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.c | 103
-rw-r--r--  drivers/net/dsa/ocelot/Kconfig | 2
-rw-r--r--  drivers/net/dsa/ocelot/felix.c | 143
-rw-r--r--  drivers/net/dsa/ocelot/felix.h | 2
-rw-r--r--  drivers/net/dsa/sja1105/Kconfig | 1
-rw-r--r--  drivers/net/dsa/sja1105/sja1105.h | 27
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_devlink.c | 114
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_dynamic_config.c | 6
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_main.c | 1845
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_vl.c | 14
-rw-r--r--  drivers/net/eql.c | 24
-rw-r--r--  drivers/net/ethernet/3com/3c509.c | 7
-rw-r--r--  drivers/net/ethernet/3com/3c515.c | 3
-rw-r--r--  drivers/net/ethernet/3com/3c574_cs.c | 2
-rw-r--r--  drivers/net/ethernet/3com/3c59x.c | 4
-rw-r--r--  drivers/net/ethernet/3com/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/8390/Kconfig | 3
-rw-r--r--  drivers/net/ethernet/8390/apne.c | 11
-rw-r--r--  drivers/net/ethernet/8390/ax88796.c | 9
-rw-r--r--  drivers/net/ethernet/8390/axnet_cs.c | 2
-rw-r--r--  drivers/net/ethernet/8390/ne.c | 5
-rw-r--r--  drivers/net/ethernet/8390/pcnet_cs.c | 2
-rw-r--r--  drivers/net/ethernet/8390/smc-ultra.c | 9
-rw-r--r--  drivers/net/ethernet/8390/wd.c | 7
-rw-r--r--  drivers/net/ethernet/8390/xsurf100.c | 9
-rw-r--r--  drivers/net/ethernet/actions/owl-emac.c | 6
-rw-r--r--  drivers/net/ethernet/adaptec/starfire.c | 2
-rw-r--r--  drivers/net/ethernet/agere/et131x.c | 2
-rw-r--r--  drivers/net/ethernet/allwinner/sun4i-emac.c | 2
-rw-r--r--  drivers/net/ethernet/amd/Kconfig | 4
-rw-r--r--  drivers/net/ethernet/amd/amd8111e.c | 2
-rw-r--r--  drivers/net/ethernet/amd/atarilance.c | 11
-rw-r--r--  drivers/net/ethernet/amd/au1000_eth.c | 2
-rw-r--r--  drivers/net/ethernet/amd/lance.c | 6
-rw-r--r--  drivers/net/ethernet/amd/mvme147.c | 16
-rw-r--r--  drivers/net/ethernet/amd/ni65.c | 6
-rw-r--r--  drivers/net/ethernet/amd/pcnet32.c | 2
-rw-r--r--  drivers/net/ethernet/amd/sun3lance.c | 19
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 2
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_main.c | 2
-rw-r--r--  drivers/net/ethernet/arc/emac_main.c | 2
-rw-r--r--  drivers/net/ethernet/atheros/ag71xx.c | 2
-rw-r--r--  drivers/net/ethernet/atheros/alx/main.c | 2
-rw-r--r--  drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 2
-rw-r--r--  drivers/net/ethernet/atheros/atl1e/atl1e_main.c | 2
-rw-r--r--  drivers/net/ethernet/atheros/atlx/atl1.c | 2
-rw-r--r--  drivers/net/ethernet/atheros/atlx/atl2.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/Kconfig | 6
-rw-r--r--  drivers/net/ethernet/broadcom/b44.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bcm63xx_enet.c | 5
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 6
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 139
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h | 28
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c | 9
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 9
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c | 319
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h | 53
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/sb1250-mac.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 2
-rw-r--r--  drivers/net/ethernet/cadence/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/cadence/macb_main.c | 13
-rw-r--r--  drivers/net/ethernet/cavium/Kconfig | 4
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_main.c | 11
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 6
-rw-r--r--  drivers/net/ethernet/cavium/octeon/octeon_mgmt.c | 2
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nic_main.c | 8
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_main.c | 10
-rw-r--r--  drivers/net/ethernet/chelsio/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/cxgb2.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c | 14
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c | 4
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 2
-rw-r--r--  drivers/net/ethernet/cirrus/Kconfig | 27
-rw-r--r--  drivers/net/ethernet/cirrus/cs89x0.c | 31
-rw-r--r--  drivers/net/ethernet/cirrus/ep93xx_eth.c | 2
-rw-r--r--  drivers/net/ethernet/davicom/dm9000.c | 2
-rw-r--r--  drivers/net/ethernet/dec/tulip/de4x5.c | 11
-rw-r--r--  drivers/net/ethernet/dec/tulip/media.c | 2
-rw-r--r--  drivers/net/ethernet/dec/tulip/tulip_core.c | 2
-rw-r--r--  drivers/net/ethernet/dec/tulip/winbond-840.c | 2
-rw-r--r--  drivers/net/ethernet/dlink/dl2k.c | 2
-rw-r--r--  drivers/net/ethernet/dlink/sundance.c | 2
-rw-r--r--  drivers/net/ethernet/dnet.c | 2
-rw-r--r--  drivers/net/ethernet/ethoc.c | 2
-rw-r--r--  drivers/net/ethernet/faraday/ftgmac100.c | 2
-rw-r--r--  drivers/net/ethernet/faraday/ftmac100.c | 2
-rw-r--r--  drivers/net/ethernet/fealnx.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/Makefile | 2
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c | 7
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 4
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c | 56
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c | 530
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c | 367
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h | 62
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpsw-cmd.h | 19
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpsw.c | 80
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpsw.h | 36
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_pf.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_vf.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/fec.h | 31
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 198
-rw-r--r--  drivers/net/ethernet/freescale/fec_mpc52xx.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/ucc_geth.c | 2
-rw-r--r--  drivers/net/ethernet/google/gve/gve_adminq.c | 6
-rw-r--r--  drivers/net/ethernet/hisilicon/Kconfig | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/hisi_femac.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_enet.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hnae3.h | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 81
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | 8
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 138
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h | 31
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c | 24
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h | 3
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c | 148
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.h | 15
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 40
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 3
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c | 150
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.h | 15
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 8
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h | 3
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_devlink.c | 8
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_devlink.h | 4
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c | 2
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_main.c | 2
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_sriov.c | 6
-rw-r--r--  drivers/net/ethernet/i825xx/82596.c | 24
-rw-r--r--  drivers/net/ethernet/i825xx/sun3_82586.c | 17
-rw-r--r--  drivers/net/ethernet/ibm/emac/core.c | 4
-rw-r--r--  drivers/net/ethernet/ibm/ibmveth.c | 2
-rw-r--r--  drivers/net/ethernet/intel/Kconfig | 12
-rw-r--r--  drivers/net/ethernet/intel/e100.c | 6
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_main.c | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/hw.h | 9
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.c | 13
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.h | 3
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 372
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ptp.c | 1
-rw-r--r--  drivers/net/ethernet/intel/e1000e/regs.h | 1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h | 78
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 32
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ptp.c | 756
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_register.h | 29
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 23
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf.h | 9
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_ethtool.c | 10
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_main.c | 122
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_devlink.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c | 6
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_mac.c | 6
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ethtool.c | 3
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 2
-rw-r--r--  drivers/net/ethernet/intel/igbvf/netdev.c | 2
-rw-r--r--  drivers/net/ethernet/intel/igc/igc.h | 38
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_base.c | 10
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_defines.h | 52
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_ethtool.c | 41
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_main.c | 349
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_phy.c | 6
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_regs.h | 17
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_tsn.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 2
-rw-r--r--  drivers/net/ethernet/jme.c | 2
-rw-r--r--  drivers/net/ethernet/korina.c | 2
-rw-r--r--  drivers/net/ethernet/lantiq_etop.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/mv643xx_eth.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 30
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 13
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/common.h | 5
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/mbox.h | 20
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.c | 92
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.h | 4
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c | 12
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c | 115
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c | 148
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c | 51
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c | 36
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h | 10
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/Makefile | 5
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c | 3
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h | 14
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c | 23
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h | 33
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c | 156
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.h | 20
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c | 52
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c | 115
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c | 46
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c | 50
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c | 36
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_devlink.c | 7
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_devlink.h | 2
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_main.c | 5
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_switchdev.c | 12
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_switchdev.h | 3
-rw-r--r--  drivers/net/ethernet/marvell/pxa168_eth.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/skge.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/sky2.c | 6
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.c | 2
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_star_emac.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/qp.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Makefile | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cq.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/dev.c | 74
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/devlink.c | 168
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/devlink.h | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h | 84
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/channels.c | 46
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/channels.h | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/fs.h | 96
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c | 30
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/mapping.c | 45
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/mapping.h | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/params.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/params.h | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c | 24
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/qos.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c | 329
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c | 170
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h | 42
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rss.c | 588
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rss.h | 49
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c | 690
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h | 71
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tir.c | 200
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tir.h | 58
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/trap.c | 27
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c | 72
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.h | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c | 53
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c | 25
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_common.c | 29
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 132
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 671
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c | 156
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 868
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 164
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.h | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 248
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c | 359
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h | 46
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/diag/bridge_tracepoint.h | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 48
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 47
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 388
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/events.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 58
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/health.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c | 76
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag.c | 267
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag.h | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c | 602
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h | 70
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 89
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c | 75
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 32
-rw-r--r--  drivers/net/ethernet/micrel/ks8851_common.c | 2
-rw-r--r--  drivers/net/ethernet/micrel/ksz884x.c | 2
-rw-r--r--  drivers/net/ethernet/microchip/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/microchip/lan743x_main.c | 2
-rw-r--r--  drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c | 24
-rw-r--r--  drivers/net/ethernet/mscc/Kconfig | 3
-rw-r--r--  drivers/net/ethernet/mscc/ocelot.c | 173
-rw-r--r--  drivers/net/ethernet/mscc/ocelot.h | 11
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_net.c | 360
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_vsc7514.c | 64
-rw-r--r--  drivers/net/ethernet/natsemi/jazzsonic.c | 2
-rw-r--r--  drivers/net/ethernet/natsemi/natsemi.c | 2
-rw-r--r--  drivers/net/ethernet/natsemi/xtsonic.c | 1
-rw-r--r--  drivers/net/ethernet/neterion/s2io.c | 2
-rw-r--r--  drivers/net/ethernet/neterion/vxge/vxge-main.c | 2
-rw-r--r--  drivers/net/ethernet/netronome/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/action.c | 35
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/conntrack.c | 620
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/conntrack.h | 26
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/main.h | 79
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/match.c | 333
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/metadata.c | 7
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/offload.c | 51
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_main.c | 2
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net.h | 20
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 144
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c | 21
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_main.c | 2
-rw-r--r--  drivers/net/ethernet/nxp/lpc_eth.c | 2
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c | 10
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c | 4
-rw-r--r--  drivers/net/ethernet/packetengines/hamachi.c | 63
-rw-r--r--  drivers/net/ethernet/packetengines/yellowfin.c | 2
-rw-r--r--  drivers/net/ethernet/pensando/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c | 5
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_dev.c | 28
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_dev.h | 3
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_devlink.c | 18
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_ethtool.c | 21
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_if.h | 5
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_lif.c | 42
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_main.c | 2
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_phc.c | 4
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_txrx.c | 27
-rw-r--r--  drivers/net/ethernet/qlogic/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed.h | 15
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dcbx.c | 6
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_devlink.c | 5
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_int.c | 12
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_iwarp.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_main.c | 8
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mcp.c | 1
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_nvmetcp_fw_funcs.c | 1
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede.h | 9
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 6
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_main.c | 12
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/qualcomm/emac/emac.c | 2
-rw-r--r--  drivers/net/ethernet/rdc/r6040.c | 2
-rw-r--r--  drivers/net/ethernet/realtek/8139cp.c | 2
-rw-r--r--  drivers/net/ethernet/realtek/8139too.c | 2
-rw-r--r--  drivers/net/ethernet/realtek/r8169_main.c | 13
-rw-r--r--  drivers/net/ethernet/renesas/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/renesas/ravb.h | 19
-rw-r--r--  drivers/net/ethernet/renesas/ravb_main.c | 129
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c | 4
-rw-r--r--  drivers/net/ethernet/rocker/rocker.h | 3
-rw-r--r--  drivers/net/ethernet/rocker/rocker_main.c | 9
-rw-r--r--  drivers/net/ethernet/rocker/rocker_ofdpa.c | 19
-rw-r--r--  drivers/net/ethernet/samsung/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c | 2
-rw-r--r--  drivers/net/ethernet/sfc/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/sfc/efx.c | 2
-rw-r--r--  drivers/net/ethernet/sfc/falcon/efx.c | 2
-rw-r--r--  drivers/net/ethernet/sgi/ioc3-eth.c | 2
-rw-r--r--  drivers/net/ethernet/sgi/meth.c | 2
-rw-r--r--  drivers/net/ethernet/sis/sis190.c | 2
-rw-r--r--  drivers/net/ethernet/sis/sis900.c | 2
-rw-r--r--  drivers/net/ethernet/smsc/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/smsc/epic100.c | 2
-rw-r--r--  drivers/net/ethernet/smsc/smc9194.c | 6
-rw-r--r--  drivers/net/ethernet/smsc/smc91c92_cs.c | 2
-rw-r--r--  drivers/net/ethernet/smsc/smsc911x.c | 2
-rw-r--r--  drivers/net/ethernet/smsc/smsc9420.c | 2
-rw-r--r--  drivers/net/ethernet/socionext/netsec.c | 2
-rw-r--r--  drivers/net/ethernet/socionext/sni_ave.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/common.h | 13
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c | 18
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c | 7
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 67
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 7
-rw-r--r--  drivers/net/ethernet/sun/cassini.c | 2
-rw-r--r--  drivers/net/ethernet/sun/niu.c | 8
-rw-r--r--  drivers/net/ethernet/sun/sungem.c | 2
-rw-r--r--  drivers/net/ethernet/synopsys/dwc-xlgmac-net.c | 2
-rw-r--r--  drivers/net/ethernet/tehuti/tehuti.c | 18
-rw-r--r--  drivers/net/ethernet/ti/am65-cpsw-nuss.c | 81
-rw-r--r--  drivers/net/ethernet/ti/am65-cpsw-nuss.h | 2
-rw-r--r--  drivers/net/ethernet/ti/cpmac.c | 2
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 6
-rw-r--r--  drivers/net/ethernet/ti/cpsw_new.c | 26
-rw-r--r--  drivers/net/ethernet/ti/davinci_emac.c | 4
-rw-r--r--  drivers/net/ethernet/ti/netcp_core.c | 2
-rw-r--r--  drivers/net/ethernet/ti/tlan.c | 2
-rw-r--r--  drivers/net/ethernet/toshiba/spider_net.c | 2
-rw-r--r--  drivers/net/ethernet/toshiba/tc35815.c | 2
-rw-r--r--  drivers/net/ethernet/tundra/tsi108_eth.c | 2
-rw-r--r--  drivers/net/ethernet/via/via-rhine.c | 2
-rw-r--r--  drivers/net/ethernet/via/via-velocity.c | 2
-rw-r--r--  drivers/net/ethernet/xilinx/ll_temac_main.c | 2
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 2
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_emaclite.c | 2
-rw-r--r--  drivers/net/ethernet/xircom/xirc2ps_cs.c | 2
-rw-r--r--  drivers/net/ethernet/xscale/ixp4xx_eth.c | 2
-rw-r--r--  drivers/net/fddi/skfp/skfddi.c | 19
-rw-r--r--  drivers/net/hamradio/baycom_epp.c | 9
-rw-r--r--  drivers/net/hamradio/baycom_par.c | 12
-rw-r--r--  drivers/net/hamradio/baycom_ser_fdx.c | 12
-rw-r--r--  drivers/net/hamradio/baycom_ser_hdx.c | 12
-rw-r--r--  drivers/net/hamradio/bpqether.c | 9
-rw-r--r--  drivers/net/hamradio/dmascc.c | 18
-rw-r--r--  drivers/net/hamradio/hdlcdrv.c | 20
-rw-r--r--  drivers/net/hamradio/scc.c | 13
-rw-r--r--  drivers/net/hamradio/yam.c | 19
-rw-r--r--  drivers/net/hippi/rrunner.c | 11
-rw-r--r--  drivers/net/hippi/rrunner.h | 3
-rw-r--r--  drivers/net/ipa/Makefile | 3
-rw-r--r--  drivers/net/ipa/gsi.c | 241
-rw-r--r--  drivers/net/ipa/gsi.h | 31
-rw-r--r--  drivers/net/ipa/gsi_trans.c | 34
-rw-r--r--  drivers/net/ipa/ipa.h | 14
-rw-r--r--  drivers/net/ipa/ipa_clock.c | 215
-rw-r--r--  drivers/net/ipa/ipa_clock.h | 35
-rw-r--r--  drivers/net/ipa/ipa_cmd.c | 51
-rw-r--r--  drivers/net/ipa/ipa_cmd.h | 22
-rw-r--r--  drivers/net/ipa/ipa_data-v4.11.c | 15
-rw-r--r--  drivers/net/ipa/ipa_data-v4.5.c | 2
-rw-r--r--  drivers/net/ipa/ipa_data-v4.9.c | 11
-rw-r--r--  drivers/net/ipa/ipa_endpoint.c | 40
-rw-r--r--  drivers/net/ipa/ipa_interrupt.c | 78
-rw-r--r--  drivers/net/ipa/ipa_interrupt.h | 8
-rw-r--r--  drivers/net/ipa/ipa_main.c | 188
-rw-r--r--  drivers/net/ipa/ipa_modem.c | 127
-rw-r--r--  drivers/net/ipa/ipa_modem.h | 4
-rw-r--r--  drivers/net/ipa/ipa_qmi.c | 6
-rw-r--r--  drivers/net/ipa/ipa_qmi.h | 19
-rw-r--r--  drivers/net/ipa/ipa_reg.h | 12
-rw-r--r--  drivers/net/ipa/ipa_resource.c | 3
-rw-r--r--  drivers/net/ipa/ipa_smp2p.c | 30
-rw-r--r--  drivers/net/ipa/ipa_table.c | 40
-rw-r--r--  drivers/net/ipa/ipa_table.h | 16
-rw-r--r--  drivers/net/ipa/ipa_uc.c | 58
-rw-r--r--  drivers/net/ipa/ipa_uc.h | 22
-rw-r--r--  drivers/net/ipvlan/ipvlan_main.c | 1
-rw-r--r--  drivers/net/macvlan.c | 8
-rw-r--r--  drivers/net/mctp/Kconfig | 8
-rw-r--r--  drivers/net/mctp/Makefile | 0
-rw-r--r--  drivers/net/mdio/Kconfig | 3
-rw-r--r--  drivers/net/mdio/mdio-ipq4019.c | 44
-rw-r--r--  drivers/net/mhi/Makefile | 3
-rw-r--r--  drivers/net/mhi/mhi.h | 41
-rw-r--r--  drivers/net/mhi/proto_mbim.c | 304
-rw-r--r--  drivers/net/mhi_net.c (renamed from drivers/net/mhi/net.c) | 166
-rw-r--r--  drivers/net/mii.c | 6
-rw-r--r--  drivers/net/netdevsim/bus.c | 43
-rw-r--r--  drivers/net/netdevsim/dev.c | 25
-rw-r--r--  drivers/net/netdevsim/fib.c | 2
-rw-r--r--  drivers/net/netdevsim/netdev.c | 6
-rw-r--r--  drivers/net/netdevsim/netdevsim.h | 2
-rw-r--r--  drivers/net/pcs/pcs-xpcs.c | 4
-rw-r--r--  drivers/net/phy/Kconfig | 8
-rw-r--r--  drivers/net/phy/Makefile | 1
-rw-r--r--  drivers/net/phy/at803x.c | 18
-rw-r--r--  drivers/net/phy/dp83822.c | 8
-rw-r--r--  drivers/net/phy/intel-xway.c | 76
-rw-r--r--  drivers/net/phy/marvell.c | 144
-rw-r--r--  drivers/net/phy/marvell10g.c | 89
-rw-r--r--  drivers/net/phy/mscc/mscc_ptp.c | 8
-rw-r--r--  drivers/net/phy/mxl-gpy.c | 727
-rw-r--r--  drivers/net/phy/nxp-tja11xx.c | 13
-rw-r--r--  drivers/net/phy/phy.c | 4
-rw-r--r--  drivers/net/phy/phy_device.c | 14
-rw-r--r--  drivers/net/phy/phylink.c | 21
-rw-r--r--  drivers/net/plip/plip.c | 12
-rw-r--r--  drivers/net/ppp/ppp_generic.c | 6
-rw-r--r--  drivers/net/sb1000.c | 20
-rw-r--r--  drivers/net/slip/slip.c | 13
-rw-r--r--  drivers/net/usb/asix_devices.c | 12
-rw-r--r--  drivers/net/usb/ax88172a.c | 2
-rw-r--r--  drivers/net/usb/ax88179_178a.c | 2
-rw-r--r--  drivers/net/usb/cdc-phonet.c | 5
-rw-r--r--  drivers/net/usb/dm9601.c | 2
-rw-r--r--  drivers/net/usb/hso.c | 13
-rw-r--r--  drivers/net/usb/ipheth.c | 2
-rw-r--r--  drivers/net/usb/lan78xx.c | 2
-rw-r--r--  drivers/net/usb/mcs7830.c | 2
-rw-r--r--  drivers/net/usb/pegasus.c | 5
-rw-r--r--  drivers/net/usb/r8152.c | 2
-rw-r--r--  drivers/net/usb/rtl8150.c | 5
-rw-r--r--  drivers/net/usb/smsc75xx.c | 2
-rw-r--r--  drivers/net/usb/smsc95xx.c | 2
-rw-r--r--  drivers/net/usb/sr9700.c | 2
-rw-r--r--  drivers/net/usb/sr9800.c | 2
-rw-r--r--  drivers/net/usb/usbnet.c | 8
-rw-r--r--  drivers/net/veth.c | 307
-rw-r--r--  drivers/net/virtio_net.c | 37
-rw-r--r--  drivers/net/vmxnet3/Makefile | 2
-rw-r--r--  drivers/net/vmxnet3/upt1_defs.h | 2
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_defs.h | 50
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_drv.c | 221
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_ethtool.c | 20
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_int.h | 22
-rw-r--r--  drivers/net/vrf.c | 21
-rw-r--r--  drivers/net/wan/Kconfig | 51
-rw-r--r--  drivers/net/wan/Makefile | 1
-rw-r--r--  drivers/net/wan/c101.c | 33
-rw-r--r--  drivers/net/wan/cosa.c | 15
-rw-r--r--  drivers/net/wan/farsync.c | 123
-rw-r--r--  drivers/net/wan/fsl_ucc_hdlc.c | 19
-rw-r--r--  drivers/net/wan/hdlc.c | 9
-rw-r--r--  drivers/net/wan/hdlc_cisco.c | 14
-rw-r--r--  drivers/net/wan/hdlc_fr.c | 40
-rw-r--r--  drivers/net/wan/hdlc_ppp.c | 8
-rw-r--r--  drivers/net/wan/hdlc_raw.c | 14
-rw-r--r--  drivers/net/wan/hdlc_raw_eth.c | 14
-rw-r--r--  drivers/net/wan/hdlc_x25.c | 16
-rw-r--r--  drivers/net/wan/hostess_sv11.c | 13
-rw-r--r--  drivers/net/wan/ixp4xx_hss.c | 22
-rw-r--r--  drivers/net/wan/lmc/lmc.h | 2
-rw-r--r--  drivers/net/wan/lmc/lmc_main.c | 33
-rw-r--r--  drivers/net/wan/lmc/lmc_proto.c | 7
-rw-r--r--  drivers/net/wan/lmc/lmc_proto.h | 1
-rw-r--r--  drivers/net/wan/n2.c | 32
-rw-r--r--  drivers/net/wan/pc300too.c | 44
-rw-r--r--  drivers/net/wan/pci200syn.c | 32
-rw-r--r--  drivers/net/wan/sbni.c | 1638
-rw-r--r--  drivers/net/wan/sbni.h | 147
-rw-r--r--  drivers/net/wan/sealevel.c | 10
-rw-r--r--  drivers/net/wan/wanxl.c | 21
-rw-r--r--  drivers/net/wireless/cisco/airo.c | 15
-rw-r--r--  drivers/net/wireless/intersil/hostap/hostap.h | 3
-rw-r--r--  drivers/net/wireless/intersil/hostap/hostap_ioctl.c | 30
-rw-r--r--  drivers/net/wireless/intersil/hostap/hostap_main.c | 3
-rw-r--r--  drivers/net/wwan/Kconfig | 12
-rw-r--r--  drivers/net/wwan/Makefile | 1
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_pcie.c | 19
-rw-r--r--  drivers/net/wwan/mhi_wwan_mbim.c | 658
-rw-r--r--  drivers/net/wwan/wwan_core.c | 7
-rw-r--r--  drivers/nfc/fdp/fdp.c | 38
-rw-r--r--  drivers/nfc/fdp/fdp.h | 4
-rw-r--r--  drivers/nfc/fdp/i2c.c | 8
-rw-r--r--  drivers/nfc/mei_phy.c | 4
-rw-r--r--  drivers/nfc/mei_phy.h | 2
-rw-r--r--  drivers/nfc/microread/i2c.c | 4
-rw-r--r--  drivers/nfc/microread/microread.c | 14
-rw-r--r--  drivers/nfc/microread/microread.h | 6
-rw-r--r--  drivers/nfc/nfcmrvl/fw_dnld.c | 16
-rw-r--r--  drivers/nfc/nfcmrvl/i2c.c | 4
-rw-r--r--  drivers/nfc/nfcmrvl/main.c | 6
-rw-r--r--  drivers/nfc/nfcmrvl/nfcmrvl.h | 6
-rw-r--r--  drivers/nfc/nfcmrvl/spi.c | 4
-rw-r--r--  drivers/nfc/nfcmrvl/uart.c | 4
-rw-r--r--  drivers/nfc/nfcmrvl/usb.c | 2
-rw-r--r--  drivers/nfc/nfcsim.c | 4
-rw-r--r--  drivers/nfc/nxp-nci/core.c | 2
-rw-r--r--  drivers/nfc/pn533/pn533.c | 2
-rw-r--r--  drivers/nfc/pn544/i2c.c | 2
-rw-r--r--  drivers/nfc/pn544/pn544.c | 17
-rw-r--r--  drivers/nfc/pn544/pn544.h | 7
-rw-r--r--  drivers/nfc/port100.c | 47
-rw-r--r--  drivers/nfc/s3fwrn5/core.c | 7
-rw-r--r--  drivers/nfc/s3fwrn5/firmware.c | 12
-rw-r--r--  drivers/nfc/s3fwrn5/nci.c | 8
-rw-r--r--  drivers/nfc/s3fwrn5/nci.h | 2
-rw-r--r--  drivers/nfc/st-nci/core.c | 5
-rw-r--r--  drivers/nfc/st-nci/i2c.c | 2
-rw-r--r--  drivers/nfc/st-nci/ndlc.c | 6
-rw-r--r--  drivers/nfc/st-nci/ndlc.h | 8
-rw-r--r--  drivers/nfc/st-nci/spi.c | 2
-rw-r--r--  drivers/nfc/st-nci/vendor_cmds.c | 2
-rw-r--r--  drivers/nfc/st21nfca/core.c | 6
-rw-r--r--  drivers/nfc/st21nfca/i2c.c | 6
-rw-r--r--  drivers/nfc/st21nfca/st21nfca.h | 4
-rw-r--r--  drivers/nfc/st21nfca/vendor_cmds.c | 2
-rw-r--r--  drivers/nfc/st95hf/core.c | 2
-rw-r--r--  drivers/nfc/trf7970a.c | 19
-rw-r--r--  drivers/nfc/virtual_ncidev.c | 13
-rw-r--r--  drivers/ptp/Kconfig | 24
-rw-r--r--  drivers/ptp/ptp_ocp.c | 1283
-rw-r--r--  drivers/ptp/ptp_vclock.c | 2
-rw-r--r--  drivers/s390/cio/ccwgroup.c | 22
-rw-r--r--  drivers/s390/net/Kconfig | 10
-rw-r--r--  drivers/s390/net/ctcm_fsms.c | 2
-rw-r--r--  drivers/s390/net/ctcm_mpc.c | 2
-rw-r--r--  drivers/s390/net/qeth_core.h | 51
-rw-r--r--  drivers/s390/net/qeth_core_main.c | 189
-rw-r--r--  drivers/s390/net/qeth_core_mpc.c | 3
-rw-r--r--  drivers/s390/net/qeth_core_mpc.h | 23
-rw-r--r--  drivers/s390/net/qeth_core_sys.c | 5
-rw-r--r--  drivers/s390/net/qeth_ethtool.c | 7
-rw-r--r--  drivers/s390/net/qeth_l2_main.c | 414
-rw-r--r--  drivers/s390/net/qeth_l3_main.c | 19
-rw-r--r--  drivers/scsi/cxgbi/cxgb4i/Kconfig | 1
-rw-r--r--  drivers/staging/octeon/ethernet.c | 12
-rw-r--r--  drivers/staging/qlge/qlge_main.c | 5
-rw-r--r--  drivers/staging/rtl8188eu/include/osdep_intf.h | 2
-rw-r--r--  drivers/staging/rtl8188eu/include/rtw_android.h | 3
-rw-r--r--  drivers/staging/rtl8188eu/os_dep/ioctl_linux.c | 3
-rw-r--r--  drivers/staging/rtl8188eu/os_dep/os_intfs.c | 1
-rw-r--r--  drivers/staging/rtl8188eu/os_dep/rtw_android.c | 14
-rw-r--r--  drivers/staging/rtl8723bs/include/osdep_intf.h | 2
-rw-r--r--  drivers/staging/rtl8723bs/os_dep/ioctl_linux.c | 18
-rw-r--r--  drivers/staging/rtl8723bs/os_dep/os_intfs.c | 1
-rw-r--r--  drivers/staging/wlan-ng/p80211netdev.c | 76
-rw-r--r--  drivers/tty/synclink_gt.c | 19
-rw-r--r--  drivers/vdpa/mlx5/net/mlx5_vnet.c | 2
681 files changed, 20764 insertions, 11068 deletions
diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
index 4f2951cbe69c..d0e67ec46216 100644
--- a/drivers/atm/horizon.c
+++ b/drivers/atm/horizon.c
@@ -2167,10 +2167,10 @@ static int hrz_open (struct atm_vcc *atm_vcc)
// Part of the job is done by atm_pcr_goal which gives us a PCR
// specification which says: EITHER grab the maximum available PCR
- // (and perhaps a lower bound which we musn't pass), OR grab this
+ // (and perhaps a lower bound which we must not pass), OR grab this
// amount, rounding down if you have to (and perhaps a lower bound
- // which we musn't pass) OR grab this amount, rounding up if you
- // have to (and perhaps an upper bound which we musn't pass). If any
+ // which we must not pass) OR grab this amount, rounding up if you
+ // have to (and perhaps an upper bound which we must not pass). If any
// bounds ARE passed we fail. Note that rounding is only rounding to
// match device limitations, we do not round down to satisfy
// bandwidth availability even if this would not violate any given
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 9e4bd751db79..81ce81a75fc6 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -3536,7 +3536,7 @@ static int idt77252_preset(struct idt77252_dev *card)
return -1;
}
if (!(pci_command & PCI_COMMAND_IO)) {
- printk("%s: PCI_COMMAND: %04x (???)\n",
+ printk("%s: PCI_COMMAND: %04x (?)\n",
card->name, pci_command);
deinit_card(card);
return (-1);
diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
index 09c8ab5e0959..b3691de8ac06 100644
--- a/drivers/bus/fsl-mc/fsl-mc-bus.c
+++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
@@ -914,7 +914,8 @@ void fsl_mc_device_remove(struct fsl_mc_device *mc_dev)
}
EXPORT_SYMBOL_GPL(fsl_mc_device_remove);
-struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev)
+struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev,
+ u16 if_id)
{
struct fsl_mc_device *mc_bus_dev, *endpoint;
struct fsl_mc_obj_desc endpoint_desc = {{ 0 }};
@@ -925,6 +926,7 @@ struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev)
mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
strcpy(endpoint1.type, mc_dev->obj_desc.type);
endpoint1.id = mc_dev->obj_desc.id;
+ endpoint1.if_id = if_id;
err = dprc_get_connection(mc_bus_dev->mc_io, 0,
mc_bus_dev->mc_handle,
diff --git a/drivers/bus/mhi/pci_generic.c b/drivers/bus/mhi/pci_generic.c
index 4dd1077354af..b33b9d75e8af 100644
--- a/drivers/bus/mhi/pci_generic.c
+++ b/drivers/bus/mhi/pci_generic.c
@@ -32,6 +32,7 @@
* @edl: emergency download mode firmware path (if any)
* @bar_num: PCI base address register to use for MHI MMIO register space
* @dma_data_width: DMA transfer word size (32 or 64 bits)
+ * @mru_default: default MRU size for MBIM network packets
* @sideband_wake: Devices using dedicated sideband GPIO for wakeup instead
* of inband wake support (such as sdx24)
*/
@@ -42,6 +43,7 @@ struct mhi_pci_dev_info {
const char *edl;
unsigned int bar_num;
unsigned int dma_data_width;
+ unsigned int mru_default;
bool sideband_wake;
};
@@ -272,6 +274,7 @@ static const struct mhi_pci_dev_info mhi_qcom_sdx55_info = {
.config = &modem_qcom_v1_mhiv_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32,
+ .mru_default = 32768,
.sideband_wake = false,
};
@@ -664,6 +667,7 @@ static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mhi_cntrl->status_cb = mhi_pci_status_cb;
mhi_cntrl->runtime_get = mhi_pci_runtime_get;
mhi_cntrl->runtime_put = mhi_pci_runtime_put;
+ mhi_cntrl->mru = info->mru_default;
if (info->sideband_wake) {
mhi_cntrl->wake_get = mhi_pci_wake_get_nop;
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index 6eaefea0520e..5ac53dcb3a6a 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -4050,16 +4050,15 @@ static int hdlcdev_close(struct net_device *dev)
* called by network layer to process IOCTL call to network device
*
* dev pointer to network device structure
- * ifr pointer to network interface request structure
- * cmd IOCTL command code
+ * ifs pointer to network interface settings structure
*
* returns 0 if success, otherwise error code
*/
-static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+static int hdlcdev_wan_ioctl(struct net_device *dev, struct if_settings *ifs)
{
const size_t size = sizeof(sync_serial_settings);
sync_serial_settings new_line;
- sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
+ sync_serial_settings __user *line = ifs->ifs_ifsu.sync;
MGSLPC_INFO *info = dev_to_port(dev);
unsigned int flags;
@@ -4070,17 +4069,14 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
if (info->port.count)
return -EBUSY;
- if (cmd != SIOCWANDEV)
- return hdlc_ioctl(dev, ifr, cmd);
-
memset(&new_line, 0, size);
- switch(ifr->ifr_settings.type) {
+ switch (ifs->type) {
case IF_GET_IFACE: /* return current sync_serial_settings */
- ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
- if (ifr->ifr_settings.size < size) {
- ifr->ifr_settings.size = size; /* data size wanted */
+ ifs->type = IF_IFACE_SYNC_SERIAL;
+ if (ifs->size < size) {
+ ifs->size = size; /* data size wanted */
return -ENOBUFS;
}
@@ -4148,9 +4144,8 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
tty_kref_put(tty);
}
return 0;
-
default:
- return hdlc_ioctl(dev, ifr, cmd);
+ return hdlc_ioctl(dev, ifs);
}
}
@@ -4225,7 +4220,7 @@ static const struct net_device_ops hdlcdev_ops = {
.ndo_open = hdlcdev_open,
.ndo_stop = hdlcdev_close,
.ndo_start_xmit = hdlc_start_xmit,
- .ndo_do_ioctl = hdlcdev_ioctl,
+ .ndo_siocwandev = hdlcdev_wan_ioctl,
.ndo_tx_timeout = hdlcdev_tx_timeout,
};
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index b8e5e371bb19..a190fb581591 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -996,7 +996,7 @@ int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD));
MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries));
MLX5_SET(cqc, cqc, uar_page, index);
- MLX5_SET(cqc, cqc, c_eqn, eqn);
+ MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
MLX5_SET64(cqc, cqc, dbr_addr, cq->db.dma);
if (cq->create_flags & IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN)
MLX5_SET(cqc, cqc, oi, 1);
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index c869b2a91a28..e95967aefe78 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -1436,11 +1436,10 @@ out:
rcu_read_unlock();
}
-static bool is_apu_thread_cq(struct mlx5_ib_dev *dev, const void *in)
+static bool is_apu_cq(struct mlx5_ib_dev *dev, const void *in)
{
if (!MLX5_CAP_GEN(dev->mdev, apu) ||
- !MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context),
- apu_thread_cq))
+ !MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context), apu_cq))
return false;
return true;
@@ -1500,7 +1499,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
err = mlx5_core_create_dct(dev, &obj->core_dct, cmd_in,
cmd_in_len, cmd_out, cmd_out_len);
} else if (opcode == MLX5_CMD_OP_CREATE_CQ &&
- !is_apu_thread_cq(dev, cmd_in)) {
+ !is_apu_cq(dev, cmd_in)) {
obj->flags |= DEVX_OBJ_FLAGS_CQ;
obj->core_cq.comp = devx_cq_comp;
err = mlx5_core_create_cq(dev->mdev, &obj->core_cq,
diff --git a/drivers/infiniband/hw/mlx5/ib_rep.c b/drivers/infiniband/hw/mlx5/ib_rep.c
index b25e0b33a11a..52821485371a 100644
--- a/drivers/infiniband/hw/mlx5/ib_rep.c
+++ b/drivers/infiniband/hw/mlx5/ib_rep.c
@@ -8,13 +8,15 @@
#include "srq.h"
static int
-mlx5_ib_set_vport_rep(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
+mlx5_ib_set_vport_rep(struct mlx5_core_dev *dev,
+ struct mlx5_eswitch_rep *rep,
+ int vport_index)
{
struct mlx5_ib_dev *ibdev;
- int vport_index;
ibdev = mlx5_eswitch_uplink_get_proto_dev(dev->priv.eswitch, REP_IB);
- vport_index = rep->vport_index;
+ if (!ibdev)
+ return -EINVAL;
ibdev->port[vport_index].rep = rep;
rep->rep_data[REP_IB].priv = ibdev;
@@ -26,19 +28,39 @@ mlx5_ib_set_vport_rep(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
return 0;
}
+static void mlx5_ib_register_peer_vport_reps(struct mlx5_core_dev *mdev);
+
static int
mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
u32 num_ports = mlx5_eswitch_get_total_vports(dev);
const struct mlx5_ib_profile *profile;
+ struct mlx5_core_dev *peer_dev;
struct mlx5_ib_dev *ibdev;
+ u32 peer_num_ports;
int vport_index;
int ret;
+ vport_index = rep->vport_index;
+
+ if (mlx5_lag_is_shared_fdb(dev)) {
+ peer_dev = mlx5_lag_get_peer_mdev(dev);
+ peer_num_ports = mlx5_eswitch_get_total_vports(peer_dev);
+ if (mlx5_lag_is_master(dev)) {
+ /* Only 1 ib port is the representor for both uplinks */
+ num_ports += peer_num_ports - 1;
+ } else {
+ if (rep->vport == MLX5_VPORT_UPLINK)
+ return 0;
+ vport_index += peer_num_ports;
+ dev = peer_dev;
+ }
+ }
+
if (rep->vport == MLX5_VPORT_UPLINK)
profile = &raw_eth_profile;
else
- return mlx5_ib_set_vport_rep(dev, rep);
+ return mlx5_ib_set_vport_rep(dev, rep, vport_index);
ibdev = ib_alloc_device(mlx5_ib_dev, ib_dev);
if (!ibdev)
@@ -64,6 +86,8 @@ mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
goto fail_add;
rep->rep_data[REP_IB].priv = ibdev;
+ if (mlx5_lag_is_shared_fdb(dev))
+ mlx5_ib_register_peer_vport_reps(dev);
return 0;
@@ -82,18 +106,45 @@ static void *mlx5_ib_rep_to_dev(struct mlx5_eswitch_rep *rep)
static void
mlx5_ib_vport_rep_unload(struct mlx5_eswitch_rep *rep)
{
+ struct mlx5_core_dev *mdev = mlx5_eswitch_get_core_dev(rep->esw);
struct mlx5_ib_dev *dev = mlx5_ib_rep_to_dev(rep);
+ int vport_index = rep->vport_index;
struct mlx5_ib_port *port;
- port = &dev->port[rep->vport_index];
+ if (WARN_ON(!mdev))
+ return;
+
+ if (mlx5_lag_is_shared_fdb(mdev) &&
+ !mlx5_lag_is_master(mdev)) {
+ struct mlx5_core_dev *peer_mdev;
+
+ if (rep->vport == MLX5_VPORT_UPLINK)
+ return;
+ peer_mdev = mlx5_lag_get_peer_mdev(mdev);
+ vport_index += mlx5_eswitch_get_total_vports(peer_mdev);
+ }
+
+ if (!dev)
+ return;
+
+ port = &dev->port[vport_index];
write_lock(&port->roce.netdev_lock);
port->roce.netdev = NULL;
write_unlock(&port->roce.netdev_lock);
rep->rep_data[REP_IB].priv = NULL;
port->rep = NULL;
- if (rep->vport == MLX5_VPORT_UPLINK)
+ if (rep->vport == MLX5_VPORT_UPLINK) {
+ struct mlx5_core_dev *peer_mdev;
+ struct mlx5_eswitch *esw;
+
+ if (mlx5_lag_is_shared_fdb(mdev)) {
+ peer_mdev = mlx5_lag_get_peer_mdev(mdev);
+ esw = peer_mdev->priv.eswitch;
+ mlx5_eswitch_unregister_vport_reps(esw, REP_IB);
+ }
__mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
+ }
}
static const struct mlx5_eswitch_rep_ops rep_ops = {
@@ -102,6 +153,18 @@ static const struct mlx5_eswitch_rep_ops rep_ops = {
.get_proto_dev = mlx5_ib_rep_to_dev,
};
+static void mlx5_ib_register_peer_vport_reps(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_core_dev *peer_mdev = mlx5_lag_get_peer_mdev(mdev);
+ struct mlx5_eswitch *esw;
+
+ if (!peer_mdev)
+ return;
+
+ esw = peer_mdev->priv.eswitch;
+ mlx5_eswitch_register_vport_reps(esw, &rep_ops, REP_IB);
+}
+
struct net_device *mlx5_ib_get_rep_netdev(struct mlx5_eswitch *esw,
u16 vport_num)
{
@@ -123,7 +186,7 @@ struct mlx5_flow_handle *create_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
rep = dev->port[port - 1].rep;
- return mlx5_eswitch_add_send_to_vport_rule(esw, rep, sq->base.mqp.qpn);
+ return mlx5_eswitch_add_send_to_vport_rule(esw, esw, rep, sq->base.mqp.qpn);
}
static int mlx5r_rep_probe(struct auxiliary_device *adev,
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 094c976b1eed..ae05e143401c 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -126,6 +126,7 @@ static int get_port_state(struct ib_device *ibdev,
static struct mlx5_roce *mlx5_get_rep_roce(struct mlx5_ib_dev *dev,
struct net_device *ndev,
+ struct net_device *upper,
u32 *port_num)
{
struct net_device *rep_ndev;
@@ -137,6 +138,14 @@ static struct mlx5_roce *mlx5_get_rep_roce(struct mlx5_ib_dev *dev,
if (!port->rep)
continue;
+ if (upper == ndev && port->rep->vport == MLX5_VPORT_UPLINK) {
+ *port_num = i + 1;
+ return &port->roce;
+ }
+
+ if (upper && port->rep->vport == MLX5_VPORT_UPLINK)
+ continue;
+
read_lock(&port->roce.netdev_lock);
rep_ndev = mlx5_ib_get_rep_netdev(port->rep->esw,
port->rep->vport);
@@ -196,11 +205,12 @@ static int mlx5_netdev_event(struct notifier_block *this,
}
if (ibdev->is_rep)
- roce = mlx5_get_rep_roce(ibdev, ndev, &port_num);
+ roce = mlx5_get_rep_roce(ibdev, ndev, upper, &port_num);
if (!roce)
return NOTIFY_DONE;
- if ((upper == ndev || (!upper && ndev == roce->netdev))
- && ibdev->ib_active) {
+ if ((upper == ndev ||
+ ((!upper || ibdev->is_rep) && ndev == roce->netdev)) &&
+ ibdev->ib_active) {
struct ib_event ibev = { };
enum ib_port_state port_state;
@@ -3012,7 +3022,7 @@ static int mlx5_eth_lag_init(struct mlx5_ib_dev *dev)
struct mlx5_flow_table *ft;
int err;
- if (!ns || !mlx5_lag_is_roce(mdev))
+ if (!ns || !mlx5_lag_is_active(mdev))
return 0;
err = mlx5_cmd_create_vport_lag(mdev);
@@ -3074,9 +3084,11 @@ static int mlx5_enable_eth(struct mlx5_ib_dev *dev)
{
int err;
- err = mlx5_nic_vport_enable_roce(dev->mdev);
- if (err)
- return err;
+ if (!dev->is_rep && dev->profile != &raw_eth_profile) {
+ err = mlx5_nic_vport_enable_roce(dev->mdev);
+ if (err)
+ return err;
+ }
err = mlx5_eth_lag_init(dev);
if (err)
@@ -3085,7 +3097,8 @@ static int mlx5_enable_eth(struct mlx5_ib_dev *dev)
return 0;
err_disable_roce:
- mlx5_nic_vport_disable_roce(dev->mdev);
+ if (!dev->is_rep && dev->profile != &raw_eth_profile)
+ mlx5_nic_vport_disable_roce(dev->mdev);
return err;
}
@@ -3093,7 +3106,8 @@ err_disable_roce:
static void mlx5_disable_eth(struct mlx5_ib_dev *dev)
{
mlx5_eth_lag_cleanup(dev);
- mlx5_nic_vport_disable_roce(dev->mdev);
+ if (!dev->is_rep && dev->profile != &raw_eth_profile)
+ mlx5_nic_vport_disable_roce(dev->mdev);
}
static int mlx5_ib_rn_get_params(struct ib_device *device, u32 port_num,
@@ -3950,12 +3964,7 @@ static int mlx5_ib_roce_init(struct mlx5_ib_dev *dev)
/* Register only for native ports */
err = mlx5_add_netdev_notifier(dev, port_num);
- if (err || dev->is_rep || !mlx5_is_roce_init_enabled(mdev))
- /*
- * We don't enable ETH interface for
- * 1. IB representors
- * 2. User disabled ROCE through devlink interface
- */
+ if (err)
return err;
err = mlx5_enable_eth(dev);
@@ -3980,8 +3989,7 @@ static void mlx5_ib_roce_cleanup(struct mlx5_ib_dev *dev)
ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
if (ll == IB_LINK_LAYER_ETHERNET) {
- if (!dev->is_rep)
- mlx5_disable_eth(dev);
+ mlx5_disable_eth(dev);
port_num = mlx5_core_native_port_num(dev->mdev) - 1;
mlx5_remove_netdev_notifier(dev, port_num);
@@ -4037,7 +4045,7 @@ static int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)
{
const char *name;
- if (!mlx5_lag_is_roce(dev->mdev))
+ if (!mlx5_lag_is_active(dev->mdev))
name = "mlx5_%d";
else
name = "mlx5_bond_%d";
diff --git a/drivers/infiniband/hw/mlx5/std_types.c b/drivers/infiniband/hw/mlx5/std_types.c
index c0ddf7b3c6e2..bbfcce3bdc84 100644
--- a/drivers/infiniband/hw/mlx5/std_types.c
+++ b/drivers/infiniband/hw/mlx5/std_types.c
@@ -114,14 +114,18 @@ out:
static int fill_switchdev_info(struct mlx5_ib_dev *dev, u32 port_num,
struct mlx5_ib_uapi_query_port *info)
{
- struct mlx5_core_dev *mdev = dev->mdev;
struct mlx5_eswitch_rep *rep;
+ struct mlx5_core_dev *mdev;
int err;
rep = dev->port[port_num - 1].rep;
if (!rep)
return -EOPNOTSUPP;
+ mdev = mlx5_eswitch_get_core_dev(rep->esw);
+ if (!mdev)
+ return -EINVAL;
+
info->vport = rep->vport;
info->flags |= MLX5_IB_UAPI_QUERY_PORT_VPORT;
@@ -138,9 +142,9 @@ static int fill_switchdev_info(struct mlx5_ib_dev *dev, u32 port_num,
if (err)
return err;
- if (mlx5_eswitch_vport_match_metadata_enabled(mdev->priv.eswitch)) {
+ if (mlx5_eswitch_vport_match_metadata_enabled(rep->esw)) {
info->reg_c0.value = mlx5_eswitch_get_vport_metadata_for_match(
- mdev->priv.eswitch, rep->vport);
+ rep->esw, rep->vport);
info->reg_c0.mask = mlx5_eswitch_get_vport_metadata_mask();
info->flags |= MLX5_IB_UAPI_QUERY_PORT_VPORT_REG_C0;
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index abf60f4d9203..0aa8629fdf62 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1745,10 +1745,10 @@ static int ipoib_ioctl(struct net_device *dev, struct ifreq *ifr,
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
- if (!priv->rn_ops->ndo_do_ioctl)
+ if (!priv->rn_ops->ndo_eth_ioctl)
return -EOPNOTSUPP;
- return priv->rn_ops->ndo_do_ioctl(dev, ifr, cmd);
+ return priv->rn_ops->ndo_eth_ioctl(dev, ifr, cmd);
}
static int ipoib_dev_init(struct net_device *dev)
@@ -2078,7 +2078,7 @@ static const struct net_device_ops ipoib_netdev_ops_pf = {
.ndo_set_vf_guid = ipoib_set_vf_guid,
.ndo_set_mac_address = ipoib_set_mac,
.ndo_get_stats64 = ipoib_get_stats,
- .ndo_do_ioctl = ipoib_ioctl,
+ .ndo_eth_ioctl = ipoib_ioctl,
};
static const struct net_device_ops ipoib_netdev_ops_vf = {
@@ -2093,7 +2093,7 @@ static const struct net_device_ops ipoib_netdev_ops_vf = {
.ndo_set_rx_mode = ipoib_set_mcast_list,
.ndo_get_iflink = ipoib_get_iflink,
.ndo_get_stats64 = ipoib_get_stats,
- .ndo_do_ioctl = ipoib_ioctl,
+ .ndo_eth_ioctl = ipoib_ioctl,
};
static const struct net_device_ops ipoib_netdev_default_pf = {
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 6977f8248df7..f37b1c56f7c4 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -431,10 +431,10 @@ config VSOCKMON
config MHI_NET
tristate "MHI network driver"
depends on MHI_BUS
- select WWAN
help
This is the network driver for MHI bus. It can be used with
- QCOM based WWAN modems (like SDX55). Say Y or M.
+ QCOM based WWAN modems for IP or QMAP/rmnet protocol (like SDX55).
+ Say Y or M.
endif # NET_CORE
@@ -483,6 +483,8 @@ config NET_SB1000
source "drivers/net/phy/Kconfig"
+source "drivers/net/mctp/Kconfig"
+
source "drivers/net/mdio/Kconfig"
source "drivers/net/pcs/Kconfig"
@@ -549,8 +551,8 @@ config VMXNET3
tristate "VMware VMXNET3 ethernet driver"
depends on PCI && INET
depends on !(PAGE_SIZE_64KB || ARM64_64K_PAGES || \
- IA64_PAGE_SIZE_64KB || MICROBLAZE_64K_PAGES || \
- PARISC_PAGE_SIZE_64KB || PPC_64K_PAGES)
+ IA64_PAGE_SIZE_64KB || PARISC_PAGE_SIZE_64KB || \
+ PPC_64K_PAGES)
help
This driver supports VMware's vmxnet3 virtual ethernet NIC.
To compile this driver as a module, choose M here: the
@@ -604,4 +606,11 @@ config NET_FAILOVER
a VM with direct attached VF by failing over to the paravirtual
datapath when the VF is unplugged.
+config NETDEV_LEGACY_INIT
+ bool
+ depends on ISA
+ help
+ Drivers that call netdev_boot_setup_check() should select this
+ symbol, everything else no longer needs it.
+
endif # NETDEVICES
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 7ffd2d03efaf..739838623cf6 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -18,7 +18,8 @@ obj-$(CONFIG_MACVLAN) += macvlan.o
obj-$(CONFIG_MACVTAP) += macvtap.o
obj-$(CONFIG_MII) += mii.o
obj-$(CONFIG_MDIO) += mdio.o
-obj-$(CONFIG_NET) += Space.o loopback.o
+obj-$(CONFIG_NET) += loopback.o
+obj-$(CONFIG_NETDEV_LEGACY_INIT) += Space.o
obj-$(CONFIG_NETCONSOLE) += netconsole.o
obj-y += phy/
obj-y += mdio/
@@ -36,7 +37,7 @@ obj-$(CONFIG_GTP) += gtp.o
obj-$(CONFIG_NLMON) += nlmon.o
obj-$(CONFIG_NET_VRF) += vrf.o
obj-$(CONFIG_VSOCKMON) += vsockmon.o
-obj-$(CONFIG_MHI_NET) += mhi/
+obj-$(CONFIG_MHI_NET) += mhi_net.o
#
# Networking Drivers
@@ -69,6 +70,7 @@ obj-$(CONFIG_WAN) += wan/
obj-$(CONFIG_WLAN) += wireless/
obj-$(CONFIG_IEEE802154) += ieee802154/
obj-$(CONFIG_WWAN) += wwan/
+obj-$(CONFIG_MCTP) += mctp/
obj-$(CONFIG_VMXNET3) += vmxnet3/
obj-$(CONFIG_XEN_NETDEV_FRONTEND) += xen-netfront.o
diff --git a/drivers/net/Space.c b/drivers/net/Space.c
index df79e7370bcc..49e67c9fb5a4 100644
--- a/drivers/net/Space.c
+++ b/drivers/net/Space.c
@@ -30,6 +30,148 @@
#include <linux/netlink.h>
#include <net/Space.h>
+/*
+ * This structure holds boot-time configured netdevice settings. They
+ * are then used in the device probing.
+ */
+struct netdev_boot_setup {
+ char name[IFNAMSIZ];
+ struct ifmap map;
+};
+#define NETDEV_BOOT_SETUP_MAX 8
+
+
+/******************************************************************************
+ *
+ * Device Boot-time Settings Routines
+ *
+ ******************************************************************************/
+
+/* Boot time configuration table */
+static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
+
+/**
+ * netdev_boot_setup_add - add new setup entry
+ * @name: name of the device
+ * @map: configured settings for the device
+ *
+ * Adds new setup entry to the dev_boot_setup list. The function
+ * returns 0 on error and 1 on success. This is a generic routine to
+ * all netdevices.
+ */
+static int netdev_boot_setup_add(char *name, struct ifmap *map)
+{
+ struct netdev_boot_setup *s;
+ int i;
+
+ s = dev_boot_setup;
+ for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
+ if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
+ memset(s[i].name, 0, sizeof(s[i].name));
+ strlcpy(s[i].name, name, IFNAMSIZ);
+ memcpy(&s[i].map, map, sizeof(s[i].map));
+ break;
+ }
+ }
+
+ return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
+}
+
+/**
+ * netdev_boot_setup_check - check boot time settings
+ * @dev: the netdevice
+ *
+ * Check boot time settings for the device.
+ * The found settings are set for the device to be used
+ * later in the device probing.
+ * Returns 0 if no settings found, 1 if they are.
+ */
+int netdev_boot_setup_check(struct net_device *dev)
+{
+ struct netdev_boot_setup *s = dev_boot_setup;
+ int i;
+
+ for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
+ if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
+ !strcmp(dev->name, s[i].name)) {
+ dev->irq = s[i].map.irq;
+ dev->base_addr = s[i].map.base_addr;
+ dev->mem_start = s[i].map.mem_start;
+ dev->mem_end = s[i].map.mem_end;
+ return 1;
+ }
+ }
+ return 0;
+}
+EXPORT_SYMBOL(netdev_boot_setup_check);
+
+/**
+ * netdev_boot_base - get address from boot time settings
+ * @prefix: prefix for network device
+ * @unit: id for network device
+ *
+ * Check boot time settings for the base address of the device.
+ * Returns 1 if the device is already registered (so that it is
+ * not probed again), the configured base address if an entry
+ * matches, or 0 if no settings are found.
+ */
+static unsigned long netdev_boot_base(const char *prefix, int unit)
+{
+ const struct netdev_boot_setup *s = dev_boot_setup;
+ char name[IFNAMSIZ];
+ int i;
+
+ sprintf(name, "%s%d", prefix, unit);
+
+ /*
+ * If the device is already registered, return a base of 1
+ * to indicate that this interface should not be probed
+ */
+ if (__dev_get_by_name(&init_net, name))
+ return 1;
+
+ for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
+ if (!strcmp(name, s[i].name))
+ return s[i].map.base_addr;
+ return 0;
+}
+
+/*
+ * Saves settings configured at boot time for any netdevice.
+ */
+static int __init netdev_boot_setup(char *str)
+{
+ int ints[5];
+ struct ifmap map;
+
+ str = get_options(str, ARRAY_SIZE(ints), ints);
+ if (!str || !*str)
+ return 0;
+
+ /* Save settings */
+ memset(&map, 0, sizeof(map));
+ if (ints[0] > 0)
+ map.irq = ints[1];
+ if (ints[0] > 1)
+ map.base_addr = ints[2];
+ if (ints[0] > 2)
+ map.mem_start = ints[3];
+ if (ints[0] > 3)
+ map.mem_end = ints[4];
+
+ /* Add new entry to the list */
+ return netdev_boot_setup_add(str, &map);
+}
+
+__setup("netdev=", netdev_boot_setup);
+
+static int __init ether_boot_setup(char *str)
+{
+ return netdev_boot_setup(str);
+}
+__setup("ether=", ether_boot_setup);
+
+
/* A unified ethernet device probe. This is the easiest way to have every
* ethernet adaptor have the name "eth[0123...]".
*/
@@ -77,39 +219,15 @@ static struct devprobe2 isa_probes[] __initdata = {
#ifdef CONFIG_SMC9194
{smc_init, 0},
#endif
-#ifdef CONFIG_CS89x0
-#ifndef CONFIG_CS89x0_PLATFORM
+#ifdef CONFIG_CS89x0_ISA
{cs89x0_probe, 0},
#endif
-#endif
-#if defined(CONFIG_MVME16x_NET) || defined(CONFIG_BVME6000_NET) /* Intel */
- {i82596_probe, 0}, /* I82596 */
-#endif
#ifdef CONFIG_NI65
{ni65_probe, 0},
#endif
{NULL, 0},
};
-static struct devprobe2 m68k_probes[] __initdata = {
-#ifdef CONFIG_ATARILANCE /* Lance-based Atari ethernet boards */
- {atarilance_probe, 0},
-#endif
-#ifdef CONFIG_SUN3LANCE /* sun3 onboard Lance chip */
- {sun3lance_probe, 0},
-#endif
-#ifdef CONFIG_SUN3_82586 /* sun3 onboard Intel 82586 chip */
- {sun3_82586_probe, 0},
-#endif
-#ifdef CONFIG_APNE /* A1200 PCMCIA NE2000 */
- {apne_probe, 0},
-#endif
-#ifdef CONFIG_MVME147_NET /* MVME147 internal Ethernet */
- {mvme147lance_probe, 0},
-#endif
- {NULL, 0},
-};
-
/* Unified ethernet device probe, segmented per architecture and
* per bus interface. This drives the legacy devices only for now.
*/
@@ -121,8 +239,7 @@ static void __init ethif_probe2(int unit)
if (base_addr == 1)
return;
- (void)(probe_list2(unit, m68k_probes, base_addr == 0) &&
- probe_list2(unit, isa_probes, base_addr == 0));
+ probe_list2(unit, isa_probes, base_addr == 0);
}
/* Statically configured drivers -- order matters here. */
@@ -130,10 +247,6 @@ static int __init net_olddevs_init(void)
{
int num;
-#ifdef CONFIG_SBNI
- for (num = 0; num < 8; ++num)
- sbni_probe(num);
-#endif
for (num = 0; num < 8; ++num)
ethif_probe2(num);
@@ -142,9 +255,6 @@ static int __init net_olddevs_init(void)
cops_probe(1);
cops_probe(2);
#endif
-#ifdef CONFIG_LTPC
- ltpc_probe();
-#endif
return 0;
}
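
For reference, the boot-time parser moved into Space.c keeps the historical command-line format: get_options() consumes up to four leading comma-separated integers (irq, base_addr, mem_start, mem_end) and the remaining string names the device. A hypothetical command line for an ISA card on IRQ 10 at I/O port 0x300 could look like:

	ether=10,0x300,eth0

netdev_boot_setup() stores this entry, and netdev_boot_setup_check() later applies it to the matching device during the legacy probe.
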
diff --git a/drivers/net/appletalk/Kconfig b/drivers/net/appletalk/Kconfig
index 43918398f0d3..90b9f1d6eda9 100644
--- a/drivers/net/appletalk/Kconfig
+++ b/drivers/net/appletalk/Kconfig
@@ -52,7 +52,9 @@ config LTPC
config COPS
tristate "COPS LocalTalk PC support"
- depends on DEV_APPLETALK && (ISA || EISA)
+ depends on DEV_APPLETALK && ISA
+ depends on NETDEVICES
+ select NETDEV_LEGACY_INIT
help
This allows you to use COPS AppleTalk cards to connect to LocalTalk
networks. You also need version 1.3.3 or later of the netatalk
diff --git a/drivers/net/appletalk/ipddp.c b/drivers/net/appletalk/ipddp.c
index 51cf5eca9c7f..5566daefbff4 100644
--- a/drivers/net/appletalk/ipddp.c
+++ b/drivers/net/appletalk/ipddp.c
@@ -54,11 +54,12 @@ static netdev_tx_t ipddp_xmit(struct sk_buff *skb,
static int ipddp_create(struct ipddp_route *new_rt);
static int ipddp_delete(struct ipddp_route *rt);
static struct ipddp_route* __ipddp_find_route(struct ipddp_route *rt);
-static int ipddp_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
+static int ipddp_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
+ void __user *data, int cmd);
static const struct net_device_ops ipddp_netdev_ops = {
.ndo_start_xmit = ipddp_xmit,
- .ndo_do_ioctl = ipddp_ioctl,
+ .ndo_siocdevprivate = ipddp_siocdevprivate,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
@@ -268,15 +269,18 @@ static struct ipddp_route* __ipddp_find_route(struct ipddp_route *rt)
return NULL;
}
-static int ipddp_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+static int ipddp_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
+ void __user *data, int cmd)
{
- struct ipddp_route __user *rt = ifr->ifr_data;
struct ipddp_route rcp, rcp2, *rp;
+ if (in_compat_syscall())
+ return -EOPNOTSUPP;
+
if(!capable(CAP_NET_ADMIN))
return -EPERM;
- if(copy_from_user(&rcp, rt, sizeof(rcp)))
+ if (copy_from_user(&rcp, data, sizeof(rcp)))
return -EFAULT;
switch(cmd)
@@ -296,7 +300,7 @@ static int ipddp_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
spin_unlock_bh(&ipddp_route_lock);
if (rp) {
- if (copy_to_user(rt, &rcp2,
+ if (copy_to_user(data, &rcp2,
sizeof(struct ipddp_route)))
return -EFAULT;
return 0;
diff --git a/drivers/net/appletalk/ltpc.c b/drivers/net/appletalk/ltpc.c
index 69c270885ff0..1f8925e75b3f 100644
--- a/drivers/net/appletalk/ltpc.c
+++ b/drivers/net/appletalk/ltpc.c
@@ -1015,7 +1015,7 @@ static const struct net_device_ops ltpc_netdev = {
.ndo_set_rx_mode = set_multicast_list,
};
-struct net_device * __init ltpc_probe(void)
+static struct net_device * __init ltpc_probe(void)
{
struct net_device *dev;
int err = -ENOMEM;
@@ -1221,12 +1221,10 @@ static int __init ltpc_setup(char *str)
}
__setup("ltpc=", ltpc_setup);
-#endif /* MODULE */
+#endif
static struct net_device *dev_ltpc;
-#ifdef MODULE
-
MODULE_LICENSE("GPL");
module_param(debug, int, 0);
module_param_hw(io, int, ioport, 0);
@@ -1244,7 +1242,6 @@ static int __init ltpc_module_init(void)
return PTR_ERR_OR_ZERO(dev_ltpc);
}
module_init(ltpc_module_init);
-#endif
static void __exit ltpc_cleanup(void)
{
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 6908822d9773..a4a202b9a0a2 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -96,7 +96,7 @@ static int ad_marker_send(struct port *port, struct bond_marker *marker);
static void ad_mux_machine(struct port *port, bool *update_slave_arr);
static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port);
static void ad_tx_machine(struct port *port);
-static void ad_periodic_machine(struct port *port);
+static void ad_periodic_machine(struct port *port, struct bond_params bond_params);
static void ad_port_selection_logic(struct port *port, bool *update_slave_arr);
static void ad_agg_selection_logic(struct aggregator *aggregator,
bool *update_slave_arr);
@@ -1294,10 +1294,11 @@ static void ad_tx_machine(struct port *port)
/**
* ad_periodic_machine - handle a port's periodic state machine
* @port: the port we're looking at
+ * @bond_params: bond parameters we will use
*
* Turn the ntt flag on periodically to perform periodic transmission of LACPDUs.
*/
-static void ad_periodic_machine(struct port *port)
+static void ad_periodic_machine(struct port *port, struct bond_params bond_params)
{
periodic_states_t last_state;
@@ -1306,8 +1307,8 @@ static void ad_periodic_machine(struct port *port)
/* check if port was reinitialized */
if (((port->sm_vars & AD_PORT_BEGIN) || !(port->sm_vars & AD_PORT_LACP_ENABLED) || !port->is_enabled) ||
- (!(port->actor_oper_port_state & LACP_STATE_LACP_ACTIVITY) && !(port->partner_oper.port_state & LACP_STATE_LACP_ACTIVITY))
- ) {
+ (!(port->actor_oper_port_state & LACP_STATE_LACP_ACTIVITY) && !(port->partner_oper.port_state & LACP_STATE_LACP_ACTIVITY)) ||
+ !bond_params.lacp_active) {
port->sm_periodic_state = AD_NO_PERIODIC;
}
/* check if state machine should change state */
@@ -2341,7 +2342,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
}
ad_rx_machine(NULL, port);
- ad_periodic_machine(port);
+ ad_periodic_machine(port, bond->params);
ad_port_selection_logic(port, &update_slave_arr);
ad_mux_machine(port, &update_slave_arr);
ad_tx_machine(port);
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 22e5632089ac..7d3752cbf761 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -17,7 +17,6 @@
#include <linux/if_bonding.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
-#include <net/ipx.h>
#include <net/arp.h>
#include <net/ipv6.h>
#include <asm/byteorder.h>
@@ -1351,8 +1350,6 @@ struct slave *bond_xmit_tlb_slave_get(struct bonding *bond,
if (!is_multicast_ether_addr(eth_data->h_dest)) {
switch (skb->protocol) {
case htons(ETH_P_IP):
- case htons(ETH_P_IPX):
- /* In case of IPX, it will falback to L2 hash */
case htons(ETH_P_IPV6):
hash_index = bond_xmit_hash(bond, skb);
if (bond->params.tlb_dynamic_lb) {
@@ -1454,35 +1451,6 @@ struct slave *bond_xmit_alb_slave_get(struct bonding *bond,
hash_size = sizeof(ip6hdr->daddr);
break;
}
- case ETH_P_IPX: {
- const struct ipxhdr *ipxhdr;
-
- if (pskb_network_may_pull(skb, sizeof(*ipxhdr))) {
- do_tx_balance = false;
- break;
- }
- ipxhdr = (struct ipxhdr *)skb_network_header(skb);
-
- if (ipxhdr->ipx_checksum != IPX_NO_CHECKSUM) {
- /* something is wrong with this packet */
- do_tx_balance = false;
- break;
- }
-
- if (ipxhdr->ipx_type != IPX_TYPE_NCP) {
- /* The only protocol worth balancing in
- * this family since it has an "ARP" like
- * mechanism
- */
- do_tx_balance = false;
- break;
- }
-
- eth_data = eth_hdr(skb);
- hash_start = (char *)eth_data->h_dest;
- hash_size = ETH_ALEN;
- break;
- }
case ETH_P_ARP:
do_tx_balance = false;
if (bond_info->rlb_enabled)
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 31730efa7538..b0966e733926 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -317,6 +317,25 @@ bool bond_sk_check(struct bonding *bond)
}
}
+static bool bond_xdp_check(struct bonding *bond)
+{
+ switch (BOND_MODE(bond)) {
+ case BOND_MODE_ROUNDROBIN:
+ case BOND_MODE_ACTIVEBACKUP:
+ return true;
+ case BOND_MODE_8023AD:
+ case BOND_MODE_XOR:
+ /* vlan+srcmac is not supported with XDP, as in most cases the 802.1Q
+ * tag is not present in the packet data due to hardware VLAN offload.
+ */
+ if (bond->params.xmit_policy != BOND_XMIT_POLICY_VLAN_SRCMAC)
+ return true;
+ fallthrough;
+ default:
+ return false;
+ }
+}
+
/*---------------------------------- VLAN -----------------------------------*/
/* In the following 2 functions, bond_vlan_rx_add_vid and bond_vlan_rx_kill_vid,
@@ -732,7 +751,7 @@ static int bond_check_dev_link(struct bonding *bond,
BMSR_LSTATUS : 0;
/* Ethtool can't be used, fallback to MII ioctls. */
- ioctl = slave_ops->ndo_do_ioctl;
+ ioctl = slave_ops->ndo_eth_ioctl;
if (ioctl) {
/* TODO: set pointer to correct ioctl on a per team member
* bases to make this more efficient. that is, once
@@ -756,7 +775,7 @@ static int bond_check_dev_link(struct bonding *bond,
}
}
- /* If reporting, report that either there's no dev->do_ioctl,
+ /* If reporting, report that either there's no ndo_eth_ioctl,
* or both SIOCGMIIREG and get_link failed (meaning that we
* cannot report link status). If not reporting, pretend
* we're ok.
@@ -1712,6 +1731,20 @@ void bond_lower_state_changed(struct slave *slave)
netdev_lower_state_changed(slave->dev, &info);
}
+#define BOND_NL_ERR(bond_dev, extack, errmsg) do { \
+ if (extack) \
+ NL_SET_ERR_MSG(extack, errmsg); \
+ else \
+ netdev_err(bond_dev, "Error: %s\n", errmsg); \
+} while (0)
+
+#define SLAVE_NL_ERR(bond_dev, slave_dev, extack, errmsg) do { \
+ if (extack) \
+ NL_SET_ERR_MSG(extack, errmsg); \
+ else \
+ slave_err(bond_dev, slave_dev, "Error: %s\n", errmsg); \
+} while (0)
+
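Note that the two helper macros above report through the netlink extack when one was supplied, and fall back to netdev_err()/slave_err() logging otherwise, so callers such as the ioctl path (which passes a NULL extack, see bond_do_ioctl() below) still get a diagnostic.
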
/* enslave device <slave> to bond device <master> */
int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
struct netlink_ext_ack *extack)
@@ -1725,29 +1758,26 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
if (slave_dev->flags & IFF_MASTER &&
!netif_is_bond_master(slave_dev)) {
- NL_SET_ERR_MSG(extack, "Device with IFF_MASTER cannot be enslaved");
- netdev_err(bond_dev,
- "Error: Device with IFF_MASTER cannot be enslaved\n");
+ BOND_NL_ERR(bond_dev, extack,
+ "Device type (master device) cannot be enslaved");
return -EPERM;
}
if (!bond->params.use_carrier &&
slave_dev->ethtool_ops->get_link == NULL &&
- slave_ops->ndo_do_ioctl == NULL) {
+ slave_ops->ndo_eth_ioctl == NULL) {
slave_warn(bond_dev, slave_dev, "no link monitoring support\n");
}
/* already in-use? */
if (netdev_is_rx_handler_busy(slave_dev)) {
- NL_SET_ERR_MSG(extack, "Device is in use and cannot be enslaved");
- slave_err(bond_dev, slave_dev,
- "Error: Device is in use and cannot be enslaved\n");
+ SLAVE_NL_ERR(bond_dev, slave_dev, extack,
+ "Device is in use and cannot be enslaved");
return -EBUSY;
}
if (bond_dev == slave_dev) {
- NL_SET_ERR_MSG(extack, "Cannot enslave bond to itself.");
- netdev_err(bond_dev, "cannot enslave bond to itself.\n");
+ BOND_NL_ERR(bond_dev, extack, "Cannot enslave bond to itself.");
return -EPERM;
}
@@ -1756,8 +1786,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
if (slave_dev->features & NETIF_F_VLAN_CHALLENGED) {
slave_dbg(bond_dev, slave_dev, "is NETIF_F_VLAN_CHALLENGED\n");
if (vlan_uses_dev(bond_dev)) {
- NL_SET_ERR_MSG(extack, "Can not enslave VLAN challenged device to VLAN enabled bond");
- slave_err(bond_dev, slave_dev, "Error: cannot enslave VLAN challenged slave on VLAN enabled bond\n");
+ SLAVE_NL_ERR(bond_dev, slave_dev, extack,
+ "Can not enslave VLAN challenged device to VLAN enabled bond");
return -EPERM;
} else {
slave_warn(bond_dev, slave_dev, "enslaved VLAN challenged slave. Adding VLANs will be blocked as long as it is part of bond.\n");
@@ -1775,8 +1805,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
* enslaving it; the old ifenslave will not.
*/
if (slave_dev->flags & IFF_UP) {
- NL_SET_ERR_MSG(extack, "Device can not be enslaved while up");
- slave_err(bond_dev, slave_dev, "slave is up - this may be due to an out of date ifenslave\n");
+ SLAVE_NL_ERR(bond_dev, slave_dev, extack,
+ "Device can not be enslaved while up");
return -EPERM;
}
@@ -1815,17 +1845,15 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
bond_dev);
}
} else if (bond_dev->type != slave_dev->type) {
- NL_SET_ERR_MSG(extack, "Device type is different from other slaves");
- slave_err(bond_dev, slave_dev, "ether type (%d) is different from other slaves (%d), can not enslave it\n",
- slave_dev->type, bond_dev->type);
+ SLAVE_NL_ERR(bond_dev, slave_dev, extack,
+ "Device type is different from other slaves");
return -EINVAL;
}
if (slave_dev->type == ARPHRD_INFINIBAND &&
BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
- NL_SET_ERR_MSG(extack, "Only active-backup mode is supported for infiniband slaves");
- slave_warn(bond_dev, slave_dev, "Type (%d) supports only active-backup mode\n",
- slave_dev->type);
+ SLAVE_NL_ERR(bond_dev, slave_dev, extack,
+ "Only active-backup mode is supported for infiniband slaves");
res = -EOPNOTSUPP;
goto err_undo_flags;
}
@@ -1839,8 +1867,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
bond->params.fail_over_mac = BOND_FOM_ACTIVE;
slave_warn(bond_dev, slave_dev, "Setting fail_over_mac to active for active-backup mode\n");
} else {
- NL_SET_ERR_MSG(extack, "Slave device does not support setting the MAC address, but fail_over_mac is not set to active");
- slave_err(bond_dev, slave_dev, "The slave device specified does not support setting the MAC address, but fail_over_mac is not set to active\n");
+ SLAVE_NL_ERR(bond_dev, slave_dev, extack,
+ "Slave device does not support setting the MAC address, but fail_over_mac is not set to active");
res = -EOPNOTSUPP;
goto err_undo_flags;
}
@@ -2133,6 +2161,39 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
bond_update_slave_arr(bond, NULL);
+ if (!slave_dev->netdev_ops->ndo_bpf ||
+ !slave_dev->netdev_ops->ndo_xdp_xmit) {
+ if (bond->xdp_prog) {
+ SLAVE_NL_ERR(bond_dev, slave_dev, extack,
+ "Slave does not support XDP");
+ res = -EOPNOTSUPP;
+ goto err_sysfs_del;
+ }
+ } else {
+ struct netdev_bpf xdp = {
+ .command = XDP_SETUP_PROG,
+ .flags = 0,
+ .prog = bond->xdp_prog,
+ .extack = extack,
+ };
+
+ if (dev_xdp_prog_count(slave_dev) > 0) {
+ SLAVE_NL_ERR(bond_dev, slave_dev, extack,
+ "Slave has XDP program loaded, please unload before enslaving");
+ res = -EOPNOTSUPP;
+ goto err_sysfs_del;
+ }
+
+ res = slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp);
+ if (res < 0) {
+ /* ndo_bpf() sets extack error message */
+ slave_dbg(bond_dev, slave_dev, "Error %d calling ndo_bpf\n", res);
+ goto err_sysfs_del;
+ }
+ if (bond->xdp_prog)
+ bpf_prog_inc(bond->xdp_prog);
+ }
+
slave_info(bond_dev, slave_dev, "Enslaving as %s interface with %s link\n",
bond_is_active_slave(new_slave) ? "an active" : "a backup",
new_slave->link != BOND_LINK_DOWN ? "an up" : "a down");
@@ -2252,7 +2313,17 @@ static int __bond_release_one(struct net_device *bond_dev,
/* recompute stats just before removing the slave */
bond_get_stats(bond->dev, &bond->bond_stats);
- bond_upper_dev_unlink(bond, slave);
+ if (bond->xdp_prog) {
+ struct netdev_bpf xdp = {
+ .command = XDP_SETUP_PROG,
+ .flags = 0,
+ .prog = NULL,
+ .extack = NULL,
+ };
+ if (slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp))
+ slave_warn(bond_dev, slave_dev, "failed to unload XDP program\n");
+ }
+
/* unregister rx_handler early so bond_handle_frame wouldn't be called
* for this slave anymore.
*/
@@ -2261,6 +2332,8 @@ static int __bond_release_one(struct net_device *bond_dev,
if (BOND_MODE(bond) == BOND_MODE_8023AD)
bond_3ad_unbind_slave(slave);
+ bond_upper_dev_unlink(bond, slave);
+
if (bond_mode_can_use_xmit_hash(bond))
bond_update_slave_arr(bond, slave);
@@ -3613,90 +3686,112 @@ static struct notifier_block bond_netdev_notifier = {
/*---------------------------- Hashing Policies -----------------------------*/
+/* Helper to access data in a packet, with or without a backing skb.
+ * If skb is given the data is linearized if necessary via pskb_may_pull.
+ */
+static inline const void *bond_pull_data(struct sk_buff *skb,
+ const void *data, int hlen, int n)
+{
+ if (likely(n <= hlen))
+ return data;
+ else if (skb && likely(pskb_may_pull(skb, n)))
+ return skb->head;
+
+ return NULL;
+}
+
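One subtlety in the helper above: when the skb branch is taken it returns skb->head rather than the caller's data pointer, because a successful pskb_may_pull() may have reallocated the header, making any previously computed pointer into the old buffer stale.
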
/* L2 hash helper */
-static inline u32 bond_eth_hash(struct sk_buff *skb)
+static inline u32 bond_eth_hash(struct sk_buff *skb, const void *data, int mhoff, int hlen)
{
- struct ethhdr *ep, hdr_tmp;
+ struct ethhdr *ep;
- ep = skb_header_pointer(skb, 0, sizeof(hdr_tmp), &hdr_tmp);
- if (ep)
- return ep->h_dest[5] ^ ep->h_source[5] ^ ep->h_proto;
- return 0;
+ data = bond_pull_data(skb, data, hlen, mhoff + sizeof(struct ethhdr));
+ if (!data)
+ return 0;
+
+ ep = (struct ethhdr *)(data + mhoff);
+ return ep->h_dest[5] ^ ep->h_source[5] ^ be16_to_cpu(ep->h_proto);
}
-static bool bond_flow_ip(struct sk_buff *skb, struct flow_keys *fk,
- int *noff, int *proto, bool l34)
+static bool bond_flow_ip(struct sk_buff *skb, struct flow_keys *fk, const void *data,
+ int hlen, __be16 l2_proto, int *nhoff, int *ip_proto, bool l34)
{
const struct ipv6hdr *iph6;
const struct iphdr *iph;
- if (skb->protocol == htons(ETH_P_IP)) {
- if (unlikely(!pskb_may_pull(skb, *noff + sizeof(*iph))))
+ if (l2_proto == htons(ETH_P_IP)) {
+ data = bond_pull_data(skb, data, hlen, *nhoff + sizeof(*iph));
+ if (!data)
return false;
- iph = (const struct iphdr *)(skb->data + *noff);
+
+ iph = (const struct iphdr *)(data + *nhoff);
iph_to_flow_copy_v4addrs(fk, iph);
- *noff += iph->ihl << 2;
+ *nhoff += iph->ihl << 2;
if (!ip_is_fragment(iph))
- *proto = iph->protocol;
- } else if (skb->protocol == htons(ETH_P_IPV6)) {
- if (unlikely(!pskb_may_pull(skb, *noff + sizeof(*iph6))))
+ *ip_proto = iph->protocol;
+ } else if (l2_proto == htons(ETH_P_IPV6)) {
+ data = bond_pull_data(skb, data, hlen, *nhoff + sizeof(*iph6));
+ if (!data)
return false;
- iph6 = (const struct ipv6hdr *)(skb->data + *noff);
+
+ iph6 = (const struct ipv6hdr *)(data + *nhoff);
iph_to_flow_copy_v6addrs(fk, iph6);
- *noff += sizeof(*iph6);
- *proto = iph6->nexthdr;
+ *nhoff += sizeof(*iph6);
+ *ip_proto = iph6->nexthdr;
} else {
return false;
}
- if (l34 && *proto >= 0)
- fk->ports.ports = skb_flow_get_ports(skb, *noff, *proto);
+ if (l34 && *ip_proto >= 0)
+ fk->ports.ports = __skb_flow_get_ports(skb, *nhoff, *ip_proto, data, hlen);
return true;
}
-static u32 bond_vlan_srcmac_hash(struct sk_buff *skb)
+static u32 bond_vlan_srcmac_hash(struct sk_buff *skb, const void *data, int mhoff, int hlen)
{
- struct ethhdr *mac_hdr = (struct ethhdr *)skb_mac_header(skb);
u32 srcmac_vendor = 0, srcmac_dev = 0;
- u16 vlan;
+ struct ethhdr *mac_hdr;
+ u16 vlan = 0;
int i;
+ data = bond_pull_data(skb, data, hlen, mhoff + sizeof(struct ethhdr));
+ if (!data)
+ return 0;
+ mac_hdr = (struct ethhdr *)(data + mhoff);
+
for (i = 0; i < 3; i++)
srcmac_vendor = (srcmac_vendor << 8) | mac_hdr->h_source[i];
for (i = 3; i < ETH_ALEN; i++)
srcmac_dev = (srcmac_dev << 8) | mac_hdr->h_source[i];
- if (!skb_vlan_tag_present(skb))
- return srcmac_vendor ^ srcmac_dev;
-
- vlan = skb_vlan_tag_get(skb);
+ if (skb && skb_vlan_tag_present(skb))
+ vlan = skb_vlan_tag_get(skb);
return vlan ^ srcmac_vendor ^ srcmac_dev;
}
/* Extract the appropriate headers based on bond's xmit policy */
-static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
- struct flow_keys *fk)
+static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb, const void *data,
+ __be16 l2_proto, int nhoff, int hlen, struct flow_keys *fk)
{
bool l34 = bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34;
- int noff, proto = -1;
+ int ip_proto = -1;
switch (bond->params.xmit_policy) {
case BOND_XMIT_POLICY_ENCAP23:
case BOND_XMIT_POLICY_ENCAP34:
memset(fk, 0, sizeof(*fk));
return __skb_flow_dissect(NULL, skb, &flow_keys_bonding,
- fk, NULL, 0, 0, 0, 0);
+ fk, data, l2_proto, nhoff, hlen, 0);
default:
break;
}
fk->ports.ports = 0;
memset(&fk->icmp, 0, sizeof(fk->icmp));
- noff = skb_network_offset(skb);
- if (!bond_flow_ip(skb, fk, &noff, &proto, l34))
+ if (!bond_flow_ip(skb, fk, data, hlen, l2_proto, &nhoff, &ip_proto, l34))
return false;
/* ICMP error packets contain at least 8 bytes of the header
@@ -3704,22 +3799,20 @@ static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
* to correlate ICMP error packets within the same flow which
* generated the error.
*/
- if (proto == IPPROTO_ICMP || proto == IPPROTO_ICMPV6) {
- skb_flow_get_icmp_tci(skb, &fk->icmp, skb->data,
- skb_transport_offset(skb),
- skb_headlen(skb));
- if (proto == IPPROTO_ICMP) {
+ if (ip_proto == IPPROTO_ICMP || ip_proto == IPPROTO_ICMPV6) {
+ skb_flow_get_icmp_tci(skb, &fk->icmp, data, nhoff, hlen);
+ if (ip_proto == IPPROTO_ICMP) {
if (!icmp_is_err(fk->icmp.type))
return true;
- noff += sizeof(struct icmphdr);
- } else if (proto == IPPROTO_ICMPV6) {
+ nhoff += sizeof(struct icmphdr);
+ } else if (ip_proto == IPPROTO_ICMPV6) {
if (!icmpv6_is_err(fk->icmp.type))
return true;
- noff += sizeof(struct icmp6hdr);
+ nhoff += sizeof(struct icmp6hdr);
}
- return bond_flow_ip(skb, fk, &noff, &proto, l34);
+ return bond_flow_ip(skb, fk, data, hlen, l2_proto, &nhoff, &ip_proto, l34);
}
return true;
@@ -3735,33 +3828,26 @@ static u32 bond_ip_hash(u32 hash, struct flow_keys *flow)
return hash >> 1;
}
-/**
- * bond_xmit_hash - generate a hash value based on the xmit policy
- * @bond: bonding device
- * @skb: buffer to use for headers
- *
- * This function will extract the necessary headers from the skb buffer and use
- * them to generate a hash based on the xmit_policy set in the bonding device
+/* Generate hash based on xmit policy. If @skb is given it is used to linearize
+ * the data as required, but this function can be used without it if the data is
+ * known to be linear (e.g. with xdp_buff).
*/
-u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb)
+static u32 __bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, const void *data,
+ __be16 l2_proto, int mhoff, int nhoff, int hlen)
{
struct flow_keys flow;
u32 hash;
- if (bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP34 &&
- skb->l4_hash)
- return skb->hash;
-
if (bond->params.xmit_policy == BOND_XMIT_POLICY_VLAN_SRCMAC)
- return bond_vlan_srcmac_hash(skb);
+ return bond_vlan_srcmac_hash(skb, data, mhoff, hlen);
if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER2 ||
- !bond_flow_dissect(bond, skb, &flow))
- return bond_eth_hash(skb);
+ !bond_flow_dissect(bond, skb, data, l2_proto, nhoff, hlen, &flow))
+ return bond_eth_hash(skb, data, mhoff, hlen);
if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER23 ||
bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP23) {
- hash = bond_eth_hash(skb);
+ hash = bond_eth_hash(skb, data, mhoff, hlen);
} else {
if (flow.icmp.id)
memcpy(&hash, &flow.icmp, sizeof(hash));
@@ -3772,6 +3858,45 @@ u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb)
return bond_ip_hash(hash, &flow);
}
+/**
+ * bond_xmit_hash - generate a hash value based on the xmit policy
+ * @bond: bonding device
+ * @skb: buffer to use for headers
+ *
+ * This function will extract the necessary headers from the skb buffer and use
+ * them to generate a hash based on the xmit_policy set in the bonding device
+ */
+u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb)
+{
+ if (bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP34 &&
+ skb->l4_hash)
+ return skb->hash;
+
+ return __bond_xmit_hash(bond, skb, skb->head, skb->protocol,
+ skb->mac_header, skb->network_header,
+ skb_headlen(skb));
+}
+
+/**
+ * bond_xmit_hash_xdp - generate a hash value based on the xmit policy
+ * @bond: bonding device
+ * @xdp: buffer to use for headers
+ *
+ * The XDP variant of bond_xmit_hash.
+ */
+static u32 bond_xmit_hash_xdp(struct bonding *bond, struct xdp_buff *xdp)
+{
+ struct ethhdr *eth;
+
+ if (xdp->data + sizeof(struct ethhdr) > xdp->data_end)
+ return 0;
+
+ eth = (struct ethhdr *)xdp->data;
+
+ return __bond_xmit_hash(bond, NULL, xdp->data, eth->h_proto, 0,
+ sizeof(struct ethhdr), xdp->data_end - xdp->data);
+}
+
/*-------------------------- Device entry points ----------------------------*/
void bond_work_init_all(struct bonding *bond)
@@ -3962,20 +4087,13 @@ static void bond_get_stats(struct net_device *bond_dev,
rcu_read_unlock();
}
-static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd)
+static int bond_eth_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd)
{
struct bonding *bond = netdev_priv(bond_dev);
- struct net_device *slave_dev = NULL;
- struct ifbond k_binfo;
- struct ifbond __user *u_binfo = NULL;
- struct ifslave k_sinfo;
- struct ifslave __user *u_sinfo = NULL;
struct mii_ioctl_data *mii = NULL;
- struct bond_opt_value newval;
- struct net *net;
- int res = 0;
+ int res;
- netdev_dbg(bond_dev, "bond_ioctl: cmd=%d\n", cmd);
+ netdev_dbg(bond_dev, "bond_eth_ioctl: cmd=%d\n", cmd);
switch (cmd) {
case SIOCGMIIPHY:
@@ -4000,7 +4118,28 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
}
return 0;
- case BOND_INFO_QUERY_OLD:
+ default:
+ res = -EOPNOTSUPP;
+ }
+
+ return res;
+}
+
+static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd)
+{
+ struct bonding *bond = netdev_priv(bond_dev);
+ struct net_device *slave_dev = NULL;
+ struct ifbond k_binfo;
+ struct ifbond __user *u_binfo = NULL;
+ struct ifslave k_sinfo;
+ struct ifslave __user *u_sinfo = NULL;
+ struct bond_opt_value newval;
+ struct net *net;
+ int res = 0;
+
+ netdev_dbg(bond_dev, "bond_ioctl: cmd=%d\n", cmd);
+
+ switch (cmd) {
case SIOCBONDINFOQUERY:
u_binfo = (struct ifbond __user *)ifr->ifr_data;
@@ -4012,7 +4151,6 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
return -EFAULT;
return 0;
- case BOND_SLAVE_INFO_QUERY_OLD:
case SIOCBONDSLAVEINFOQUERY:
u_sinfo = (struct ifslave __user *)ifr->ifr_data;
@@ -4042,19 +4180,15 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
return -ENODEV;
switch (cmd) {
- case BOND_ENSLAVE_OLD:
case SIOCBONDENSLAVE:
res = bond_enslave(bond_dev, slave_dev, NULL);
break;
- case BOND_RELEASE_OLD:
case SIOCBONDRELEASE:
res = bond_release(bond_dev, slave_dev);
break;
- case BOND_SETHWADDR_OLD:
case SIOCBONDSETHWADDR:
res = bond_set_dev_addr(bond_dev, slave_dev);
break;
- case BOND_CHANGE_ACTIVE_OLD:
case SIOCBONDCHANGEACTIVE:
bond_opt_initstr(&newval, slave_dev->name);
res = __bond_opt_set_notify(bond, BOND_OPT_ACTIVE_SLAVE,
@@ -4067,6 +4201,29 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
return res;
}
+static int bond_siocdevprivate(struct net_device *bond_dev, struct ifreq *ifr,
+ void __user *data, int cmd)
+{
+ struct ifreq ifrdata = { .ifr_data = data };
+
+ switch (cmd) {
+ case BOND_INFO_QUERY_OLD:
+ return bond_do_ioctl(bond_dev, &ifrdata, SIOCBONDINFOQUERY);
+ case BOND_SLAVE_INFO_QUERY_OLD:
+ return bond_do_ioctl(bond_dev, &ifrdata, SIOCBONDSLAVEINFOQUERY);
+ case BOND_ENSLAVE_OLD:
+ return bond_do_ioctl(bond_dev, ifr, SIOCBONDENSLAVE);
+ case BOND_RELEASE_OLD:
+ return bond_do_ioctl(bond_dev, ifr, SIOCBONDRELEASE);
+ case BOND_SETHWADDR_OLD:
+ return bond_do_ioctl(bond_dev, ifr, SIOCBONDSETHWADDR);
+ case BOND_CHANGE_ACTIVE_OLD:
+ return bond_do_ioctl(bond_dev, ifr, SIOCBONDCHANGEACTIVE);
+ }
+
+ return -EOPNOTSUPP;
+}
+
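The BOND_*_OLD command numbers are defined in the SIOCDEVPRIVATE range (see include/uapi/linux/if_bonding.h), which is why they arrive through the new ndo_siocdevprivate hook and are simply translated here onto the modern SIOCBOND* handlers.
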
static void bond_change_rx_flags(struct net_device *bond_dev, int change)
{
struct bonding *bond = netdev_priv(bond_dev);
@@ -4388,6 +4545,47 @@ non_igmp:
return NULL;
}
+static struct slave *bond_xdp_xmit_roundrobin_slave_get(struct bonding *bond,
+ struct xdp_buff *xdp)
+{
+ struct slave *slave;
+ int slave_cnt;
+ u32 slave_id;
+ const struct ethhdr *eth;
+ void *data = xdp->data;
+
+ if (data + sizeof(struct ethhdr) > xdp->data_end)
+ goto non_igmp;
+
+ eth = (struct ethhdr *)data;
+ data += sizeof(struct ethhdr);
+
+ /* See comment on IGMP in bond_xmit_roundrobin_slave_get() */
+ if (eth->h_proto == htons(ETH_P_IP)) {
+ const struct iphdr *iph;
+
+ if (data + sizeof(struct iphdr) > xdp->data_end)
+ goto non_igmp;
+
+ iph = (struct iphdr *)data;
+
+ if (iph->protocol == IPPROTO_IGMP) {
+ slave = rcu_dereference(bond->curr_active_slave);
+ if (slave)
+ return slave;
+ return bond_get_slave_by_id(bond, 0);
+ }
+ }
+
+non_igmp:
+ slave_cnt = READ_ONCE(bond->slave_cnt);
+ if (likely(slave_cnt)) {
+ slave_id = bond_rr_gen_slave_id(bond) % slave_cnt;
+ return bond_get_slave_by_id(bond, slave_id);
+ }
+ return NULL;
+}
+
static netdev_tx_t bond_xmit_roundrobin(struct sk_buff *skb,
struct net_device *bond_dev)
{
@@ -4401,8 +4599,7 @@ static netdev_tx_t bond_xmit_roundrobin(struct sk_buff *skb,
return bond_tx_drop(bond_dev, skb);
}
-static struct slave *bond_xmit_activebackup_slave_get(struct bonding *bond,
- struct sk_buff *skb)
+static struct slave *bond_xmit_activebackup_slave_get(struct bonding *bond)
{
return rcu_dereference(bond->curr_active_slave);
}
@@ -4416,7 +4613,7 @@ static netdev_tx_t bond_xmit_activebackup(struct sk_buff *skb,
struct bonding *bond = netdev_priv(bond_dev);
struct slave *slave;
- slave = bond_xmit_activebackup_slave_get(bond, skb);
+ slave = bond_xmit_activebackup_slave_get(bond);
if (slave)
return bond_dev_queue_xmit(bond, skb, slave->dev);
@@ -4604,6 +4801,22 @@ static struct slave *bond_xmit_3ad_xor_slave_get(struct bonding *bond,
return slave;
}
+static struct slave *bond_xdp_xmit_3ad_xor_slave_get(struct bonding *bond,
+ struct xdp_buff *xdp)
+{
+ struct bond_up_slave *slaves;
+ unsigned int count;
+ u32 hash;
+
+ hash = bond_xmit_hash_xdp(bond, xdp);
+ slaves = rcu_dereference(bond->usable_slaves);
+ count = slaves ? READ_ONCE(slaves->count) : 0;
+ if (unlikely(!count))
+ return NULL;
+
+ return slaves->arr[hash % count];
+}
+
/* Use this Xmit function for 3AD as well as XOR modes. The current
* usable slave array is formed in the control path. The xmit function
* just calculates hash and sends the packet out.
@@ -4714,7 +4927,7 @@ static struct net_device *bond_xmit_get_slave(struct net_device *master_dev,
slave = bond_xmit_roundrobin_slave_get(bond, skb);
break;
case BOND_MODE_ACTIVEBACKUP:
- slave = bond_xmit_activebackup_slave_get(bond, skb);
+ slave = bond_xmit_activebackup_slave_get(bond);
break;
case BOND_MODE_8023AD:
case BOND_MODE_XOR:
@@ -4888,6 +5101,172 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
return ret;
}
+static struct net_device *
+bond_xdp_get_xmit_slave(struct net_device *bond_dev, struct xdp_buff *xdp)
+{
+ struct bonding *bond = netdev_priv(bond_dev);
+ struct slave *slave;
+
+ /* Caller needs to hold rcu_read_lock() */
+
+ switch (BOND_MODE(bond)) {
+ case BOND_MODE_ROUNDROBIN:
+ slave = bond_xdp_xmit_roundrobin_slave_get(bond, xdp);
+ break;
+
+ case BOND_MODE_ACTIVEBACKUP:
+ slave = bond_xmit_activebackup_slave_get(bond);
+ break;
+
+ case BOND_MODE_8023AD:
+ case BOND_MODE_XOR:
+ slave = bond_xdp_xmit_3ad_xor_slave_get(bond, xdp);
+ break;
+
+ default:
+ /* Should never happen. Mode guarded by bond_xdp_check() */
+ netdev_err(bond_dev, "Unknown bonding mode %d for xdp xmit\n", BOND_MODE(bond));
+ WARN_ON_ONCE(1);
+ return NULL;
+ }
+
+ if (slave)
+ return slave->dev;
+
+ return NULL;
+}
+
+static int bond_xdp_xmit(struct net_device *bond_dev,
+ int n, struct xdp_frame **frames, u32 flags)
+{
+ int nxmit, err = -ENXIO;
+
+ rcu_read_lock();
+
+ for (nxmit = 0; nxmit < n; nxmit++) {
+ struct xdp_frame *frame = frames[nxmit];
+ struct xdp_frame *frames1[] = {frame};
+ struct net_device *slave_dev;
+ struct xdp_buff xdp;
+
+ xdp_convert_frame_to_buff(frame, &xdp);
+
+ slave_dev = bond_xdp_get_xmit_slave(bond_dev, &xdp);
+ if (!slave_dev) {
+ err = -ENXIO;
+ break;
+ }
+
+ err = slave_dev->netdev_ops->ndo_xdp_xmit(slave_dev, 1, frames1, flags);
+ if (err < 1)
+ break;
+ }
+
+ rcu_read_unlock();
+
+ /* If an error happened on the first frame, pass the error up; otherwise
+ * report the number of frames that were xmitted.
+ */
+ if (err < 0)
+ return (nxmit == 0 ? err : nxmit);
+
+ return nxmit;
+}
+
+static int bond_xdp_set(struct net_device *dev, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack)
+{
+ struct bonding *bond = netdev_priv(dev);
+ struct list_head *iter;
+ struct slave *slave, *rollback_slave;
+ struct bpf_prog *old_prog;
+ struct netdev_bpf xdp = {
+ .command = XDP_SETUP_PROG,
+ .flags = 0,
+ .prog = prog,
+ .extack = extack,
+ };
+ int err;
+
+ ASSERT_RTNL();
+
+ if (!bond_xdp_check(bond))
+ return -EOPNOTSUPP;
+
+ old_prog = bond->xdp_prog;
+ bond->xdp_prog = prog;
+
+ bond_for_each_slave(bond, slave, iter) {
+ struct net_device *slave_dev = slave->dev;
+
+ if (!slave_dev->netdev_ops->ndo_bpf ||
+ !slave_dev->netdev_ops->ndo_xdp_xmit) {
+ SLAVE_NL_ERR(dev, slave_dev, extack,
+ "Slave device does not support XDP");
+ err = -EOPNOTSUPP;
+ goto err;
+ }
+
+ if (dev_xdp_prog_count(slave_dev) > 0) {
+ SLAVE_NL_ERR(dev, slave_dev, extack,
+ "Slave has XDP program loaded, please unload before enslaving");
+ err = -EOPNOTSUPP;
+ goto err;
+ }
+
+ err = slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp);
+ if (err < 0) {
+ /* ndo_bpf() sets extack error message */
+ slave_err(dev, slave_dev, "Error %d calling ndo_bpf\n", err);
+ goto err;
+ }
+ if (prog)
+ bpf_prog_inc(prog);
+ }
+
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ if (prog)
+ static_branch_inc(&bpf_master_redirect_enabled_key);
+ else
+ static_branch_dec(&bpf_master_redirect_enabled_key);
+
+ return 0;
+
+err:
+ /* unwind the program changes */
+ bond->xdp_prog = old_prog;
+ xdp.prog = old_prog;
+ xdp.extack = NULL; /* do not overwrite original error */
+
+ bond_for_each_slave(bond, rollback_slave, iter) {
+ struct net_device *slave_dev = rollback_slave->dev;
+ int err_unwind;
+
+ if (slave == rollback_slave)
+ break;
+
+ err_unwind = slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp);
+ if (err_unwind < 0)
+ slave_err(dev, slave_dev,
+ "Error %d when unwinding XDP program change\n", err_unwind);
+ else if (xdp.prog)
+ bpf_prog_inc(xdp.prog);
+ }
+ return err;
+}
+
+static int bond_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return bond_xdp_set(dev, xdp->prog, xdp->extack);
+ default:
+ return -EINVAL;
+ }
+}
+
static u32 bond_mode_bcast_speed(struct slave *slave, u32 speed)
{
if (speed == 0 || speed == SPEED_UNKNOWN)
@@ -4955,7 +5334,9 @@ static const struct net_device_ops bond_netdev_ops = {
.ndo_start_xmit = bond_start_xmit,
.ndo_select_queue = bond_select_queue,
.ndo_get_stats64 = bond_get_stats,
- .ndo_do_ioctl = bond_do_ioctl,
+ .ndo_eth_ioctl = bond_eth_ioctl,
+ .ndo_siocbond = bond_do_ioctl,
+ .ndo_siocdevprivate = bond_siocdevprivate,
.ndo_change_rx_flags = bond_change_rx_flags,
.ndo_set_rx_mode = bond_set_rx_mode,
.ndo_change_mtu = bond_change_mtu,
@@ -4974,6 +5355,9 @@ static const struct net_device_ops bond_netdev_ops = {
.ndo_features_check = passthru_features_check,
.ndo_get_xmit_slave = bond_xmit_get_slave,
.ndo_sk_get_lower_dev = bond_sk_get_lower_dev,
+ .ndo_bpf = bond_xdp,
+ .ndo_xdp_xmit = bond_xdp_xmit,
+ .ndo_xdp_get_xmit_slave = bond_xdp_get_xmit_slave,
};
static const struct device_type bond_type = {
@@ -5443,6 +5827,7 @@ static int bond_check_params(struct bond_params *params)
params->downdelay = downdelay;
params->peer_notif_delay = 0;
params->use_carrier = use_carrier;
+ params->lacp_active = 1;
params->lacp_fast = lacp_fast;
params->primary[0] = 0;
params->primary_reselect = primary_reselect_value;
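
With the new ndo_bpf and ndo_xdp_xmit hooks wired up above, a program can be attached to the bond itself and is propagated to every slave. As a minimal sketch (the program and file names are illustrative only, not part of this patch), a pass-through XDP program might look like:

	/* prog.c - hypothetical minimal XDP program; build with
	 * clang -O2 -target bpf -c prog.c -o prog.o
	 */
	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	SEC("xdp")
	int xdp_pass(struct xdp_md *ctx)
	{
		return XDP_PASS;	/* let every frame continue up the stack */
	}

	char _license[] SEC("license") = "GPL";

Attaching it, e.g. with "ip link set dev bond0 xdp obj prog.o sec xdp", succeeds only if bond_xdp_check() allows the current mode and every slave implements both ndo_bpf and ndo_xdp_xmit.
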
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index 0561ece1ba45..5d54e11d18fa 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -100,6 +100,7 @@ static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = {
[IFLA_BOND_MIN_LINKS] = { .type = NLA_U32 },
[IFLA_BOND_LP_INTERVAL] = { .type = NLA_U32 },
[IFLA_BOND_PACKETS_PER_SLAVE] = { .type = NLA_U32 },
+ [IFLA_BOND_AD_LACP_ACTIVE] = { .type = NLA_U8 },
[IFLA_BOND_AD_LACP_RATE] = { .type = NLA_U8 },
[IFLA_BOND_AD_SELECT] = { .type = NLA_U8 },
[IFLA_BOND_AD_INFO] = { .type = NLA_NESTED },
@@ -387,6 +388,16 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
if (err)
return err;
}
+
+ if (data[IFLA_BOND_AD_LACP_ACTIVE]) {
+ int lacp_active = nla_get_u8(data[IFLA_BOND_AD_LACP_ACTIVE]);
+
+ bond_opt_initval(&newval, lacp_active);
+ err = __bond_opt_set(bond, BOND_OPT_LACP_ACTIVE, &newval);
+ if (err)
+ return err;
+ }
+
if (data[IFLA_BOND_AD_LACP_RATE]) {
int lacp_rate =
nla_get_u8(data[IFLA_BOND_AD_LACP_RATE]);
@@ -490,6 +501,7 @@ static size_t bond_get_size(const struct net_device *bond_dev)
nla_total_size(sizeof(u32)) + /* IFLA_BOND_MIN_LINKS */
nla_total_size(sizeof(u32)) + /* IFLA_BOND_LP_INTERVAL */
nla_total_size(sizeof(u32)) + /* IFLA_BOND_PACKETS_PER_SLAVE */
+ nla_total_size(sizeof(u8)) + /* IFLA_BOND_AD_LACP_ACTIVE */
nla_total_size(sizeof(u8)) + /* IFLA_BOND_AD_LACP_RATE */
nla_total_size(sizeof(u8)) + /* IFLA_BOND_AD_SELECT */
nla_total_size(sizeof(struct nlattr)) + /* IFLA_BOND_AD_INFO */
@@ -622,6 +634,10 @@ static int bond_fill_info(struct sk_buff *skb,
packets_per_slave))
goto nla_put_failure;
+ if (nla_put_u8(skb, IFLA_BOND_AD_LACP_ACTIVE,
+ bond->params.lacp_active))
+ goto nla_put_failure;
+
if (nla_put_u8(skb, IFLA_BOND_AD_LACP_RATE,
bond->params.lacp_fast))
goto nla_put_failure;
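
Assuming an iproute2 build that already understands the new attribute, the option can then be set at creation time, e.g. "ip link add bond0 type bond mode 802.3ad lacp_active off", and is reported back to userspace through bond_fill_info() above.
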
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 0cf25de6f46d..a8fde3bc458f 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -58,6 +58,8 @@ static int bond_option_lp_interval_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_pps_set(struct bonding *bond,
const struct bond_opt_value *newval);
+static int bond_option_lacp_active_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
static int bond_option_lacp_rate_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_ad_select_set(struct bonding *bond,
@@ -135,6 +137,12 @@ static const struct bond_opt_value bond_intmax_tbl[] = {
{ NULL, -1, 0}
};
+static const struct bond_opt_value bond_lacp_active[] = {
+ { "off", 0, 0},
+ { "on", 1, BOND_VALFLAG_DEFAULT},
+ { NULL, -1, 0}
+};
+
static const struct bond_opt_value bond_lacp_rate_tbl[] = {
{ "slow", AD_LACP_SLOW, 0},
{ "fast", AD_LACP_FAST, 0},
@@ -283,6 +291,15 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
.values = bond_intmax_tbl,
.set = bond_option_updelay_set
},
+ [BOND_OPT_LACP_ACTIVE] = {
+ .id = BOND_OPT_LACP_ACTIVE,
+ .name = "lacp_active",
+ .desc = "Send LACPDU frames with configured lacp rate, or act as speak-when-spoken-to",
+ .flags = BOND_OPTFLAG_IFDOWN,
+ .unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_8023AD)),
+ .values = bond_lacp_active,
+ .set = bond_option_lacp_active_set
+ },
[BOND_OPT_LACP_RATE] = {
.id = BOND_OPT_LACP_RATE,
.name = "lacp_rate",
@@ -1333,6 +1350,16 @@ static int bond_option_pps_set(struct bonding *bond,
return 0;
}
+static int bond_option_lacp_active_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
+{
+ netdev_dbg(bond->dev, "Setting LACP active to %s (%llu)\n",
+ newval->string, newval->value);
+ bond->params.lacp_active = newval->value;
+
+ return 0;
+}
+
static int bond_option_lacp_rate_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c
index 0fb1da361bb1..f3e3bfd72556 100644
--- a/drivers/net/bonding/bond_procfs.c
+++ b/drivers/net/bonding/bond_procfs.c
@@ -133,6 +133,8 @@ static void bond_info_show_master(struct seq_file *seq)
struct ad_info ad_info;
seq_puts(seq, "\n802.3ad info\n");
+ seq_printf(seq, "LACP active: %s\n",
+ (bond->params.lacp_active) ? "on" : "off");
seq_printf(seq, "LACP rate: %s\n",
(bond->params.lacp_fast) ? "fast" : "slow");
seq_printf(seq, "Min links: %d\n", bond->params.min_links);
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 5f9e9a240226..b9e9842fed94 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -339,10 +339,24 @@ static ssize_t bonding_show_peer_notif_delay(struct device *d,
static DEVICE_ATTR(peer_notif_delay, 0644,
bonding_show_peer_notif_delay, bonding_sysfs_store_option);
-/* Show the LACP interval. */
-static ssize_t bonding_show_lacp(struct device *d,
- struct device_attribute *attr,
- char *buf)
+/* Show the LACP activity and interval. */
+static ssize_t bonding_show_lacp_active(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct bonding *bond = to_bond(d);
+ const struct bond_opt_value *val;
+
+ val = bond_opt_get_val(BOND_OPT_LACP_ACTIVE, bond->params.lacp_active);
+
+ return sprintf(buf, "%s %d\n", val->string, bond->params.lacp_active);
+}
+static DEVICE_ATTR(lacp_active, 0644,
+ bonding_show_lacp_active, bonding_sysfs_store_option);
+
+static ssize_t bonding_show_lacp_rate(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
{
struct bonding *bond = to_bond(d);
const struct bond_opt_value *val;
@@ -352,7 +366,7 @@ static ssize_t bonding_show_lacp(struct device *d,
return sprintf(buf, "%s %d\n", val->string, bond->params.lacp_fast);
}
static DEVICE_ATTR(lacp_rate, 0644,
- bonding_show_lacp, bonding_sysfs_store_option);
+ bonding_show_lacp_rate, bonding_sysfs_store_option);
static ssize_t bonding_show_min_links(struct device *d,
struct device_attribute *attr,
@@ -738,6 +752,7 @@ static struct attribute *per_bond_attrs[] = {
&dev_attr_downdelay.attr,
&dev_attr_updelay.attr,
&dev_attr_peer_notif_delay.attr,
+ &dev_attr_lacp_active.attr,
&dev_attr_lacp_rate.attr,
&dev_attr_ad_select.attr,
&dev_attr_xmit_hash_policy.attr,
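
Reading the new sysfs attribute mirrors the other option files: given the "%s %d" format above, "cat /sys/class/net/bond0/bonding/lacp_active" (the bond name is just an example) prints "on 1" or "off 0".
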
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index e355d3974977..fff259247d52 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -97,7 +97,8 @@ config CAN_AT91
config CAN_FLEXCAN
tristate "Support for Freescale FLEXCAN based chips"
- depends on OF && HAS_IOMEM
+ depends on OF || COLDFIRE || COMPILE_TEST
+ depends on HAS_IOMEM
help
Say Y here if you want support for Freescale FlexCAN.
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 04d0bb3ffe89..b06af90a9964 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -43,14 +43,14 @@ enum at91_reg {
};
/* Mailbox registers (0 <= i <= 15) */
-#define AT91_MMR(i) (enum at91_reg)(0x200 + ((i) * 0x20))
-#define AT91_MAM(i) (enum at91_reg)(0x204 + ((i) * 0x20))
-#define AT91_MID(i) (enum at91_reg)(0x208 + ((i) * 0x20))
-#define AT91_MFID(i) (enum at91_reg)(0x20C + ((i) * 0x20))
-#define AT91_MSR(i) (enum at91_reg)(0x210 + ((i) * 0x20))
-#define AT91_MDL(i) (enum at91_reg)(0x214 + ((i) * 0x20))
-#define AT91_MDH(i) (enum at91_reg)(0x218 + ((i) * 0x20))
-#define AT91_MCR(i) (enum at91_reg)(0x21C + ((i) * 0x20))
+#define AT91_MMR(i) ((enum at91_reg)(0x200 + ((i) * 0x20)))
+#define AT91_MAM(i) ((enum at91_reg)(0x204 + ((i) * 0x20)))
+#define AT91_MID(i) ((enum at91_reg)(0x208 + ((i) * 0x20)))
+#define AT91_MFID(i) ((enum at91_reg)(0x20C + ((i) * 0x20)))
+#define AT91_MSR(i) ((enum at91_reg)(0x210 + ((i) * 0x20)))
+#define AT91_MDL(i) ((enum at91_reg)(0x214 + ((i) * 0x20)))
+#define AT91_MDH(i) ((enum at91_reg)(0x218 + ((i) * 0x20)))
+#define AT91_MCR(i) ((enum at91_reg)(0x21C + ((i) * 0x20)))
/* Register bits */
#define AT91_MR_CANEN BIT(0)
@@ -87,19 +87,19 @@ enum at91_mb_mode {
};
/* Interrupt mask bits */
-#define AT91_IRQ_ERRA (1 << 16)
-#define AT91_IRQ_WARN (1 << 17)
-#define AT91_IRQ_ERRP (1 << 18)
-#define AT91_IRQ_BOFF (1 << 19)
-#define AT91_IRQ_SLEEP (1 << 20)
-#define AT91_IRQ_WAKEUP (1 << 21)
-#define AT91_IRQ_TOVF (1 << 22)
-#define AT91_IRQ_TSTP (1 << 23)
-#define AT91_IRQ_CERR (1 << 24)
-#define AT91_IRQ_SERR (1 << 25)
-#define AT91_IRQ_AERR (1 << 26)
-#define AT91_IRQ_FERR (1 << 27)
-#define AT91_IRQ_BERR (1 << 28)
+#define AT91_IRQ_ERRA BIT(16)
+#define AT91_IRQ_WARN BIT(17)
+#define AT91_IRQ_ERRP BIT(18)
+#define AT91_IRQ_BOFF BIT(19)
+#define AT91_IRQ_SLEEP BIT(20)
+#define AT91_IRQ_WAKEUP BIT(21)
+#define AT91_IRQ_TOVF BIT(22)
+#define AT91_IRQ_TSTP BIT(23)
+#define AT91_IRQ_CERR BIT(24)
+#define AT91_IRQ_SERR BIT(25)
+#define AT91_IRQ_AERR BIT(26)
+#define AT91_IRQ_FERR BIT(27)
+#define AT91_IRQ_BERR BIT(28)
#define AT91_IRQ_ERR_ALL (0x1fff0000)
#define AT91_IRQ_ERR_FRAME (AT91_IRQ_CERR | AT91_IRQ_SERR | \
@@ -163,7 +163,7 @@ static const struct can_bittiming_const at91_bittiming_const = {
.tseg2_min = 2,
.tseg2_max = 8,
.sjw_max = 4,
- .brp_min = 2,
+ .brp_min = 2,
.brp_max = 128,
.brp_inc = 1,
};
@@ -281,19 +281,20 @@ static inline u32 at91_read(const struct at91_priv *priv, enum at91_reg reg)
}
static inline void at91_write(const struct at91_priv *priv, enum at91_reg reg,
- u32 value)
+ u32 value)
{
writel_relaxed(value, priv->reg_base + reg);
}
static inline void set_mb_mode_prio(const struct at91_priv *priv,
- unsigned int mb, enum at91_mb_mode mode, int prio)
+ unsigned int mb, enum at91_mb_mode mode,
+ int prio)
{
at91_write(priv, AT91_MMR(mb), (mode << 24) | (prio << 16));
}
static inline void set_mb_mode(const struct at91_priv *priv, unsigned int mb,
- enum at91_mb_mode mode)
+ enum at91_mb_mode mode)
{
set_mb_mode_prio(priv, mb, mode, 0);
}
@@ -316,8 +317,7 @@ static void at91_setup_mailboxes(struct net_device *dev)
unsigned int i;
u32 reg_mid;
- /*
- * Due to a chip bug (errata 50.2.6.3 & 50.3.5.3) the first
+ /* Due to a chip bug (errata 50.2.6.3 & 50.3.5.3) the first
* mailbox is disabled. The next 11 mailboxes are used as a
* reception FIFO. The last mailbox is configured with
* overwrite option. The overwrite flag indicates a FIFO
@@ -368,7 +368,7 @@ static int at91_set_bittiming(struct net_device *dev)
}
static int at91_get_berr_counter(const struct net_device *dev,
- struct can_berr_counter *bec)
+ struct can_berr_counter *bec)
{
const struct at91_priv *priv = netdev_priv(dev);
u32 reg_ecr = at91_read(priv, AT91_ECR);
@@ -423,8 +423,7 @@ static void at91_chip_stop(struct net_device *dev, enum can_state state)
priv->can.state = state;
}
-/*
- * theory of operation:
+/* theory of operation:
*
* According to the datasheet priority 0 is the highest priority, 15
* is the lowest. If two mailboxes have the same priority level the
@@ -486,8 +485,7 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* _NOTE_: subtract AT91_MB_TX_FIRST offset from mb! */
can_put_echo_skb(skb, dev, mb - get_mb_tx_first(priv), 0);
- /*
- * we have to stop the queue and deliver all messages in case
+ /* we have to stop the queue and deliver all messages in case
* of a prio+mb counter wrap around. This is the case if
* tx_next buffer prio and mailbox equals 0.
*
@@ -515,6 +513,7 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
static inline void at91_activate_rx_low(const struct at91_priv *priv)
{
u32 mask = get_mb_rx_low_mask(priv);
+
at91_write(priv, AT91_TCR, mask);
}
@@ -526,9 +525,10 @@ static inline void at91_activate_rx_low(const struct at91_priv *priv)
* Reenables given mailbox for reception of new CAN messages
*/
static inline void at91_activate_rx_mb(const struct at91_priv *priv,
- unsigned int mb)
+ unsigned int mb)
{
u32 mask = 1 << mb;
+
at91_write(priv, AT91_TCR, mask);
}
@@ -568,7 +568,7 @@ static void at91_rx_overflow_err(struct net_device *dev)
* given can frame. "mb" and "cf" must be valid.
*/
static void at91_read_mb(struct net_device *dev, unsigned int mb,
- struct can_frame *cf)
+ struct can_frame *cf)
{
const struct at91_priv *priv = netdev_priv(dev);
u32 reg_msr, reg_mid;
@@ -582,9 +582,9 @@ static void at91_read_mb(struct net_device *dev, unsigned int mb,
reg_msr = at91_read(priv, AT91_MSR(mb));
cf->len = can_cc_dlc2len((reg_msr >> 16) & 0xf);
- if (reg_msr & AT91_MSR_MRTR)
+ if (reg_msr & AT91_MSR_MRTR) {
cf->can_id |= CAN_RTR_FLAG;
- else {
+ } else {
*(u32 *)(cf->data + 0) = at91_read(priv, AT91_MDL(mb));
*(u32 *)(cf->data + 4) = at91_read(priv, AT91_MDH(mb));
}
@@ -685,7 +685,7 @@ static int at91_poll_rx(struct net_device *dev, int quota)
if (priv->rx_next > get_mb_rx_low_last(priv) &&
reg_sr & get_mb_rx_low_mask(priv))
netdev_info(dev,
- "order of incoming frames cannot be guaranteed\n");
+ "order of incoming frames cannot be guaranteed\n");
again:
for (mb = find_next_bit(addr, get_mb_tx_first(priv), priv->rx_next);
@@ -718,7 +718,7 @@ static int at91_poll_rx(struct net_device *dev, int quota)
}
static void at91_poll_err_frame(struct net_device *dev,
- struct can_frame *cf, u32 reg_sr)
+ struct can_frame *cf, u32 reg_sr)
{
struct at91_priv *priv = netdev_priv(dev);
@@ -796,8 +796,7 @@ static int at91_poll(struct napi_struct *napi, int quota)
if (reg_sr & get_irq_mb_rx(priv))
work_done += at91_poll_rx(dev, quota - work_done);
- /*
- * The error bits are clear on read,
+ /* The error bits are clear on read,
* so use saved value from irq handler.
*/
reg_sr |= priv->reg_sr;
@@ -807,6 +806,7 @@ static int at91_poll(struct napi_struct *napi, int quota)
if (work_done < quota) {
/* enable IRQs for frame errors and all mailboxes >= rx_next */
u32 reg_ier = AT91_IRQ_ERR_FRAME;
+
reg_ier |= get_irq_mb_rx(priv) & ~AT91_MB_MASK(priv->rx_next);
napi_complete_done(napi, work_done);
@@ -816,8 +816,7 @@ static int at91_poll(struct napi_struct *napi, int quota)
return work_done;
}
-/*
- * theory of operation:
+/* theory of operation:
*
* priv->tx_echo holds the number of the oldest can_frame put for
* transmission into the hardware, but not yet ACKed by the CAN tx
@@ -846,8 +845,7 @@ static void at91_irq_tx(struct net_device *dev, u32 reg_sr)
/* Disable irq for this TX mailbox */
at91_write(priv, AT91_IDR, 1 << mb);
- /*
- * only echo if mailbox signals us a transfer
+ /* only echo if mailbox signals us a transfer
* complete (MSR_MRDY). Otherwise it's a transfer
* abort. "can_bus_off()" takes care of the skbs
* parked in the echo queue.
@@ -862,8 +860,7 @@ static void at91_irq_tx(struct net_device *dev, u32 reg_sr)
}
}
- /*
- * restart queue if we don't have a wrap around but restart if
+ /* restart the queue if we don't have a wrap around, but also restart if
* we get a TX int for the last can frame directly before a
* wrap around.
*/
@@ -873,7 +870,7 @@ static void at91_irq_tx(struct net_device *dev, u32 reg_sr)
}
static void at91_irq_err_state(struct net_device *dev,
- struct can_frame *cf, enum can_state new_state)
+ struct can_frame *cf, enum can_state new_state)
{
struct at91_priv *priv = netdev_priv(dev);
u32 reg_idr = 0, reg_ier = 0;
@@ -883,8 +880,7 @@ static void at91_irq_err_state(struct net_device *dev,
switch (priv->can.state) {
case CAN_STATE_ERROR_ACTIVE:
- /*
- * from: ERROR_ACTIVE
+ /* from: ERROR_ACTIVE
* to : ERROR_WARNING, ERROR_PASSIVE, BUS_OFF
* => : there was a warning int
*/
@@ -900,8 +896,7 @@ static void at91_irq_err_state(struct net_device *dev,
}
fallthrough;
case CAN_STATE_ERROR_WARNING:
- /*
- * from: ERROR_ACTIVE, ERROR_WARNING
+ /* from: ERROR_ACTIVE, ERROR_WARNING
* to : ERROR_PASSIVE, BUS_OFF
* => : error passive int
*/
@@ -917,8 +912,7 @@ static void at91_irq_err_state(struct net_device *dev,
}
break;
case CAN_STATE_BUS_OFF:
- /*
- * from: BUS_OFF
+ /* from: BUS_OFF
* to : ERROR_ACTIVE, ERROR_WARNING, ERROR_PASSIVE
*/
if (new_state <= CAN_STATE_ERROR_PASSIVE) {
@@ -935,12 +929,10 @@ static void at91_irq_err_state(struct net_device *dev,
break;
}
-
/* process state changes depending on the new state */
switch (new_state) {
case CAN_STATE_ERROR_ACTIVE:
- /*
- * actually we want to enable AT91_IRQ_WARN here, but
+ /* actually we want to enable AT91_IRQ_WARN here, but
* it screws up the system under certain
* circumstances. So just enable AT91_IRQ_ERRP, thus
* the "fallthrough"
@@ -983,7 +975,7 @@ static void at91_irq_err_state(struct net_device *dev,
}
static int at91_get_state_by_bec(const struct net_device *dev,
- enum can_state *state)
+ enum can_state *state)
{
struct can_berr_counter bec;
int err;
@@ -1004,7 +996,6 @@ static int at91_get_state_by_bec(const struct net_device *dev,
return 0;
}
-
static void at91_irq_err(struct net_device *dev)
{
struct at91_priv *priv = netdev_priv(dev);
@@ -1018,15 +1009,15 @@ static void at91_irq_err(struct net_device *dev)
reg_sr = at91_read(priv, AT91_SR);
/* we need to look at the unmasked reg_sr */
- if (unlikely(reg_sr & AT91_IRQ_BOFF))
+ if (unlikely(reg_sr & AT91_IRQ_BOFF)) {
new_state = CAN_STATE_BUS_OFF;
- else if (unlikely(reg_sr & AT91_IRQ_ERRP))
+ } else if (unlikely(reg_sr & AT91_IRQ_ERRP)) {
new_state = CAN_STATE_ERROR_PASSIVE;
- else if (unlikely(reg_sr & AT91_IRQ_WARN))
+ } else if (unlikely(reg_sr & AT91_IRQ_WARN)) {
new_state = CAN_STATE_ERROR_WARNING;
- else if (likely(reg_sr & AT91_IRQ_ERRA))
+ } else if (likely(reg_sr & AT91_IRQ_ERRA)) {
new_state = CAN_STATE_ERROR_ACTIVE;
- else {
+ } else {
netdev_err(dev, "BUG! hardware in undefined state\n");
return;
}
@@ -1053,8 +1044,7 @@ static void at91_irq_err(struct net_device *dev)
priv->can.state = new_state;
}
-/*
- * interrupt handler
+/* interrupt handler
*/
static irqreturn_t at91_irq(int irq, void *dev_id)
{
@@ -1075,8 +1065,7 @@ static irqreturn_t at91_irq(int irq, void *dev_id)
/* Receive or error interrupt? -> napi */
if (reg_sr & (get_irq_mb_rx(priv) | AT91_IRQ_ERR_FRAME)) {
- /*
- * The error bits are clear on read,
+ /* The error bits are clear on read,
* save for later use.
*/
priv->reg_sr = reg_sr;
@@ -1133,8 +1122,7 @@ static int at91_open(struct net_device *dev)
return err;
}
-/*
- * stop CAN bus activity
+/* stop CAN bus activity
*/
static int at91_close(struct net_device *dev)
{
@@ -1176,8 +1164,8 @@ static const struct net_device_ops at91_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
-static ssize_t at91_sysfs_show_mb0_id(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t mb0_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct at91_priv *priv = netdev_priv(to_net_dev(dev));
@@ -1187,8 +1175,9 @@ static ssize_t at91_sysfs_show_mb0_id(struct device *dev,
return snprintf(buf, PAGE_SIZE, "0x%03x\n", priv->mb0_id);
}
-static ssize_t at91_sysfs_set_mb0_id(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t mb0_id_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct net_device *ndev = to_net_dev(dev);
struct at91_priv *priv = netdev_priv(ndev);
@@ -1222,7 +1211,7 @@ static ssize_t at91_sysfs_set_mb0_id(struct device *dev,
return ret;
}
-static DEVICE_ATTR(mb0_id, 0644, at91_sysfs_show_mb0_id, at91_sysfs_set_mb0_id);
+static DEVICE_ATTR_RW(mb0_id);
static struct attribute *at91_sysfs_attrs[] = {
&dev_attr_mb0_id.attr,
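
The DEVICE_ATTR_RW() conversion above only works because the show/store callbacks were first renamed to the <name>_show/<name>_store pattern the macro expects. Roughly what the macro expands to, per the conventions in include/linux/device.h (a reference sketch, not part of the patch):

#include <linux/device.h>

static ssize_t mb0_id_show(struct device *dev,
			   struct device_attribute *attr, char *buf);
static ssize_t mb0_id_store(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count);

/* DEVICE_ATTR_RW(mb0_id) boils down to: */
static struct device_attribute dev_attr_mb0_id =
	__ATTR(mb0_id, 0644, mb0_id_show, mb0_id_store);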
diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h
index 4247ff80a29c..08b6efa7a1a7 100644
--- a/drivers/net/can/c_can/c_can.h
+++ b/drivers/net/can/c_can/c_can.h
@@ -176,6 +176,13 @@ struct c_can_raminit {
bool needs_pulse;
};
+/* c_can tx ring structure */
+struct c_can_tx_ring {
+ unsigned int head;
+ unsigned int tail;
+ unsigned int obj_num;
+};
+
/* c_can private data structure */
struct c_can_priv {
struct can_priv can; /* must be the first member */
@@ -190,17 +197,16 @@ struct c_can_priv {
unsigned int msg_obj_tx_first;
unsigned int msg_obj_tx_last;
u32 msg_obj_rx_mask;
- atomic_t tx_active;
atomic_t sie_pending;
unsigned long tx_dir;
int last_status;
+ struct c_can_tx_ring tx;
u16 (*read_reg)(const struct c_can_priv *priv, enum reg index);
void (*write_reg)(const struct c_can_priv *priv, enum reg index, u16 val);
u32 (*read_reg32)(const struct c_can_priv *priv, enum reg index);
void (*write_reg32)(const struct c_can_priv *priv, enum reg index, u32 val);
void __iomem *base;
const u16 *regs;
- void *priv; /* for board-specific data */
enum c_can_dev_id type;
struct c_can_raminit raminit_sys; /* RAMINIT via syscon regmap */
void (*raminit)(const struct c_can_priv *priv, bool enable);
@@ -220,4 +226,19 @@ int c_can_power_down(struct net_device *dev);
void c_can_set_ethtool_ops(struct net_device *dev);
+static inline u8 c_can_get_tx_head(const struct c_can_tx_ring *ring)
+{
+ return ring->head & (ring->obj_num - 1);
+}
+
+static inline u8 c_can_get_tx_tail(const struct c_can_tx_ring *ring)
+{
+ return ring->tail & (ring->obj_num - 1);
+}
+
+static inline u8 c_can_get_tx_free(const struct c_can_tx_ring *ring)
+{
+ return ring->obj_num - (ring->head - ring->tail);
+}
+
#endif /* C_CAN_H */
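
The head and tail counters introduced above are free-running: they are never reset at wrap-around, so c_can_get_tx_free() can compute the fill level with plain unsigned arithmetic, and masking with obj_num - 1 maps the counters onto message-object indices. That masking assumes obj_num is a power of two. A minimal user-space sketch of the invariants (16 stands in for the TX object count and is an assumption here):

#include <assert.h>

struct ring {
	unsigned int head;	/* free-running enqueue counter */
	unsigned int tail;	/* free-running dequeue counter */
	unsigned int obj_num;	/* must be a power of two */
};

static unsigned int ring_free(const struct ring *r)
{
	/* correct across wrap-around thanks to modular arithmetic */
	return r->obj_num - (r->head - r->tail);
}

int main(void)
{
	struct ring r = { .head = 0, .tail = 0, .obj_num = 16 };
	unsigned int i;

	for (i = 0; i < 100; i++) {
		assert(ring_free(&r) == r.obj_num);
		r.head++;			/* enqueue one frame */
		assert(ring_free(&r) == r.obj_num - 1);
		r.tail++;			/* complete it */
	}
	/* the buffer index is just the masked counter */
	assert((r.head & (r.obj_num - 1)) == 100 % 16);
	return 0;
}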
diff --git a/drivers/net/can/c_can/c_can_main.c b/drivers/net/can/c_can/c_can_main.c
index 7588f70ca0fe..52671d1ea17d 100644
--- a/drivers/net/can/c_can/c_can_main.c
+++ b/drivers/net/can/c_can/c_can_main.c
@@ -160,8 +160,8 @@
#define IF_MCONT_TX (IF_MCONT_TXIE | IF_MCONT_EOB)
-/* Use IF1 for RX and IF2 for TX */
-#define IF_RX 0
+/* Use IF1 in NAPI path and IF2 in TX path */
+#define IF_NAPI 0
#define IF_TX 1
/* minimum timeout for checking BUSY status */
@@ -427,24 +427,51 @@ static void c_can_setup_receive_object(struct net_device *dev, int iface,
c_can_object_put(dev, iface, obj, IF_COMM_RCV_SETUP);
}
+static bool c_can_tx_busy(const struct c_can_priv *priv,
+ const struct c_can_tx_ring *tx_ring)
+{
+ if (c_can_get_tx_free(tx_ring) > 0)
+ return false;
+
+ netif_stop_queue(priv->dev);
+
+ /* Memory barrier before checking tx_free (head and tail) */
+ smp_mb();
+
+ if (c_can_get_tx_free(tx_ring) == 0) {
+ netdev_dbg(priv->dev,
+ "Stopping tx-queue (tx_head=0x%08x, tx_tail=0x%08x, len=%d).\n",
+ tx_ring->head, tx_ring->tail,
+ tx_ring->head - tx_ring->tail);
+ return true;
+ }
+
+ netif_start_queue(priv->dev);
+ return false;
+}
+
static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct can_frame *frame = (struct can_frame *)skb->data;
struct c_can_priv *priv = netdev_priv(dev);
- u32 idx, obj;
+ struct c_can_tx_ring *tx_ring = &priv->tx;
+ u32 idx, obj, cmd = IF_COMM_TX;
if (can_dropped_invalid_skb(dev, skb))
return NETDEV_TX_OK;
- /* This is not a FIFO. C/D_CAN sends out the buffers
- * prioritized. The lowest buffer number wins.
- */
- idx = fls(atomic_read(&priv->tx_active));
- obj = idx + priv->msg_obj_tx_first;
- /* If this is the last buffer, stop the xmit queue */
- if (idx == priv->msg_obj_tx_num - 1)
+ if (c_can_tx_busy(priv, tx_ring))
+ return NETDEV_TX_BUSY;
+
+ idx = c_can_get_tx_head(tx_ring);
+ tx_ring->head++;
+ if (c_can_get_tx_free(tx_ring) == 0)
netif_stop_queue(dev);
+
+ if (idx < c_can_get_tx_tail(tx_ring))
+ cmd &= ~IF_COMM_TXRQST; /* Cache the message */
+
/* Store the message in the interface so we can call
* can_put_echo_skb(). We must do this before we enable
* transmit as we might race against do_tx().
@@ -452,11 +479,8 @@ static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
c_can_setup_tx_object(dev, IF_TX, frame, idx);
priv->dlc[idx] = frame->len;
can_put_echo_skb(skb, dev, idx, 0);
-
- /* Update the active bits */
- atomic_add(BIT(idx), &priv->tx_active);
- /* Start transmission */
- c_can_object_put(dev, IF_TX, obj, IF_COMM_TX);
+ obj = idx + priv->msg_obj_tx_first;
+ c_can_object_put(dev, IF_TX, obj, cmd);
return NETDEV_TX_OK;
}
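
c_can_tx_busy() implements the standard lockless stop-and-recheck idiom: stop the queue, issue a full barrier, then re-read the free count. That smp_mb() pairs with the one before netif_wake_queue() in c_can_do_tx() below, so either the xmit path sees the updated tail and restarts itself, or the completion path sees the stopped queue and wakes it; the queue can never stall with free slots remaining. A compressed sketch of the pattern with stand-in names (not driver code):

#include <stdatomic.h>
#include <stdbool.h>

/* stand-ins for the tx ring accessors and netif_{stop,start}_queue() */
extern unsigned int ring_free(void);
extern void queue_stop(void);
extern void queue_start(void);

static bool tx_busy(void)
{
	if (ring_free() > 0)
		return false;

	queue_stop();
	/* order the stop against the re-read of head/tail; pairs with
	 * the barrier before the wake on the completion side
	 */
	atomic_thread_fence(memory_order_seq_cst);
	if (ring_free() == 0)
		return true;		/* really full */

	queue_start();			/* completion raced us */
	return false;
}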
@@ -529,13 +553,13 @@ static void c_can_configure_msg_objects(struct net_device *dev)
/* first invalidate all message objects */
for (i = priv->msg_obj_rx_first; i <= priv->msg_obj_num; i++)
- c_can_inval_msg_object(dev, IF_RX, i);
+ c_can_inval_msg_object(dev, IF_NAPI, i);
/* setup receive message objects */
for (i = priv->msg_obj_rx_first; i < priv->msg_obj_rx_last; i++)
- c_can_setup_receive_object(dev, IF_RX, i, 0, 0, IF_MCONT_RCV);
+ c_can_setup_receive_object(dev, IF_NAPI, i, 0, 0, IF_MCONT_RCV);
- c_can_setup_receive_object(dev, IF_RX, priv->msg_obj_rx_last, 0, 0,
+ c_can_setup_receive_object(dev, IF_NAPI, priv->msg_obj_rx_last, 0, 0,
IF_MCONT_RCV_EOB);
}
@@ -567,6 +591,7 @@ static int c_can_software_reset(struct net_device *dev)
static int c_can_chip_config(struct net_device *dev)
{
struct c_can_priv *priv = netdev_priv(dev);
+ struct c_can_tx_ring *tx_ring = &priv->tx;
int err;
err = c_can_software_reset(dev);
@@ -598,7 +623,8 @@ static int c_can_chip_config(struct net_device *dev)
priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED);
/* Clear all internal status */
- atomic_set(&priv->tx_active, 0);
+ tx_ring->head = 0;
+ tx_ring->tail = 0;
priv->tx_dir = 0;
/* set bittiming params */
@@ -696,40 +722,57 @@ static int c_can_get_berr_counter(const struct net_device *dev,
static void c_can_do_tx(struct net_device *dev)
{
struct c_can_priv *priv = netdev_priv(dev);
+ struct c_can_tx_ring *tx_ring = &priv->tx;
struct net_device_stats *stats = &dev->stats;
- u32 idx, obj, pkts = 0, bytes = 0, pend, clr;
+ u32 idx, obj, pkts = 0, bytes = 0, pend;
+ u8 tail;
if (priv->msg_obj_tx_last > 32)
pend = priv->read_reg32(priv, C_CAN_INTPND3_REG);
else
pend = priv->read_reg(priv, C_CAN_INTPND2_REG);
- clr = pend;
while ((idx = ffs(pend))) {
idx--;
pend &= ~BIT(idx);
obj = idx + priv->msg_obj_tx_first;
- /* We use IF_RX interface instead of IF_TX because we
+ /* We use IF_NAPI interface instead of IF_TX because we
* are called from c_can_poll(), which runs inside
- * NAPI. We are not trasmitting.
+ * NAPI. We are not transmitting.
*/
- c_can_inval_tx_object(dev, IF_RX, obj);
+ c_can_inval_tx_object(dev, IF_NAPI, obj);
can_get_echo_skb(dev, idx, NULL);
bytes += priv->dlc[idx];
pkts++;
}
- /* Clear the bits in the tx_active mask */
- atomic_sub(clr, &priv->tx_active);
+ if (!pkts)
+ return;
- if (clr & BIT(priv->msg_obj_tx_num - 1))
- netif_wake_queue(dev);
+ tx_ring->tail += pkts;
+ if (c_can_get_tx_free(tx_ring)) {
+ /* Make sure that anybody stopping the queue after
+ * this sees the new tx_ring->tail.
+ */
+ smp_mb();
+ netif_wake_queue(priv->dev);
+ }
- if (pkts) {
- stats->tx_bytes += bytes;
- stats->tx_packets += pkts;
- can_led_event(dev, CAN_LED_EVENT_TX);
+ stats->tx_bytes += bytes;
+ stats->tx_packets += pkts;
+ can_led_event(dev, CAN_LED_EVENT_TX);
+
+ tail = c_can_get_tx_tail(tx_ring);
+
+ if (tail == 0) {
+ u8 head = c_can_get_tx_head(tx_ring);
+
+ /* Start transmission for all cached messages */
+ for (idx = tail; idx < head; idx++) {
+ obj = idx + priv->msg_obj_tx_first;
+ c_can_object_put(dev, IF_NAPI, obj, IF_COMM_TXRQST);
+ }
}
}
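
The deferred TXRQST kick above completes the picture: C/D_CAN arbitrates pending TX buffers lowest-number-first, so a frame that wraps around to an index below the tail must not request transmission immediately, or it would overtake older frames still queued at higher indices. c_can_start_xmit() parks such frames without IF_COMM_TXRQST, and once the tail itself wraps to 0 the loop here releases everything cached so far. A toy model of the decision (obj_num = 16 is an assumption):

#include <stdio.h>

int main(void)
{
	unsigned int head = 14, tail = 10, obj_num = 16, i;

	for (i = 0; i < 6; i++) {	/* queue six more frames */
		unsigned int idx = head & (obj_num - 1);

		printf("frame at idx %2u: %s\n", idx,
		       idx < (tail & (obj_num - 1)) ?
		       "cached, kicked when tail wraps to 0" :
		       "TXRQST set immediately");
		head++;
	}
	return 0;
}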
@@ -766,14 +809,14 @@ static u32 c_can_adjust_pending(u32 pend, u32 rx_mask)
static inline void c_can_rx_object_get(struct net_device *dev,
struct c_can_priv *priv, u32 obj)
{
- c_can_object_get(dev, IF_RX, obj, priv->comm_rcv_high);
+ c_can_object_get(dev, IF_NAPI, obj, priv->comm_rcv_high);
}
static inline void c_can_rx_finalize(struct net_device *dev,
struct c_can_priv *priv, u32 obj)
{
if (priv->type != BOSCH_D_CAN)
- c_can_object_get(dev, IF_RX, obj, IF_COMM_CLR_NEWDAT);
+ c_can_object_get(dev, IF_NAPI, obj, IF_COMM_CLR_NEWDAT);
}
static int c_can_read_objects(struct net_device *dev, struct c_can_priv *priv,
@@ -785,10 +828,12 @@ static int c_can_read_objects(struct net_device *dev, struct c_can_priv *priv,
pend &= ~BIT(obj - 1);
c_can_rx_object_get(dev, priv, obj);
- ctrl = priv->read_reg(priv, C_CAN_IFACE(MSGCTRL_REG, IF_RX));
+ ctrl = priv->read_reg(priv, C_CAN_IFACE(MSGCTRL_REG, IF_NAPI));
if (ctrl & IF_MCONT_MSGLST) {
- int n = c_can_handle_lost_msg_obj(dev, IF_RX, obj, ctrl);
+ int n;
+
+ n = c_can_handle_lost_msg_obj(dev, IF_NAPI, obj, ctrl);
pkts += n;
quota -= n;
@@ -803,7 +848,7 @@ static int c_can_read_objects(struct net_device *dev, struct c_can_priv *priv,
continue;
/* read the data from the message object */
- c_can_read_msg_object(dev, IF_RX, ctrl);
+ c_can_read_msg_object(dev, IF_NAPI, ctrl);
c_can_rx_finalize(dev, priv, obj);
@@ -1205,6 +1250,10 @@ struct net_device *alloc_c_can_dev(int msg_obj_num)
priv->msg_obj_tx_last =
priv->msg_obj_tx_first + priv->msg_obj_tx_num - 1;
+ priv->tx.head = 0;
+ priv->tx.tail = 0;
+ priv->tx.obj_num = msg_obj_tx_num;
+
netif_napi_add(dev, &priv->napi, c_can_poll, priv->msg_obj_rx_num);
priv->dev = dev;
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index 36950363682f..86e95e9d6533 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -385,7 +385,6 @@ static int c_can_plat_probe(struct platform_device *pdev)
priv->base = addr;
priv->device = &pdev->dev;
priv->can.clock.freq = clk_get_rate(clk);
- priv->priv = clk;
priv->type = drvdata->id;
platform_set_drvdata(pdev, dev);
diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c
index 311d8564d611..e3d840b81357 100644
--- a/drivers/net/can/dev/dev.c
+++ b/drivers/net/can/dev/dev.c
@@ -15,6 +15,7 @@
#include <linux/can/dev.h>
#include <linux/can/skb.h>
#include <linux/can/led.h>
+#include <linux/gpio/consumer.h>
#include <linux/of.h>
#define MOD_DESC "CAN device driver interface"
@@ -400,10 +401,69 @@ void close_candev(struct net_device *dev)
}
EXPORT_SYMBOL_GPL(close_candev);
+static int can_set_termination(struct net_device *ndev, u16 term)
+{
+ struct can_priv *priv = netdev_priv(ndev);
+ int set;
+
+ if (term == priv->termination_gpio_ohms[CAN_TERMINATION_GPIO_ENABLED])
+ set = 1;
+ else
+ set = 0;
+
+ gpiod_set_value(priv->termination_gpio, set);
+
+ return 0;
+}
+
+static int can_get_termination(struct net_device *ndev)
+{
+ struct can_priv *priv = netdev_priv(ndev);
+ struct device *dev = ndev->dev.parent;
+ struct gpio_desc *gpio;
+ u32 term;
+ int ret;
+
+ /* Disabling termination by default is the safe choice: otherwise,
+ * if many bus participants enable it, no communication is possible
+ * at all.
+ */
+ gpio = devm_gpiod_get_optional(dev, "termination", GPIOD_OUT_LOW);
+ if (IS_ERR(gpio))
+ return dev_err_probe(dev, PTR_ERR(gpio),
+ "Cannot get termination-gpios\n");
+
+ if (!gpio)
+ return 0;
+
+ ret = device_property_read_u32(dev, "termination-ohms", &term);
+ if (ret) {
+ netdev_err(ndev, "Cannot get termination-ohms: %pe\n",
+ ERR_PTR(ret));
+ return ret;
+ }
+
+ if (term > U16_MAX) {
+ netdev_err(ndev, "Invalid termination-ohms value (%u > %u)\n",
+ term, U16_MAX);
+ return -EINVAL;
+ }
+
+ priv->termination_const_cnt = ARRAY_SIZE(priv->termination_gpio_ohms);
+ priv->termination_const = priv->termination_gpio_ohms;
+ priv->termination_gpio = gpio;
+ priv->termination_gpio_ohms[CAN_TERMINATION_GPIO_DISABLED] =
+ CAN_TERMINATION_DISABLED;
+ priv->termination_gpio_ohms[CAN_TERMINATION_GPIO_ENABLED] = term;
+ priv->do_set_termination = can_set_termination;
+
+ return 0;
+}
+
/* Register the CAN network device */
int register_candev(struct net_device *dev)
{
struct can_priv *priv = netdev_priv(dev);
+ int err;
/* Ensure termination_const, termination_const_cnt and
* do_set_termination consistency. All must be either set or
@@ -419,6 +479,12 @@ int register_candev(struct net_device *dev)
if (!priv->data_bitrate_const != !priv->data_bitrate_const_cnt)
return -EINVAL;
+ if (!priv->termination_const) {
+ err = can_get_termination(dev);
+ if (err)
+ return err;
+ }
+
dev->rtnl_link_ops = &can_link_ops;
netif_carrier_off(dev);
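
For drivers without a GPIO-switchable terminator, the consistency rule checked a few lines above still applies: termination_const, termination_const_cnt and do_set_termination must all be set or all be unset. A hedged sketch of a driver wiring up a fixed 120 Ohm option by hand (the demo_* names are hypothetical); the new GPIO path simply fills in the same three fields automatically:

#include <linux/can/dev.h>

static const u16 demo_termination[] = { CAN_TERMINATION_DISABLED, 120 };

static int demo_set_termination(struct net_device *ndev, u16 term)
{
	/* flip the board-specific terminator switch here */
	return 0;
}

static void demo_setup_termination(struct can_priv *priv)
{
	priv->termination_const = demo_termination;
	priv->termination_const_cnt = ARRAY_SIZE(demo_termination);
	priv->do_set_termination = demo_set_termination;
}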
diff --git a/drivers/net/can/dev/netlink.c b/drivers/net/can/dev/netlink.c
index e38c2566aff4..80425636049d 100644
--- a/drivers/net/can/dev/netlink.c
+++ b/drivers/net/can/dev/netlink.c
@@ -47,7 +47,7 @@ static int can_validate(struct nlattr *tb[], struct nlattr *data[],
}
if (data[IFLA_CAN_DATA_BITTIMING]) {
- if (!is_can_fd || !data[IFLA_CAN_BITTIMING])
+ if (!is_can_fd)
return -EOPNOTSUPP;
}
@@ -116,7 +116,7 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
maskedflags = cm->flags & cm->mask;
/* check whether provided bits are allowed to be passed */
- if (cm->mask & ~(priv->ctrlmode_supported | ctrlstatic))
+ if (maskedflags & ~(priv->ctrlmode_supported | ctrlstatic))
return -EOPNOTSUPP;
/* do not check for static fd-non-iso if 'fd' is disabled */
@@ -132,10 +132,13 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
priv->ctrlmode |= maskedflags;
/* CAN_CTRLMODE_FD can only be set when driver supports FD */
- if (priv->ctrlmode & CAN_CTRLMODE_FD)
+ if (priv->ctrlmode & CAN_CTRLMODE_FD) {
dev->mtu = CANFD_MTU;
- else
+ } else {
dev->mtu = CAN_MTU;
+ memset(&priv->data_bittiming, 0,
+ sizeof(priv->data_bittiming));
+ }
}
if (data[IFLA_CAN_RESTART_MS]) {
diff --git a/drivers/net/can/dev/rx-offload.c b/drivers/net/can/dev/rx-offload.c
index ab2c1543786c..37b0cc65237b 100644
--- a/drivers/net/can/dev/rx-offload.c
+++ b/drivers/net/can/dev/rx-offload.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2014 Protonic Holland,
* David Jander
- * Copyright (C) 2014-2017 Pengutronix,
+ * Copyright (C) 2014-2021 Pengutronix,
* Marc Kleine-Budde <kernel@pengutronix.de>
*/
@@ -174,10 +174,8 @@ can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n)
int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload,
u64 pending)
{
- struct sk_buff_head skb_queue;
unsigned int i;
-
- __skb_queue_head_init(&skb_queue);
+ int received = 0;
for (i = offload->mb_first;
can_rx_offload_le(offload, i, offload->mb_last);
@@ -191,26 +189,12 @@ int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload,
if (IS_ERR_OR_NULL(skb))
continue;
- __skb_queue_add_sort(&skb_queue, skb, can_rx_offload_compare);
- }
-
- if (!skb_queue_empty(&skb_queue)) {
- unsigned long flags;
- u32 queue_len;
-
- spin_lock_irqsave(&offload->skb_queue.lock, flags);
- skb_queue_splice_tail(&skb_queue, &offload->skb_queue);
- spin_unlock_irqrestore(&offload->skb_queue.lock, flags);
-
- queue_len = skb_queue_len(&offload->skb_queue);
- if (queue_len > offload->skb_queue_len_max / 8)
- netdev_dbg(offload->dev, "%s: queue_len=%d\n",
- __func__, queue_len);
-
- can_rx_offload_schedule(offload);
+ __skb_queue_add_sort(&offload->skb_irq_queue, skb,
+ can_rx_offload_compare);
+ received++;
}
- return skb_queue_len(&skb_queue);
+ return received;
}
EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_timestamp);
@@ -226,13 +210,10 @@ int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload)
if (!skb)
break;
- skb_queue_tail(&offload->skb_queue, skb);
+ __skb_queue_tail(&offload->skb_irq_queue, skb);
received++;
}
- if (received)
- can_rx_offload_schedule(offload);
-
return received;
}
EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo);
@@ -241,7 +222,6 @@ int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
struct sk_buff *skb, u32 timestamp)
{
struct can_rx_offload_cb *cb;
- unsigned long flags;
if (skb_queue_len(&offload->skb_queue) >
offload->skb_queue_len_max) {
@@ -252,11 +232,8 @@ int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
cb = can_rx_offload_get_cb(skb);
cb->timestamp = timestamp;
- spin_lock_irqsave(&offload->skb_queue.lock, flags);
- __skb_queue_add_sort(&offload->skb_queue, skb, can_rx_offload_compare);
- spin_unlock_irqrestore(&offload->skb_queue.lock, flags);
-
- can_rx_offload_schedule(offload);
+ __skb_queue_add_sort(&offload->skb_irq_queue, skb,
+ can_rx_offload_compare);
return 0;
}
@@ -295,13 +272,56 @@ int can_rx_offload_queue_tail(struct can_rx_offload *offload,
return -ENOBUFS;
}
- skb_queue_tail(&offload->skb_queue, skb);
- can_rx_offload_schedule(offload);
+ __skb_queue_tail(&offload->skb_irq_queue, skb);
return 0;
}
EXPORT_SYMBOL_GPL(can_rx_offload_queue_tail);
+void can_rx_offload_irq_finish(struct can_rx_offload *offload)
+{
+ unsigned long flags;
+ int queue_len;
+
+ if (skb_queue_empty_lockless(&offload->skb_irq_queue))
+ return;
+
+ spin_lock_irqsave(&offload->skb_queue.lock, flags);
+ skb_queue_splice_tail_init(&offload->skb_irq_queue, &offload->skb_queue);
+ spin_unlock_irqrestore(&offload->skb_queue.lock, flags);
+
+ queue_len = skb_queue_len(&offload->skb_queue);
+ if (queue_len > offload->skb_queue_len_max / 8)
+ netdev_dbg(offload->dev, "%s: queue_len=%d\n",
+ __func__, queue_len);
+
+ napi_schedule(&offload->napi);
+}
+EXPORT_SYMBOL_GPL(can_rx_offload_irq_finish);
+
+void can_rx_offload_threaded_irq_finish(struct can_rx_offload *offload)
+{
+ unsigned long flags;
+ int queue_len;
+
+ if (skb_queue_empty_lockless(&offload->skb_irq_queue))
+ return;
+
+ spin_lock_irqsave(&offload->skb_queue.lock, flags);
+ skb_queue_splice_tail_init(&offload->skb_irq_queue, &offload->skb_queue);
+ spin_unlock_irqrestore(&offload->skb_queue.lock, flags);
+
+ queue_len = skb_queue_len(&offload->skb_queue);
+ if (queue_len > offload->skb_queue_len_max / 8)
+ netdev_dbg(offload->dev, "%s: queue_len=%d\n",
+ __func__, queue_len);
+
+ local_bh_disable();
+ napi_schedule(&offload->napi);
+ local_bh_enable();
+}
+EXPORT_SYMBOL_GPL(can_rx_offload_threaded_irq_finish);
+
static int can_rx_offload_init_queue(struct net_device *dev,
struct can_rx_offload *offload,
unsigned int weight)
@@ -312,6 +332,7 @@ static int can_rx_offload_init_queue(struct net_device *dev,
offload->skb_queue_len_max = 2 << fls(weight);
offload->skb_queue_len_max *= 4;
skb_queue_head_init(&offload->skb_queue);
+ __skb_queue_head_init(&offload->skb_irq_queue);
netif_napi_add(dev, &offload->napi, can_rx_offload_napi_poll, weight);
@@ -373,5 +394,6 @@ void can_rx_offload_del(struct can_rx_offload *offload)
{
netif_napi_del(&offload->napi);
skb_queue_purge(&offload->skb_queue);
+ __skb_queue_purge(&offload->skb_irq_queue);
}
EXPORT_SYMBOL_GPL(can_rx_offload_del);
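
With skb_irq_queue in place, the calling convention changes: the per-event helpers (can_rx_offload_irq_offload_fifo() and friends) now queue lock-free onto skb_irq_queue, and the handler calls can_rx_offload_irq_finish() exactly once on its way out, taking the queue lock a single time and scheduling NAPI at most once. A hypothetical hard-IRQ handler showing the shape (the demo_* names are made up; threaded handlers use can_rx_offload_threaded_irq_finish() instead, which wraps napi_schedule() in local_bh_disable()/local_bh_enable() so the raised softirq actually runs from process context):

#include <linux/can/rx-offload.h>
#include <linux/interrupt.h>

struct demo_priv {
	struct can_rx_offload offload;
};

extern bool demo_hw_rx_pending(struct demo_priv *priv);

static irqreturn_t demo_can_irq(int irq, void *dev_id)
{
	struct demo_priv *priv = dev_id;

	/* lock-free queueing onto offload->skb_irq_queue */
	while (demo_hw_rx_pending(priv))
		can_rx_offload_irq_offload_fifo(&priv->offload);

	/* one splice under the lock, one NAPI schedule */
	can_rx_offload_irq_finish(&priv->offload);

	return IRQ_HANDLED;
}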
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 57f3635ad8d7..7734229aa078 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -28,6 +28,7 @@
#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
+#include <linux/can/platform/flexcan.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
@@ -208,18 +209,19 @@
/* FLEXCAN hardware feature flags
*
* Below is some version info we got:
- * SOC Version IP-Version Glitch- [TR]WRN_INT IRQ Err Memory err RTR rece- FD Mode
+ * SOC Version IP-Version Glitch- [TR]WRN_INT IRQ Err Memory err RTR rece- FD Mode MB
* Filter? connected? Passive detection ption in MB Supported?
- * MX25 FlexCAN2 03.00.00.00 no no no no no no
- * MX28 FlexCAN2 03.00.04.00 yes yes no no no no
- * MX35 FlexCAN2 03.00.00.00 no no no no no no
- * MX53 FlexCAN2 03.00.00.00 yes no no no no no
- * MX6s FlexCAN3 10.00.12.00 yes yes no no yes no
- * MX8QM FlexCAN3 03.00.23.00 yes yes no no yes yes
- * MX8MP FlexCAN3 03.00.17.01 yes yes no yes yes yes
- * VF610 FlexCAN3 ? no yes no yes yes? no
- * LS1021A FlexCAN2 03.00.04.00 no yes no no yes no
- * LX2160A FlexCAN3 03.00.23.00 no yes no yes yes yes
+ * MCF5441X FlexCAN2 ? no yes no no yes no 16
+ * MX25 FlexCAN2 03.00.00.00 no no no no no no 64
+ * MX28 FlexCAN2 03.00.04.00 yes yes no no no no 64
+ * MX35 FlexCAN2 03.00.00.00 no no no no no no 64
+ * MX53 FlexCAN2 03.00.00.00 yes no no no no no 64
+ * MX6s FlexCAN3 10.00.12.00 yes yes no no yes no 64
+ * MX8QM FlexCAN3 03.00.23.00 yes yes no no yes yes 64
+ * MX8MP FlexCAN3 03.00.17.01 yes yes no yes yes yes 64
+ * VF610 FlexCAN3 ? no yes no yes yes? no 64
+ * LS1021A FlexCAN2 03.00.04.00 no yes no no yes no 64
+ * LX2160A FlexCAN3 03.00.23.00 no yes no yes yes yes 64
*
* Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected.
*/
@@ -246,6 +248,10 @@
#define FLEXCAN_QUIRK_SUPPORT_ECC BIT(10)
/* Setup stop mode with SCU firmware to support wakeup */
#define FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW BIT(11)
+/* Setup 3 separate interrupts, main, boff and err */
+#define FLEXCAN_QUIRK_NR_IRQ_3 BIT(12)
+/* Setup 16 mailboxes */
+#define FLEXCAN_QUIRK_NR_MB_16 BIT(13)
/* Structure of the message buffer */
struct flexcan_mb {
@@ -363,6 +369,9 @@ struct flexcan_priv {
struct regulator *reg_xceiver;
struct flexcan_stop_mode stm;
+ int irq_boff;
+ int irq_err;
+
/* IPC handle when setup stop mode by System Controller firmware(scfw) */
struct imx_sc_ipc *sc_ipc_handle;
@@ -371,6 +380,11 @@ struct flexcan_priv {
void (*write)(u32 val, void __iomem *addr);
};
+static const struct flexcan_devtype_data fsl_mcf5441x_devtype_data = {
+ .quirks = FLEXCAN_QUIRK_BROKEN_PERR_STATE |
+ FLEXCAN_QUIRK_NR_IRQ_3 | FLEXCAN_QUIRK_NR_MB_16,
+};
+
static const struct flexcan_devtype_data fsl_p1010_devtype_data = {
.quirks = FLEXCAN_QUIRK_BROKEN_WERR_STATE |
FLEXCAN_QUIRK_BROKEN_PERR_STATE |
@@ -635,15 +649,19 @@ static inline void flexcan_error_irq_disable(const struct flexcan_priv *priv)
static int flexcan_clks_enable(const struct flexcan_priv *priv)
{
- int err;
+ int err = 0;
- err = clk_prepare_enable(priv->clk_ipg);
- if (err)
- return err;
+ if (priv->clk_ipg) {
+ err = clk_prepare_enable(priv->clk_ipg);
+ if (err)
+ return err;
+ }
- err = clk_prepare_enable(priv->clk_per);
- if (err)
- clk_disable_unprepare(priv->clk_ipg);
+ if (priv->clk_per) {
+ err = clk_prepare_enable(priv->clk_per);
+ if (err)
+ clk_disable_unprepare(priv->clk_ipg);
+ }
return err;
}
@@ -1198,6 +1216,9 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
}
}
+ if (handled)
+ can_rx_offload_irq_finish(&priv->offload);
+
return handled;
}
@@ -1401,8 +1422,12 @@ static int flexcan_rx_offload_setup(struct net_device *dev)
priv->mb_size = sizeof(struct flexcan_mb) + CANFD_MAX_DLEN;
else
priv->mb_size = sizeof(struct flexcan_mb) + CAN_MAX_DLEN;
- priv->mb_count = (sizeof(priv->regs->mb[0]) / priv->mb_size) +
- (sizeof(priv->regs->mb[1]) / priv->mb_size);
+
+ if (priv->devtype_data->quirks & FLEXCAN_QUIRK_NR_MB_16)
+ priv->mb_count = 16;
+ else
+ priv->mb_count = (sizeof(priv->regs->mb[0]) / priv->mb_size) +
+ (sizeof(priv->regs->mb[1]) / priv->mb_size);
if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP)
priv->tx_mb_reserved =
@@ -1774,6 +1799,18 @@ static int flexcan_open(struct net_device *dev)
if (err)
goto out_can_rx_offload_disable;
+ if (priv->devtype_data->quirks & FLEXCAN_QUIRK_NR_IRQ_3) {
+ err = request_irq(priv->irq_boff,
+ flexcan_irq, IRQF_SHARED, dev->name, dev);
+ if (err)
+ goto out_free_irq;
+
+ err = request_irq(priv->irq_err,
+ flexcan_irq, IRQF_SHARED, dev->name, dev);
+ if (err)
+ goto out_free_irq_boff;
+ }
+
flexcan_chip_interrupts_enable(dev);
can_led_event(dev, CAN_LED_EVENT_OPEN);
@@ -1782,6 +1819,10 @@ static int flexcan_open(struct net_device *dev)
return 0;
+ out_free_irq_boff:
+ free_irq(priv->irq_boff, dev);
+ out_free_irq:
+ free_irq(dev->irq, dev);
out_can_rx_offload_disable:
can_rx_offload_disable(&priv->offload);
flexcan_chip_stop(dev);
@@ -1803,6 +1844,12 @@ static int flexcan_close(struct net_device *dev)
netif_stop_queue(dev);
flexcan_chip_interrupts_disable(dev);
+
+ if (priv->devtype_data->quirks & FLEXCAN_QUIRK_NR_IRQ_3) {
+ free_irq(priv->irq_err, dev);
+ free_irq(priv->irq_boff, dev);
+ }
+
free_irq(dev->irq, dev);
can_rx_offload_disable(&priv->offload);
flexcan_chip_stop_disable_on_error(dev);
@@ -2039,14 +2086,26 @@ static const struct of_device_id flexcan_of_match[] = {
};
MODULE_DEVICE_TABLE(of, flexcan_of_match);
+static const struct platform_device_id flexcan_id_table[] = {
+ {
+ .name = "flexcan-mcf5441x",
+ .driver_data = (kernel_ulong_t)&fsl_mcf5441x_devtype_data,
+ }, {
+ /* sentinel */
+ },
+};
+MODULE_DEVICE_TABLE(platform, flexcan_id_table);
+
static int flexcan_probe(struct platform_device *pdev)
{
+ const struct of_device_id *of_id;
const struct flexcan_devtype_data *devtype_data;
struct net_device *dev;
struct flexcan_priv *priv;
struct regulator *reg_xceiver;
struct clk *clk_ipg = NULL, *clk_per = NULL;
struct flexcan_regs __iomem *regs;
+ struct flexcan_platform_data *pdata;
int err, irq;
u8 clk_src = 1;
u32 clock_freq = 0;
@@ -2064,6 +2123,12 @@ static int flexcan_probe(struct platform_device *pdev)
"clock-frequency", &clock_freq);
of_property_read_u8(pdev->dev.of_node,
"fsl,clk-source", &clk_src);
+ } else {
+ pdata = dev_get_platdata(&pdev->dev);
+ if (pdata) {
+ clock_freq = pdata->clock_frequency;
+ clk_src = pdata->clk_src;
+ }
}
if (!clock_freq) {
@@ -2089,7 +2154,14 @@ static int flexcan_probe(struct platform_device *pdev)
if (IS_ERR(regs))
return PTR_ERR(regs);
- devtype_data = of_device_get_match_data(&pdev->dev);
+ of_id = of_match_device(flexcan_of_match, &pdev->dev);
+ if (of_id)
+ devtype_data = of_id->data;
+ else if (platform_get_device_id(pdev)->driver_data)
+ devtype_data = (struct flexcan_devtype_data *)
+ platform_get_device_id(pdev)->driver_data;
+ else
+ return -ENODEV;
if ((devtype_data->quirks & FLEXCAN_QUIRK_SUPPORT_FD) &&
!(devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP)) {
@@ -2133,6 +2205,19 @@ static int flexcan_probe(struct platform_device *pdev)
priv->devtype_data = devtype_data;
priv->reg_xceiver = reg_xceiver;
+ if (devtype_data->quirks & FLEXCAN_QUIRK_NR_IRQ_3) {
+ priv->irq_boff = platform_get_irq(pdev, 1);
+ if (priv->irq_boff <= 0) {
+ err = -ENODEV;
+ goto failed_platform_get_irq;
+ }
+ priv->irq_err = platform_get_irq(pdev, 2);
+ if (priv->irq_err <= 0) {
+ err = -ENODEV;
+ goto failed_platform_get_irq;
+ }
+ }
+
if (priv->devtype_data->quirks & FLEXCAN_QUIRK_SUPPORT_FD) {
priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD |
CAN_CTRLMODE_FD_NON_ISO;
@@ -2170,6 +2255,7 @@ static int flexcan_probe(struct platform_device *pdev)
failed_register:
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+ failed_platform_get_irq:
free_candev(dev);
return err;
}
@@ -2322,6 +2408,7 @@ static struct platform_driver flexcan_driver = {
},
.probe = flexcan_probe,
.remove = flexcan_remove,
+ .id_table = flexcan_id_table,
};
module_platform_driver(flexcan_driver);
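
The id_table makes the driver probeable without device tree, which is what the ColdFire mcf5441x needs. A sketch of the board-file side (device name and clock values are assumptions; struct flexcan_platform_data comes from the new <linux/can/platform/flexcan.h> and carries exactly the clock_frequency and clk_src fields the probe path reads above):

#include <linux/can/platform/flexcan.h>
#include <linux/platform_device.h>

static struct flexcan_platform_data mcf5441x_flexcan_pdata = {
	.clock_frequency = 125000000,	/* assumed bus clock */
	.clk_src = 1,
};

static struct platform_device mcf5441x_flexcan0 = {
	.name = "flexcan-mcf5441x",	/* matches flexcan_id_table */
	.id = 0,
	.dev = {
		.platform_data = &mcf5441x_flexcan_pdata,
	},
};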
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index 2a6c918186c0..c68ad56628bd 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -1815,9 +1815,9 @@ static int ican3_get_berr_counter(const struct net_device *ndev,
* Sysfs Attributes
*/
-static ssize_t ican3_sysfs_show_term(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t termination_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
struct ican3_dev *mod = netdev_priv(to_net_dev(dev));
int ret;
@@ -1834,9 +1834,9 @@ static ssize_t ican3_sysfs_show_term(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%u\n", mod->termination_enabled);
}
-static ssize_t ican3_sysfs_set_term(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t termination_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct ican3_dev *mod = netdev_priv(to_net_dev(dev));
unsigned long enable;
@@ -1852,18 +1852,17 @@ static ssize_t ican3_sysfs_set_term(struct device *dev,
return count;
}
-static ssize_t ican3_sysfs_show_fwinfo(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t fwinfo_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
struct ican3_dev *mod = netdev_priv(to_net_dev(dev));
return scnprintf(buf, PAGE_SIZE, "%s\n", mod->fwinfo);
}
-static DEVICE_ATTR(termination, 0644, ican3_sysfs_show_term,
- ican3_sysfs_set_term);
-static DEVICE_ATTR(fwinfo, 0444, ican3_sysfs_show_fwinfo, NULL);
+static DEVICE_ATTR_RW(termination);
+static DEVICE_ATTR_RO(fwinfo);
static struct attribute *ican3_sysfs_attrs[] = {
&dev_attr_termination.attr,
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index 43bca315a66c..2470c47b2e31 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -21,6 +21,7 @@
#include <linux/iopoll.h>
#include <linux/can/dev.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/phy/phy.h>
#include "m_can.h"
@@ -278,7 +279,7 @@ enum m_can_reg {
/* Message RAM Elements */
#define M_CAN_FIFO_ID 0x0
#define M_CAN_FIFO_DLC 0x4
-#define M_CAN_FIFO_DATA(n) (0x8 + ((n) << 2))
+#define M_CAN_FIFO_DATA 0x8
/* Rx Buffer Element */
/* R0 */
@@ -308,6 +309,15 @@ enum m_can_reg {
#define TX_EVENT_MM_MASK GENMASK(31, 24)
#define TX_EVENT_TXTS_MASK GENMASK(15, 0)
+/* The ID and DLC registers are adjacent in M_CAN FIFO memory,
+ * and we can save a (potentially slow) bus round trip by combining
+ * reads and writes to them.
+ */
+struct id_and_dlc {
+ u32 id;
+ u32 dlc;
+};
+
static inline u32 m_can_read(struct m_can_classdev *cdev, enum m_can_reg reg)
{
return cdev->ops->read_reg(cdev, reg);
@@ -319,36 +329,39 @@ static inline void m_can_write(struct m_can_classdev *cdev, enum m_can_reg reg,
cdev->ops->write_reg(cdev, reg, val);
}
-static u32 m_can_fifo_read(struct m_can_classdev *cdev,
- u32 fgi, unsigned int offset)
+static int
+m_can_fifo_read(struct m_can_classdev *cdev,
+ u32 fgi, unsigned int offset, void *val, size_t val_count)
{
u32 addr_offset = cdev->mcfg[MRAM_RXF0].off + fgi * RXF0_ELEMENT_SIZE +
offset;
- return cdev->ops->read_fifo(cdev, addr_offset);
+ return cdev->ops->read_fifo(cdev, addr_offset, val, val_count);
}
-static void m_can_fifo_write(struct m_can_classdev *cdev,
- u32 fpi, unsigned int offset, u32 val)
+static int
+m_can_fifo_write(struct m_can_classdev *cdev,
+ u32 fpi, unsigned int offset, const void *val, size_t val_count)
{
u32 addr_offset = cdev->mcfg[MRAM_TXB].off + fpi * TXB_ELEMENT_SIZE +
offset;
- cdev->ops->write_fifo(cdev, addr_offset, val);
+ return cdev->ops->write_fifo(cdev, addr_offset, val, val_count);
}
-static inline void m_can_fifo_write_no_off(struct m_can_classdev *cdev,
- u32 fpi, u32 val)
+static inline int m_can_fifo_write_no_off(struct m_can_classdev *cdev,
+ u32 fpi, u32 val)
{
- cdev->ops->write_fifo(cdev, fpi, val);
+ return cdev->ops->write_fifo(cdev, fpi, &val, 1);
}
-static u32 m_can_txe_fifo_read(struct m_can_classdev *cdev, u32 fgi, u32 offset)
+static int
+m_can_txe_fifo_read(struct m_can_classdev *cdev, u32 fgi, u32 offset, u32 *val)
{
u32 addr_offset = cdev->mcfg[MRAM_TXE].off + fgi * TXE_ELEMENT_SIZE +
offset;
- return cdev->ops->read_fifo(cdev, addr_offset);
+ return cdev->ops->read_fifo(cdev, addr_offset, val, 1);
}
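
The two-word transfers rely on struct id_and_dlc mirroring the register layout: id at M_CAN_FIFO_ID (0x0) and dlc at M_CAN_FIFO_DLC (0x4), with no padding between the two 32-bit members. A compile-time check along these lines would pin that assumption down (a sketch, not in the patch):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct id_and_dlc {
	uint32_t id;	/* mirrors M_CAN_FIFO_ID, offset 0x0 */
	uint32_t dlc;	/* mirrors M_CAN_FIFO_DLC, offset 0x4 */
};

static_assert(offsetof(struct id_and_dlc, dlc) == 0x4,
	      "dlc must directly follow id, matching message RAM");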
static inline bool m_can_tx_fifo_full(struct m_can_classdev *cdev)
@@ -436,7 +449,7 @@ static void m_can_clean(struct net_device *net)
* napi. For non-peripherals, RX is done in napi already, so push
* directly. timestamp is used to ensure good skb ordering in
* rx-offload and is ignored for non-peripherals.
-*/
+ */
static void m_can_receive_skb(struct m_can_classdev *cdev,
struct sk_buff *skb,
u32 timestamp)
@@ -454,54 +467,57 @@ static void m_can_receive_skb(struct m_can_classdev *cdev,
}
}
-static void m_can_read_fifo(struct net_device *dev, u32 rxfs)
+static int m_can_read_fifo(struct net_device *dev, u32 rxfs)
{
struct net_device_stats *stats = &dev->stats;
struct m_can_classdev *cdev = netdev_priv(dev);
struct canfd_frame *cf;
struct sk_buff *skb;
- u32 id, fgi, dlc;
+ struct id_and_dlc fifo_header;
+ u32 fgi;
u32 timestamp = 0;
- int i;
+ int err;
/* calculate the fifo get index for where to read data */
fgi = FIELD_GET(RXFS_FGI_MASK, rxfs);
- dlc = m_can_fifo_read(cdev, fgi, M_CAN_FIFO_DLC);
- if (dlc & RX_BUF_FDF)
+ err = m_can_fifo_read(cdev, fgi, M_CAN_FIFO_ID, &fifo_header, 2);
+ if (err)
+ goto out_fail;
+
+ if (fifo_header.dlc & RX_BUF_FDF)
skb = alloc_canfd_skb(dev, &cf);
else
skb = alloc_can_skb(dev, (struct can_frame **)&cf);
if (!skb) {
stats->rx_dropped++;
- return;
+ return 0;
}
- if (dlc & RX_BUF_FDF)
- cf->len = can_fd_dlc2len((dlc >> 16) & 0x0F);
+ if (fifo_header.dlc & RX_BUF_FDF)
+ cf->len = can_fd_dlc2len((fifo_header.dlc >> 16) & 0x0F);
else
- cf->len = can_cc_dlc2len((dlc >> 16) & 0x0F);
+ cf->len = can_cc_dlc2len((fifo_header.dlc >> 16) & 0x0F);
- id = m_can_fifo_read(cdev, fgi, M_CAN_FIFO_ID);
- if (id & RX_BUF_XTD)
- cf->can_id = (id & CAN_EFF_MASK) | CAN_EFF_FLAG;
+ if (fifo_header.id & RX_BUF_XTD)
+ cf->can_id = (fifo_header.id & CAN_EFF_MASK) | CAN_EFF_FLAG;
else
- cf->can_id = (id >> 18) & CAN_SFF_MASK;
+ cf->can_id = (fifo_header.id >> 18) & CAN_SFF_MASK;
- if (id & RX_BUF_ESI) {
+ if (fifo_header.id & RX_BUF_ESI) {
cf->flags |= CANFD_ESI;
netdev_dbg(dev, "ESI Error\n");
}
- if (!(dlc & RX_BUF_FDF) && (id & RX_BUF_RTR)) {
+ if (!(fifo_header.dlc & RX_BUF_FDF) && (fifo_header.id & RX_BUF_RTR)) {
cf->can_id |= CAN_RTR_FLAG;
} else {
- if (dlc & RX_BUF_BRS)
+ if (fifo_header.dlc & RX_BUF_BRS)
cf->flags |= CANFD_BRS;
- for (i = 0; i < cf->len; i += 4)
- *(u32 *)(cf->data + i) =
- m_can_fifo_read(cdev, fgi,
- M_CAN_FIFO_DATA(i / 4));
+ err = m_can_fifo_read(cdev, fgi, M_CAN_FIFO_DATA,
+ cf->data, DIV_ROUND_UP(cf->len, 4));
+ if (err)
+ goto out_fail;
}
/* acknowledge rx fifo 0 */
@@ -510,9 +526,15 @@ static void m_can_read_fifo(struct net_device *dev, u32 rxfs)
stats->rx_packets++;
stats->rx_bytes += cf->len;
- timestamp = FIELD_GET(RX_BUF_RXTS_MASK, dlc);
+ timestamp = FIELD_GET(RX_BUF_RXTS_MASK, fifo_header.dlc);
m_can_receive_skb(cdev, skb, timestamp);
+
+ return 0;
+
+out_fail:
+ netdev_err(dev, "FIFO read returned %d\n", err);
+ return err;
}
static int m_can_do_rx_poll(struct net_device *dev, int quota)
@@ -520,6 +542,7 @@ static int m_can_do_rx_poll(struct net_device *dev, int quota)
struct m_can_classdev *cdev = netdev_priv(dev);
u32 pkts = 0;
u32 rxfs;
+ int err;
rxfs = m_can_read(cdev, M_CAN_RXF0S);
if (!(rxfs & RXFS_FFL_MASK)) {
@@ -528,7 +551,9 @@ static int m_can_do_rx_poll(struct net_device *dev, int quota)
}
while ((rxfs & RXFS_FFL_MASK) && (quota > 0)) {
- m_can_read_fifo(dev, rxfs);
+ err = m_can_read_fifo(dev, rxfs);
+ if (err)
+ return err;
quota--;
pkts++;
@@ -874,6 +899,7 @@ static int m_can_handle_bus_errors(struct net_device *dev, u32 irqstatus,
static int m_can_rx_handler(struct net_device *dev, int quota)
{
struct m_can_classdev *cdev = netdev_priv(dev);
+ int rx_work_or_err;
int work_done = 0;
u32 irqstatus, psr;
@@ -910,8 +936,13 @@ static int m_can_rx_handler(struct net_device *dev, int quota)
if (irqstatus & IR_ERR_BUS_30X)
work_done += m_can_handle_bus_errors(dev, irqstatus, psr);
- if (irqstatus & IR_RF0N)
- work_done += m_can_do_rx_poll(dev, (quota - work_done));
+ if (irqstatus & IR_RF0N) {
+ rx_work_or_err = m_can_do_rx_poll(dev, (quota - work_done));
+ if (rx_work_or_err < 0)
+ return rx_work_or_err;
+
+ work_done += rx_work_or_err;
+ }
end:
return work_done;
}
@@ -919,12 +950,17 @@ end:
static int m_can_rx_peripheral(struct net_device *dev)
{
struct m_can_classdev *cdev = netdev_priv(dev);
+ int work_done;
- m_can_rx_handler(dev, M_CAN_NAPI_WEIGHT);
+ work_done = m_can_rx_handler(dev, M_CAN_NAPI_WEIGHT);
- m_can_enable_all_interrupts(cdev);
+ /* Don't re-enable interrupts if the driver had a fatal error
+ * (e.g., FIFO read failure).
+ */
+ if (work_done >= 0)
+ m_can_enable_all_interrupts(cdev);
- return 0;
+ return work_done;
}
static int m_can_poll(struct napi_struct *napi, int quota)
@@ -934,7 +970,11 @@ static int m_can_poll(struct napi_struct *napi, int quota)
int work_done;
work_done = m_can_rx_handler(dev, quota);
- if (work_done < quota) {
+
+ /* Don't re-enable interrupts if the driver had a fatal error
+ * (e.g., FIFO read failure).
+ */
+ if (work_done >= 0 && work_done < quota) {
napi_complete_done(napi, work_done);
m_can_enable_all_interrupts(cdev);
}
@@ -945,7 +985,7 @@ static int m_can_poll(struct napi_struct *napi, int quota)
/* Echo tx skb and update net stats. Peripherals use rx-offload for
* echo. timestamp is used for peripherals to ensure correct ordering
* by rx-offload, and is ignored for non-peripherals.
-*/
+ */
static void m_can_tx_update_stats(struct m_can_classdev *cdev,
unsigned int msg_mark,
u32 timestamp)
@@ -965,7 +1005,7 @@ static void m_can_tx_update_stats(struct m_can_classdev *cdev,
stats->tx_packets++;
}
-static void m_can_echo_tx_event(struct net_device *dev)
+static int m_can_echo_tx_event(struct net_device *dev)
{
u32 txe_count = 0;
u32 m_can_txefs;
@@ -984,12 +1024,18 @@ static void m_can_echo_tx_event(struct net_device *dev)
/* Get and process all sent elements */
for (i = 0; i < txe_count; i++) {
u32 txe, timestamp = 0;
+ int err;
/* retrieve get index */
fgi = FIELD_GET(TXEFS_EFGI_MASK, m_can_read(cdev, M_CAN_TXEFS));
/* get message marker, timestamp */
- txe = m_can_txe_fifo_read(cdev, fgi, 4);
+ err = m_can_txe_fifo_read(cdev, fgi, 4, &txe);
+ if (err) {
+ netdev_err(dev, "TXE FIFO read returned %d\n", err);
+ return err;
+ }
+
msg_mark = FIELD_GET(TX_EVENT_MM_MASK, txe);
timestamp = FIELD_GET(TX_EVENT_TXTS_MASK, txe);
@@ -1000,6 +1046,8 @@ static void m_can_echo_tx_event(struct net_device *dev)
/* update stats */
m_can_tx_update_stats(cdev, msg_mark, timestamp);
}
+
+ return 0;
}
static irqreturn_t m_can_isr(int irq, void *dev_id)
@@ -1031,8 +1079,8 @@ static irqreturn_t m_can_isr(int irq, void *dev_id)
m_can_disable_all_interrupts(cdev);
if (!cdev->is_peripheral)
napi_schedule(&cdev->napi);
- else
- m_can_rx_peripheral(dev);
+ else if (m_can_rx_peripheral(dev) < 0)
+ goto out_fail;
}
if (cdev->version == 30) {
@@ -1050,7 +1098,9 @@ static irqreturn_t m_can_isr(int irq, void *dev_id)
} else {
if (ir & IR_TEFN) {
/* New TX FIFO Element arrived */
- m_can_echo_tx_event(dev);
+ if (m_can_echo_tx_event(dev) != 0)
+ goto out_fail;
+
can_led_event(dev, CAN_LED_EVENT_TX);
if (netif_queue_stopped(dev) &&
!m_can_tx_fifo_full(cdev))
@@ -1058,6 +1108,13 @@ static irqreturn_t m_can_isr(int irq, void *dev_id)
}
}
+ if (cdev->is_peripheral)
+ can_rx_offload_threaded_irq_finish(&cdev->offload);
+
+ return IRQ_HANDLED;
+
+out_fail:
+ m_can_disable_all_interrupts(cdev);
return IRQ_HANDLED;
}
@@ -1302,7 +1359,8 @@ static void m_can_chip_config(struct net_device *dev)
m_can_set_bittiming(dev);
/* enable internal timestamp generation, with a prescaler of 16. The
- * prescalar is applied to the nominal bit timing */
+ * prescaler is applied to the nominal bit timing
+ */
m_can_write(cdev, M_CAN_TSCC, FIELD_PREP(TSCC_TCP_MASK, 0xf));
m_can_config_endisable(cdev, false);
@@ -1436,32 +1494,20 @@ static int m_can_dev_setup(struct m_can_classdev *cdev)
case 30:
/* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.0.x */
can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
- cdev->can.bittiming_const = cdev->bit_timing ?
- cdev->bit_timing : &m_can_bittiming_const_30X;
-
- cdev->can.data_bittiming_const = cdev->data_timing ?
- cdev->data_timing :
- &m_can_data_bittiming_const_30X;
+ cdev->can.bittiming_const = &m_can_bittiming_const_30X;
+ cdev->can.data_bittiming_const = &m_can_data_bittiming_const_30X;
break;
case 31:
/* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.1.x */
can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
- cdev->can.bittiming_const = cdev->bit_timing ?
- cdev->bit_timing : &m_can_bittiming_const_31X;
-
- cdev->can.data_bittiming_const = cdev->data_timing ?
- cdev->data_timing :
- &m_can_data_bittiming_const_31X;
+ cdev->can.bittiming_const = &m_can_bittiming_const_31X;
+ cdev->can.data_bittiming_const = &m_can_data_bittiming_const_31X;
break;
case 32:
case 33:
/* Support both MCAN version v3.2.x and v3.3.0 */
- cdev->can.bittiming_const = cdev->bit_timing ?
- cdev->bit_timing : &m_can_bittiming_const_31X;
-
- cdev->can.data_bittiming_const = cdev->data_timing ?
- cdev->data_timing :
- &m_can_data_bittiming_const_31X;
+ cdev->can.bittiming_const = &m_can_bittiming_const_31X;
+ cdev->can.data_bittiming_const = &m_can_data_bittiming_const_31X;
cdev->can.ctrlmode_supported |=
(m_can_niso_supported(cdev) ?
@@ -1518,6 +1564,8 @@ static int m_can_close(struct net_device *dev)
close_candev(dev);
can_led_event(dev, CAN_LED_EVENT_STOP);
+ phy_power_off(cdev->transceiver);
+
return 0;
}
@@ -1540,8 +1588,9 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
struct canfd_frame *cf = (struct canfd_frame *)cdev->tx_skb->data;
struct net_device *dev = cdev->net;
struct sk_buff *skb = cdev->tx_skb;
- u32 id, cccr, fdflags;
- int i;
+ struct id_and_dlc fifo_header;
+ u32 cccr, fdflags;
+ int err;
int putidx;
cdev->tx_skb = NULL;
@@ -1549,27 +1598,29 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
/* Generate ID field for TX buffer Element */
/* Common to all supported M_CAN versions */
if (cf->can_id & CAN_EFF_FLAG) {
- id = cf->can_id & CAN_EFF_MASK;
- id |= TX_BUF_XTD;
+ fifo_header.id = cf->can_id & CAN_EFF_MASK;
+ fifo_header.id |= TX_BUF_XTD;
} else {
- id = ((cf->can_id & CAN_SFF_MASK) << 18);
+ fifo_header.id = ((cf->can_id & CAN_SFF_MASK) << 18);
}
if (cf->can_id & CAN_RTR_FLAG)
- id |= TX_BUF_RTR;
+ fifo_header.id |= TX_BUF_RTR;
if (cdev->version == 30) {
netif_stop_queue(dev);
- /* message ram configuration */
- m_can_fifo_write(cdev, 0, M_CAN_FIFO_ID, id);
- m_can_fifo_write(cdev, 0, M_CAN_FIFO_DLC,
- can_fd_len2dlc(cf->len) << 16);
+ fifo_header.dlc = can_fd_len2dlc(cf->len) << 16;
+
+ /* Write the frame ID, DLC, and payload to the FIFO element. */
+ err = m_can_fifo_write(cdev, 0, M_CAN_FIFO_ID, &fifo_header, 2);
+ if (err)
+ goto out_fail;
- for (i = 0; i < cf->len; i += 4)
- m_can_fifo_write(cdev, 0,
- M_CAN_FIFO_DATA(i / 4),
- *(u32 *)(cf->data + i));
+ err = m_can_fifo_write(cdev, 0, M_CAN_FIFO_DATA,
+ cf->data, DIV_ROUND_UP(cf->len, 4));
+ if (err)
+ goto out_fail;
can_put_echo_skb(skb, dev, 0, 0);
@@ -1613,8 +1664,11 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
/* get put index for frame */
putidx = FIELD_GET(TXFQS_TFQPI_MASK,
m_can_read(cdev, M_CAN_TXFQS));
- /* Write ID Field to FIFO Element */
- m_can_fifo_write(cdev, putidx, M_CAN_FIFO_ID, id);
+
+ /* Construct DLC Field, with CAN-FD configuration.
+ * Use the put index of the fifo as the message marker,
+ * used in the TX interrupt for sending the correct echo frame.
+ */
/* get CAN FD configuration of frame */
fdflags = 0;
@@ -1624,20 +1678,17 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
fdflags |= TX_BUF_BRS;
}
- /* Construct DLC Field. Also contains CAN-FD configuration
- * use put index of fifo as message marker
- * it is used in TX interrupt for
- * sending the correct echo frame
- */
- m_can_fifo_write(cdev, putidx, M_CAN_FIFO_DLC,
- FIELD_PREP(TX_BUF_MM_MASK, putidx) |
- FIELD_PREP(TX_BUF_DLC_MASK,
- can_fd_len2dlc(cf->len)) |
- fdflags | TX_BUF_EFC);
+ fifo_header.dlc = FIELD_PREP(TX_BUF_MM_MASK, putidx) |
+ FIELD_PREP(TX_BUF_DLC_MASK, can_fd_len2dlc(cf->len)) |
+ fdflags | TX_BUF_EFC;
+ err = m_can_fifo_write(cdev, putidx, M_CAN_FIFO_ID, &fifo_header, 2);
+ if (err)
+ goto out_fail;
- for (i = 0; i < cf->len; i += 4)
- m_can_fifo_write(cdev, putidx, M_CAN_FIFO_DATA(i / 4),
- *(u32 *)(cf->data + i));
+ err = m_can_fifo_write(cdev, putidx, M_CAN_FIFO_DATA,
+ cf->data, DIV_ROUND_UP(cf->len, 4));
+ if (err)
+ goto out_fail;
/* Push loopback echo.
* Will be looped back on TX interrupt based on message marker
@@ -1654,6 +1705,11 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
}
return NETDEV_TX_OK;
+
+out_fail:
+ netdev_err(dev, "FIFO write returned %d\n", err);
+ m_can_disable_all_interrupts(cdev);
+ return NETDEV_TX_BUSY;
}
static void m_can_tx_work_queue(struct work_struct *ws)
@@ -1703,10 +1759,14 @@ static int m_can_open(struct net_device *dev)
struct m_can_classdev *cdev = netdev_priv(dev);
int err;
- err = m_can_clk_start(cdev);
+ err = phy_power_on(cdev->transceiver);
if (err)
return err;
+ err = m_can_clk_start(cdev);
+ if (err)
+ goto out_phy_power_off;
+
/* open the can device */
err = open_candev(dev);
if (err) {
@@ -1763,6 +1823,8 @@ out_wq_fail:
close_candev(dev);
exit_disable_clks:
m_can_clk_stop(cdev);
+out_phy_power_off:
+ phy_power_off(cdev->transceiver);
return err;
}
@@ -1819,9 +1881,10 @@ static void m_can_of_parse_mram(struct m_can_classdev *cdev,
cdev->mcfg[MRAM_TXB].off, cdev->mcfg[MRAM_TXB].num);
}
-void m_can_init_ram(struct m_can_classdev *cdev)
+int m_can_init_ram(struct m_can_classdev *cdev)
{
int end, i, start;
+ int err = 0;
/* initialize the entire Message RAM in use to avoid possible
* ECC/parity checksum errors when reading an uninitialized buffer
@@ -1830,8 +1893,13 @@ void m_can_init_ram(struct m_can_classdev *cdev)
end = cdev->mcfg[MRAM_TXB].off +
cdev->mcfg[MRAM_TXB].num * TXB_ELEMENT_SIZE;
- for (i = start; i < end; i += 4)
- m_can_fifo_write_no_off(cdev, i, 0x0);
+ for (i = start; i < end; i += 4) {
+ err = m_can_fifo_write_no_off(cdev, i, 0x0);
+ if (err)
+ break;
+ }
+
+ return err;
}
EXPORT_SYMBOL_GPL(m_can_init_ram);
diff --git a/drivers/net/can/m_can/m_can.h b/drivers/net/can/m_can/m_can.h
index ace071c3e58c..d18b515e6ccc 100644
--- a/drivers/net/can/m_can/m_can.h
+++ b/drivers/net/can/m_can/m_can.h
@@ -28,6 +28,7 @@
#include <linux/iopoll.h>
#include <linux/can/dev.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/phy/phy.h>
/* m_can lec values */
enum m_can_lec_type {
@@ -64,9 +65,9 @@ struct m_can_ops {
int (*clear_interrupts)(struct m_can_classdev *cdev);
u32 (*read_reg)(struct m_can_classdev *cdev, int reg);
int (*write_reg)(struct m_can_classdev *cdev, int reg, int val);
- u32 (*read_fifo)(struct m_can_classdev *cdev, int addr_offset);
+ int (*read_fifo)(struct m_can_classdev *cdev, int addr_offset, void *val, size_t val_count);
int (*write_fifo)(struct m_can_classdev *cdev, int addr_offset,
- int val);
+ const void *val, size_t val_count);
int (*init)(struct m_can_classdev *cdev);
};
@@ -82,9 +83,7 @@ struct m_can_classdev {
struct workqueue_struct *tx_wq;
struct work_struct tx_work;
struct sk_buff *tx_skb;
-
- struct can_bittiming_const *bit_timing;
- struct can_bittiming_const *data_timing;
+ struct phy *transceiver;
struct m_can_ops *ops;
@@ -102,7 +101,7 @@ void m_can_class_free_dev(struct net_device *net);
int m_can_class_register(struct m_can_classdev *cdev);
void m_can_class_unregister(struct m_can_classdev *cdev);
int m_can_class_get_clocks(struct m_can_classdev *cdev);
-void m_can_init_ram(struct m_can_classdev *priv);
+int m_can_init_ram(struct m_can_classdev *priv);
int m_can_class_suspend(struct device *dev);
int m_can_class_resume(struct device *dev);
diff --git a/drivers/net/can/m_can/m_can_pci.c b/drivers/net/can/m_can/m_can_pci.c
index 128808605c3f..89cc3d41e952 100644
--- a/drivers/net/can/m_can/m_can_pci.c
+++ b/drivers/net/can/m_can/m_can_pci.c
@@ -39,11 +39,13 @@ static u32 iomap_read_reg(struct m_can_classdev *cdev, int reg)
return readl(priv->base + reg);
}
-static u32 iomap_read_fifo(struct m_can_classdev *cdev, int offset)
+static int iomap_read_fifo(struct m_can_classdev *cdev, int offset, void *val, size_t val_count)
{
struct m_can_pci_priv *priv = cdev_to_priv(cdev);
- return readl(priv->base + offset);
+ void __iomem *src = priv->base + offset;
+
+ while (val_count--) {
+ *(u32 *)val = ioread32(src);
+ val += 4;
+ src += 4;
+ }
+
+ return 0;
}
static int iomap_write_reg(struct m_can_classdev *cdev, int reg, int val)
@@ -55,11 +57,12 @@ static int iomap_write_reg(struct m_can_classdev *cdev, int reg, int val)
return 0;
}
-static int iomap_write_fifo(struct m_can_classdev *cdev, int offset, int val)
+static int iomap_write_fifo(struct m_can_classdev *cdev, int offset,
+ const void *val, size_t val_count)
{
struct m_can_pci_priv *priv = cdev_to_priv(cdev);
- writel(val, priv->base + offset);
+ void __iomem *dst = priv->base + offset;
+
+ while (val_count--) {
+ iowrite32(*(const u32 *)val, dst);
+ val += 4;
+ dst += 4;
+ }
return 0;
}
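
A note on the MMIO accessors: ioread32_rep() and iowrite32_rep() are the memory-mapped counterparts of insl()/outsl() and repeatedly access one fixed address, which suits a FIFO port register. M_CAN message RAM, however, is ordinary memory-mapped RAM where consecutive words live at consecutive addresses, hence the explicit copy loops used here and in m_can_platform.c. Their generic form (illustrative only):

#include <linux/io.h>
#include <linux/types.h>

static void mmio_read_words(void __iomem *src, u32 *dst, size_t n)
{
	while (n--) {
		*dst++ = ioread32(src);
		src += 4;	/* next message RAM word */
	}
}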
diff --git a/drivers/net/can/m_can/m_can_platform.c b/drivers/net/can/m_can/m_can_platform.c
index 599de0e08cd7..308d4f2fff00 100644
--- a/drivers/net/can/m_can/m_can_platform.c
+++ b/drivers/net/can/m_can/m_can_platform.c
@@ -6,6 +6,7 @@
// Copyright (C) 2018-19 Texas Instruments Incorporated - http://www.ti.com/
#include <linux/platform_device.h>
+#include <linux/phy/phy.h>
#include "m_can.h"
@@ -28,11 +29,13 @@ static u32 iomap_read_reg(struct m_can_classdev *cdev, int reg)
return readl(priv->base + reg);
}
-static u32 iomap_read_fifo(struct m_can_classdev *cdev, int offset)
+static int iomap_read_fifo(struct m_can_classdev *cdev, int offset, void *val, size_t val_count)
{
struct m_can_plat_priv *priv = cdev_to_priv(cdev);
- return readl(priv->mram_base + offset);
+ void __iomem *src = priv->mram_base + offset;
+
+ while (val_count--) {
+ *(u32 *)val = ioread32(src);
+ val += 4;
+ src += 4;
+ }
+
+ return 0;
}
static int iomap_write_reg(struct m_can_classdev *cdev, int reg, int val)
@@ -44,11 +47,12 @@ static int iomap_write_reg(struct m_can_classdev *cdev, int reg, int val)
return 0;
}
-static int iomap_write_fifo(struct m_can_classdev *cdev, int offset, int val)
+static int iomap_write_fifo(struct m_can_classdev *cdev, int offset,
+ const void *val, size_t val_count)
{
struct m_can_plat_priv *priv = cdev_to_priv(cdev);
- writel(val, priv->mram_base + offset);
+ void __iomem *dst = priv->mram_base + offset;
+
+ while (val_count--) {
+ iowrite32(*(const u32 *)val, dst);
+ val += 4;
+ dst += 4;
+ }
return 0;
}
@@ -67,6 +71,7 @@ static int m_can_plat_probe(struct platform_device *pdev)
struct resource *res;
void __iomem *addr;
void __iomem *mram_addr;
+ struct phy *transceiver;
int irq, ret = 0;
mcan_class = m_can_class_allocate_dev(&pdev->dev,
@@ -80,8 +85,7 @@ static int m_can_plat_probe(struct platform_device *pdev)
if (ret)
goto probe_fail;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "m_can");
- addr = devm_ioremap_resource(&pdev->dev, res);
+ addr = devm_platform_ioremap_resource_byname(pdev, "m_can");
irq = platform_get_irq_byname(pdev, "int0");
if (IS_ERR(addr) || irq < 0) {
ret = -EINVAL;
@@ -101,6 +105,16 @@ static int m_can_plat_probe(struct platform_device *pdev)
goto probe_fail;
}
+ transceiver = devm_phy_optional_get(&pdev->dev, NULL);
+ if (IS_ERR(transceiver)) {
+ ret = PTR_ERR(transceiver);
+ dev_err_probe(&pdev->dev, ret, "failed to get phy\n");
+ goto probe_fail;
+ }
+
+ if (transceiver)
+ mcan_class->can.bitrate_max = transceiver->attrs.max_link_rate;
+
priv->base = addr;
priv->mram_base = mram_addr;
@@ -108,6 +122,7 @@ static int m_can_plat_probe(struct platform_device *pdev)
mcan_class->pm_clock_support = 1;
mcan_class->can.clock.freq = clk_get_rate(mcan_class->cclk);
mcan_class->dev = &pdev->dev;
+ mcan_class->transceiver = transceiver;
mcan_class->ops = &m_can_plat_ops;
@@ -115,7 +130,9 @@ static int m_can_plat_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, mcan_class);
- m_can_init_ram(mcan_class);
+ ret = m_can_init_ram(mcan_class);
+ if (ret)
+ goto probe_fail;
pm_runtime_enable(mcan_class->dev);
ret = m_can_class_register(mcan_class);
diff --git a/drivers/net/can/m_can/tcan4x5x-core.c b/drivers/net/can/m_can/tcan4x5x-core.c
index 4147cecfbbd6..04687b15b250 100644
--- a/drivers/net/can/m_can/tcan4x5x-core.c
+++ b/drivers/net/can/m_can/tcan4x5x-core.c
@@ -105,7 +105,6 @@
static inline struct tcan4x5x_priv *cdev_to_priv(struct m_can_classdev *cdev)
{
return container_of(cdev, struct tcan4x5x_priv, cdev);
-
}
static void tcan4x5x_check_wake(struct tcan4x5x_priv *priv)
@@ -154,14 +153,12 @@ static u32 tcan4x5x_read_reg(struct m_can_classdev *cdev, int reg)
return val;
}
-static u32 tcan4x5x_read_fifo(struct m_can_classdev *cdev, int addr_offset)
+static int tcan4x5x_read_fifo(struct m_can_classdev *cdev, int addr_offset,
+ void *val, size_t val_count)
{
struct tcan4x5x_priv *priv = cdev_to_priv(cdev);
- u32 val;
-
- regmap_read(priv->regmap, TCAN4X5X_MRAM_START + addr_offset, &val);
- return val;
+ return regmap_bulk_read(priv->regmap, TCAN4X5X_MRAM_START + addr_offset, val, val_count);
}
static int tcan4x5x_write_reg(struct m_can_classdev *cdev, int reg, int val)
@@ -172,11 +169,11 @@ static int tcan4x5x_write_reg(struct m_can_classdev *cdev, int reg, int val)
}
static int tcan4x5x_write_fifo(struct m_can_classdev *cdev,
- int addr_offset, int val)
+ int addr_offset, const void *val, size_t val_count)
{
struct tcan4x5x_priv *priv = cdev_to_priv(cdev);
- return regmap_write(priv->regmap, TCAN4X5X_MRAM_START + addr_offset, val);
+ return regmap_bulk_write(priv->regmap, TCAN4X5X_MRAM_START + addr_offset, val, val_count);
}
static int tcan4x5x_power_enable(struct regulator *reg, int enable)
@@ -238,7 +235,9 @@ static int tcan4x5x_init(struct m_can_classdev *cdev)
return ret;
/* Zero out the MCAN buffers */
- m_can_init_ram(cdev);
+ ret = m_can_init_ram(cdev);
+ if (ret)
+ return ret;
ret = regmap_update_bits(tcan4x5x->regmap, TCAN4X5X_CONFIG,
TCAN4X5X_MODE_SEL_MASK, TCAN4X5X_MODE_NORMAL);
diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
index 311e6ca3bdc4..5d4d52afde15 100644
--- a/drivers/net/can/rcar/rcar_canfd.c
+++ b/drivers/net/can/rcar/rcar_canfd.c
@@ -37,9 +37,15 @@
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/iopoll.h>
+#include <linux/reset.h>
#define RCANFD_DRV_NAME "rcar_canfd"
+enum rcanfd_chip_id {
+ RENESAS_RCAR_GEN3 = 0,
+ RENESAS_RZG2L,
+};
+
/* Global register bits */
/* RSCFDnCFDGRMCFG */
@@ -513,6 +519,9 @@ struct rcar_canfd_global {
enum rcar_canfd_fcanclk fcan; /* CANFD or Ext clock */
unsigned long channels_mask; /* Enabled channels mask */
bool fdmode; /* CAN FD or Classical CAN only mode */
+ struct reset_control *rstc1;
+ struct reset_control *rstc2;
+ enum rcanfd_chip_id chip_id;
};
/* CAN FD mode nominal rate constants */
@@ -1070,38 +1079,70 @@ static void rcar_canfd_tx_done(struct net_device *ndev)
can_led_event(ndev, CAN_LED_EVENT_TX);
}
+static void rcar_canfd_handle_global_err(struct rcar_canfd_global *gpriv, u32 ch)
+{
+ struct rcar_canfd_channel *priv = gpriv->ch[ch];
+ struct net_device *ndev = priv->ndev;
+ u32 gerfl;
+
+ /* Handle global error interrupts */
+ gerfl = rcar_canfd_read(priv->base, RCANFD_GERFL);
+ if (unlikely(RCANFD_GERFL_ERR(gpriv, gerfl)))
+ rcar_canfd_global_error(ndev);
+}
+
+static irqreturn_t rcar_canfd_global_err_interrupt(int irq, void *dev_id)
+{
+ struct rcar_canfd_global *gpriv = dev_id;
+ u32 ch;
+
+ for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS)
+ rcar_canfd_handle_global_err(gpriv, ch);
+
+ return IRQ_HANDLED;
+}
+
+static void rcar_canfd_handle_global_receive(struct rcar_canfd_global *gpriv, u32 ch)
+{
+ struct rcar_canfd_channel *priv = gpriv->ch[ch];
+ u32 ridx = ch + RCANFD_RFFIFO_IDX;
+ u32 sts;
+
+ /* Handle Rx interrupts */
+ sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(ridx));
+ if (likely(sts & RCANFD_RFSTS_RFIF)) {
+ if (napi_schedule_prep(&priv->napi)) {
+ /* Disable Rx FIFO interrupts */
+ rcar_canfd_clear_bit(priv->base,
+ RCANFD_RFCC(ridx),
+ RCANFD_RFCC_RFIE);
+ __napi_schedule(&priv->napi);
+ }
+ }
+}
+
+static irqreturn_t rcar_canfd_global_receive_fifo_interrupt(int irq, void *dev_id)
+{
+ struct rcar_canfd_global *gpriv = dev_id;
+ u32 ch;
+
+ for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS)
+ rcar_canfd_handle_global_receive(gpriv, ch);
+
+ return IRQ_HANDLED;
+}
+
static irqreturn_t rcar_canfd_global_interrupt(int irq, void *dev_id)
{
struct rcar_canfd_global *gpriv = dev_id;
- struct net_device *ndev;
- struct rcar_canfd_channel *priv;
- u32 sts, gerfl;
- u32 ch, ridx;
+ u32 ch;
/* Global error interrupts still indicate a condition specific
* to a channel. RxFIFO interrupt is a global interrupt.
*/
for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) {
- priv = gpriv->ch[ch];
- ndev = priv->ndev;
- ridx = ch + RCANFD_RFFIFO_IDX;
-
- /* Global error interrupts */
- gerfl = rcar_canfd_read(priv->base, RCANFD_GERFL);
- if (unlikely(RCANFD_GERFL_ERR(gpriv, gerfl)))
- rcar_canfd_global_error(ndev);
-
- /* Handle Rx interrupts */
- sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(ridx));
- if (likely(sts & RCANFD_RFSTS_RFIF)) {
- if (napi_schedule_prep(&priv->napi)) {
- /* Disable Rx FIFO interrupts */
- rcar_canfd_clear_bit(priv->base,
- RCANFD_RFCC(ridx),
- RCANFD_RFCC_RFIE);
- __napi_schedule(&priv->napi);
- }
- }
+ rcar_canfd_handle_global_err(gpriv, ch);
+ rcar_canfd_handle_global_receive(gpriv, ch);
}
return IRQ_HANDLED;
}
@@ -1139,38 +1180,73 @@ static void rcar_canfd_state_change(struct net_device *ndev,
}
}
-static irqreturn_t rcar_canfd_channel_interrupt(int irq, void *dev_id)
+static void rcar_canfd_handle_channel_tx(struct rcar_canfd_global *gpriv, u32 ch)
+{
+ struct rcar_canfd_channel *priv = gpriv->ch[ch];
+ struct net_device *ndev = priv->ndev;
+ u32 sts;
+
+ /* Handle Tx interrupts */
+ sts = rcar_canfd_read(priv->base,
+ RCANFD_CFSTS(ch, RCANFD_CFFIFO_IDX));
+ if (likely(sts & RCANFD_CFSTS_CFTXIF))
+ rcar_canfd_tx_done(ndev);
+}
+
+static irqreturn_t rcar_canfd_channel_tx_interrupt(int irq, void *dev_id)
{
struct rcar_canfd_global *gpriv = dev_id;
- struct net_device *ndev;
- struct rcar_canfd_channel *priv;
- u32 sts, ch, cerfl;
+ u32 ch;
+
+ for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS)
+ rcar_canfd_handle_channel_tx(gpriv, ch);
+
+ return IRQ_HANDLED;
+}
+
+static void rcar_canfd_handle_channel_err(struct rcar_canfd_global *gpriv, u32 ch)
+{
+ struct rcar_canfd_channel *priv = gpriv->ch[ch];
+ struct net_device *ndev = priv->ndev;
u16 txerr, rxerr;
+ u32 sts, cerfl;
+
+ /* Handle channel error interrupts */
+ cerfl = rcar_canfd_read(priv->base, RCANFD_CERFL(ch));
+ sts = rcar_canfd_read(priv->base, RCANFD_CSTS(ch));
+ txerr = RCANFD_CSTS_TECCNT(sts);
+ rxerr = RCANFD_CSTS_RECCNT(sts);
+ if (unlikely(RCANFD_CERFL_ERR(cerfl)))
+ rcar_canfd_error(ndev, cerfl, txerr, rxerr);
+
+ /* Handle state change to lower states */
+ if (unlikely(priv->can.state != CAN_STATE_ERROR_ACTIVE &&
+ priv->can.state != CAN_STATE_BUS_OFF))
+ rcar_canfd_state_change(ndev, txerr, rxerr);
+}
+
+static irqreturn_t rcar_canfd_channel_err_interrupt(int irq, void *dev_id)
+{
+ struct rcar_canfd_global *gpriv = dev_id;
+ u32 ch;
+
+ for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS)
+ rcar_canfd_handle_channel_err(gpriv, ch);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rcar_canfd_channel_interrupt(int irq, void *dev_id)
+{
+ struct rcar_canfd_global *gpriv = dev_id;
+ u32 ch;
/* Common FIFO is a per channel resource */
for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) {
- priv = gpriv->ch[ch];
- ndev = priv->ndev;
-
- /* Channel error interrupts */
- cerfl = rcar_canfd_read(priv->base, RCANFD_CERFL(ch));
- sts = rcar_canfd_read(priv->base, RCANFD_CSTS(ch));
- txerr = RCANFD_CSTS_TECCNT(sts);
- rxerr = RCANFD_CSTS_RECCNT(sts);
- if (unlikely(RCANFD_CERFL_ERR(cerfl)))
- rcar_canfd_error(ndev, cerfl, txerr, rxerr);
-
- /* Handle state change to lower states */
- if (unlikely((priv->can.state != CAN_STATE_ERROR_ACTIVE) &&
- (priv->can.state != CAN_STATE_BUS_OFF)))
- rcar_canfd_state_change(ndev, txerr, rxerr);
-
- /* Handle Tx interrupts */
- sts = rcar_canfd_read(priv->base,
- RCANFD_CFSTS(ch, RCANFD_CFFIFO_IDX));
- if (likely(sts & RCANFD_CFSTS_CFTXIF))
- rcar_canfd_tx_done(ndev);
+ rcar_canfd_handle_channel_err(gpriv, ch);
+ rcar_canfd_handle_channel_tx(gpriv, ch);
}
+
return IRQ_HANDLED;
}
@@ -1577,6 +1653,53 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch,
priv->can.clock.freq = fcan_freq;
dev_info(&pdev->dev, "can_clk rate is %u\n", priv->can.clock.freq);
+ if (gpriv->chip_id == RENESAS_RZG2L) {
+ char *irq_name;
+ int err_irq;
+ int tx_irq;
+
+ err_irq = platform_get_irq_byname(pdev, ch == 0 ? "ch0_err" : "ch1_err");
+ if (err_irq < 0) {
+ err = err_irq;
+ goto fail;
+ }
+
+ tx_irq = platform_get_irq_byname(pdev, ch == 0 ? "ch0_trx" : "ch1_trx");
+ if (tx_irq < 0) {
+ err = tx_irq;
+ goto fail;
+ }
+
+ irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+ "canfd.ch%d_err", ch);
+ if (!irq_name) {
+ err = -ENOMEM;
+ goto fail;
+ }
+ err = devm_request_irq(&pdev->dev, err_irq,
+ rcar_canfd_channel_err_interrupt, 0,
+ irq_name, gpriv);
+ if (err) {
+ dev_err(&pdev->dev, "devm_request_irq CH Err(%d) failed, error %d\n",
+ err_irq, err);
+ goto fail;
+ }
+ irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+ "canfd.ch%d_trx", ch);
+ if (!irq_name) {
+ err = -ENOMEM;
+ goto fail;
+ }
+ err = devm_request_irq(&pdev->dev, tx_irq,
+ rcar_canfd_channel_tx_interrupt, 0,
+ irq_name, gpriv);
+ if (err) {
+ dev_err(&pdev->dev, "devm_request_irq Tx (%d) failed, error %d\n",
+ tx_irq, err);
+ goto fail;
+ }
+ }
+
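Note that devm_request_irq() stores the name string by reference (it is what later appears in /proc/interrupts), so it must outlive the registration; devm_kasprintf() gives it device-managed lifetime. A minimal sketch of the pattern, with a hypothetical name:

	/* a stack buffer here would become a dangling name after probe returns */
	char *name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "canfd.ch%d_err", ch);
	if (!name)
		return -ENOMEM;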
if (gpriv->fdmode) {
priv->can.bittiming_const = &rcar_canfd_nom_bittiming_const;
priv->can.data_bittiming_const =
@@ -1636,7 +1759,11 @@ static int rcar_canfd_probe(struct platform_device *pdev)
struct device_node *of_child;
unsigned long channels_mask = 0;
int err, ch_irq, g_irq;
+ int g_err_irq, g_recc_irq;
bool fdmode = true; /* CAN FD only mode - default */
+ enum rcanfd_chip_id chip_id;
+
+ chip_id = (uintptr_t)of_device_get_match_data(&pdev->dev);
if (of_property_read_bool(pdev->dev.of_node, "renesas,no-can-fd"))
fdmode = false; /* Classical CAN only mode */
@@ -1649,16 +1776,30 @@ static int rcar_canfd_probe(struct platform_device *pdev)
if (of_child && of_device_is_available(of_child))
channels_mask |= BIT(1); /* Channel 1 */
- ch_irq = platform_get_irq(pdev, 0);
- if (ch_irq < 0) {
- err = ch_irq;
- goto fail_dev;
- }
+ if (chip_id == RENESAS_RCAR_GEN3) {
+ ch_irq = platform_get_irq_byname_optional(pdev, "ch_int");
+ if (ch_irq < 0) {
+ /* For backward compatibility get irq by index */
+ ch_irq = platform_get_irq(pdev, 0);
+ if (ch_irq < 0)
+ return ch_irq;
+ }
- g_irq = platform_get_irq(pdev, 1);
- if (g_irq < 0) {
- err = g_irq;
- goto fail_dev;
+ g_irq = platform_get_irq_byname_optional(pdev, "g_int");
+ if (g_irq < 0) {
+ /* For backward compatibility get irq by index */
+ g_irq = platform_get_irq(pdev, 1);
+ if (g_irq < 0)
+ return g_irq;
+ }
+ } else {
+ g_err_irq = platform_get_irq_byname(pdev, "g_err");
+ if (g_err_irq < 0)
+ return g_err_irq;
+
+ g_recc_irq = platform_get_irq_byname(pdev, "g_recc");
+ if (g_recc_irq < 0)
+ return g_recc_irq;
}
/* Global controller context */
@@ -1670,6 +1811,19 @@ static int rcar_canfd_probe(struct platform_device *pdev)
gpriv->pdev = pdev;
gpriv->channels_mask = channels_mask;
gpriv->fdmode = fdmode;
+ gpriv->chip_id = chip_id;
+
+ if (gpriv->chip_id == RENESAS_RZG2L) {
+ gpriv->rstc1 = devm_reset_control_get_exclusive(&pdev->dev, "rstp_n");
+ if (IS_ERR(gpriv->rstc1))
+ return dev_err_probe(&pdev->dev, PTR_ERR(gpriv->rstc1),
+ "failed to get rstp_n\n");
+
+ gpriv->rstc2 = devm_reset_control_get_exclusive(&pdev->dev, "rstc_n");
+ if (IS_ERR(gpriv->rstc2))
+ return dev_err_probe(&pdev->dev, PTR_ERR(gpriv->rstc2),
+ "failed to get rstc_n\n");
+ }
/* Peripheral clock */
gpriv->clkp = devm_clk_get(&pdev->dev, "fck");
@@ -1699,7 +1853,7 @@ static int rcar_canfd_probe(struct platform_device *pdev)
}
fcan_freq = clk_get_rate(gpriv->can_clk);
- if (gpriv->fcan == RCANFD_CANFDCLK)
+ if (gpriv->fcan == RCANFD_CANFDCLK && gpriv->chip_id == RENESAS_RCAR_GEN3)
/* CANFD clock is further divided by (1/2) within the IP */
fcan_freq /= 2;
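For illustration, assuming a hypothetical 80 MHz CANFD clock: on R-Car Gen3 the internal divider halves it, while RZ/G2L uses the rate as-is:

	fcan_freq = 80000000;	/* hypothetical clk_get_rate() result */
	if (gpriv->fcan == RCANFD_CANFDCLK && gpriv->chip_id == RENESAS_RCAR_GEN3)
		fcan_freq /= 2;	/* Gen3: 40000000; RZ/G2L: unchanged */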
@@ -1711,20 +1865,51 @@ static int rcar_canfd_probe(struct platform_device *pdev)
gpriv->base = addr;
/* Request IRQ that's common for both channels */
- err = devm_request_irq(&pdev->dev, ch_irq,
- rcar_canfd_channel_interrupt, 0,
- "canfd.chn", gpriv);
- if (err) {
- dev_err(&pdev->dev, "devm_request_irq(%d) failed, error %d\n",
- ch_irq, err);
- goto fail_dev;
+ if (gpriv->chip_id == RENESAS_RCAR_GEN3) {
+ err = devm_request_irq(&pdev->dev, ch_irq,
+ rcar_canfd_channel_interrupt, 0,
+ "canfd.ch_int", gpriv);
+ if (err) {
+ dev_err(&pdev->dev, "devm_request_irq(%d) failed, error %d\n",
+ ch_irq, err);
+ goto fail_dev;
+ }
+
+ err = devm_request_irq(&pdev->dev, g_irq,
+ rcar_canfd_global_interrupt, 0,
+ "canfd.g_int", gpriv);
+ if (err) {
+ dev_err(&pdev->dev, "devm_request_irq(%d) failed, error %d\n",
+ g_irq, err);
+ goto fail_dev;
+ }
+ } else {
+ err = devm_request_irq(&pdev->dev, g_recc_irq,
+ rcar_canfd_global_receive_fifo_interrupt, 0,
+ "canfd.g_recc", gpriv);
+
+ if (err) {
+ dev_err(&pdev->dev, "devm_request_irq(%d) failed, error %d\n",
+ g_recc_irq, err);
+ goto fail_dev;
+ }
+
+ err = devm_request_irq(&pdev->dev, g_err_irq,
+ rcar_canfd_global_err_interrupt, 0,
+ "canfd.g_err", gpriv);
+ if (err) {
+ dev_err(&pdev->dev, "devm_request_irq(%d) failed, error %d\n",
+ g_err_irq, err);
+ goto fail_dev;
+ }
}
- err = devm_request_irq(&pdev->dev, g_irq,
- rcar_canfd_global_interrupt, 0,
- "canfd.gbl", gpriv);
+
+ err = reset_control_reset(gpriv->rstc1);
+ if (err)
+ goto fail_dev;
+ err = reset_control_reset(gpriv->rstc2);
if (err) {
- dev_err(&pdev->dev, "devm_request_irq(%d) failed, error %d\n",
- g_irq, err);
+ reset_control_assert(gpriv->rstc1);
goto fail_dev;
}
@@ -1733,7 +1918,7 @@ static int rcar_canfd_probe(struct platform_device *pdev)
if (err) {
dev_err(&pdev->dev,
"failed to enable peripheral clock, error %d\n", err);
- goto fail_dev;
+ goto fail_reset;
}
err = rcar_canfd_reset_controller(gpriv);
@@ -1790,6 +1975,9 @@ fail_mode:
rcar_canfd_disable_global_interrupts(gpriv);
fail_clk:
clk_disable_unprepare(gpriv->clkp);
+fail_reset:
+ reset_control_assert(gpriv->rstc1);
+ reset_control_assert(gpriv->rstc2);
fail_dev:
return err;
}
@@ -1810,6 +1998,9 @@ static int rcar_canfd_remove(struct platform_device *pdev)
/* Enter global sleep mode */
rcar_canfd_set_bit(gpriv->base, RCANFD_GCTR, RCANFD_GCTR_GSLPR);
clk_disable_unprepare(gpriv->clkp);
+ reset_control_assert(gpriv->rstc1);
+ reset_control_assert(gpriv->rstc2);
+
return 0;
}
@@ -1827,7 +2018,8 @@ static SIMPLE_DEV_PM_OPS(rcar_canfd_pm_ops, rcar_canfd_suspend,
rcar_canfd_resume);
static const struct of_device_id rcar_canfd_of_table[] = {
- { .compatible = "renesas,rcar-gen3-canfd" },
+ { .compatible = "renesas,rcar-gen3-canfd", .data = (void *)RENESAS_RCAR_GEN3 },
+ { .compatible = "renesas,rzg2l-canfd", .data = (void *)RENESAS_RZG2L },
{ }
};
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
index 84eac8cb8686..6db90dc4bc9d 100644
--- a/drivers/net/can/sja1000/peak_pci.c
+++ b/drivers/net/can/sja1000/peak_pci.c
@@ -28,6 +28,10 @@ MODULE_LICENSE("GPL v2");
#define DRV_NAME "peak_pci"
+/* FPGA cards FW version registers */
+#define PEAK_VER_REG1 0x40
+#define PEAK_VER_REG2 0x44
+
struct peak_pciec_card;
struct peak_pci_chan {
void __iomem *cfg_base; /* Common for all channels */
@@ -41,9 +45,7 @@ struct peak_pci_chan {
#define PEAK_PCI_CDR (CDR_CBP | CDR_CLKOUT_MASK)
#define PEAK_PCI_OCR OCR_TX0_PUSHPULL
-/*
- * Important PITA registers
- */
+/* Important PITA registers */
#define PITA_ICR 0x00 /* Interrupt control register */
#define PITA_GPIOICR 0x18 /* GPIO interface control register */
#define PITA_MISC 0x1C /* Miscellaneous register */
@@ -70,27 +72,47 @@ static const u16 peak_pci_icr_masks[PEAK_PCI_CHAN_MAX] = {
};
static const struct pci_device_id peak_pci_tbl[] = {
- {PEAK_PCI_VENDOR_ID, PEAK_PCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
- {PEAK_PCI_VENDOR_ID, PEAK_PCIE_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
- {PEAK_PCI_VENDOR_ID, PEAK_MPCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
- {PEAK_PCI_VENDOR_ID, PEAK_MPCIE_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
- {PEAK_PCI_VENDOR_ID, PEAK_PC_104P_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
- {PEAK_PCI_VENDOR_ID, PEAK_PCI_104E_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
- {PEAK_PCI_VENDOR_ID, PEAK_CPCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
- {PEAK_PCI_VENDOR_ID, PEAK_PCIE_OEM_ID, PCI_ANY_ID, PCI_ANY_ID,},
+ {
+ PEAK_PCI_VENDOR_ID, PEAK_PCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)"PCAN-PCI",
+ }, {
+ PEAK_PCI_VENDOR_ID, PEAK_PCIE_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)"PCAN-PCI Express",
+ }, {
+ PEAK_PCI_VENDOR_ID, PEAK_MPCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)"PCAN-miniPCI",
+ }, {
+ PEAK_PCI_VENDOR_ID, PEAK_MPCIE_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)"PCAN-miniPCIe",
+ }, {
+ PEAK_PCI_VENDOR_ID, PEAK_PC_104P_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)"PCAN-PC/104-Plus Quad",
+ }, {
+ PEAK_PCI_VENDOR_ID, PEAK_PCI_104E_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)"PCAN-PCI/104-Express",
+ }, {
+ PEAK_PCI_VENDOR_ID, PEAK_CPCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)"PCAN-cPCI",
+ }, {
+ PEAK_PCI_VENDOR_ID, PEAK_PCIE_OEM_ID, PCI_ANY_ID, PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)"PCAN-Chip PCIe",
+ },
#ifdef CONFIG_CAN_PEAK_PCIEC
- {PEAK_PCI_VENDOR_ID, PEAK_PCIEC_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
- {PEAK_PCI_VENDOR_ID, PEAK_PCIEC34_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
+ {
+ PEAK_PCI_VENDOR_ID, PEAK_PCIEC_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)"PCAN-ExpressCard",
+ }, {
+ PEAK_PCI_VENDOR_ID, PEAK_PCIEC34_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)"PCAN-ExpressCard 34",
+ },
#endif
- {0,}
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(pci, peak_pci_tbl);
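driver_data is a kernel_ulong_t, wide enough to hold a pointer, so each table entry can carry its commercial name and probe() can recover it. A minimal sketch of the round trip, using a name from the table above:

	/* in the ID table */
	.driver_data = (kernel_ulong_t)"PCAN-PCI",
	/* in probe(), given the matching entry *ent */
	const char *name = (const char *)ent->driver_data;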
#ifdef CONFIG_CAN_PEAK_PCIEC
-/*
- * PCAN-ExpressCard needs I2C bit-banging configuration option.
- */
+/* PCAN-ExpressCard needs I2C bit-banging configuration option. */
/* GPIOICR byte access offsets */
#define PITA_GPOUT 0x18 /* GPx output value */
@@ -156,12 +178,14 @@ static void peak_pci_write_reg(const struct sja1000_priv *priv,
static inline void pita_set_scl_highz(struct peak_pciec_card *card)
{
u8 gp_outen = readb(card->cfg_base + PITA_GPOEN) & ~PITA_GPIN_SCL;
+
writeb(gp_outen, card->cfg_base + PITA_GPOEN);
}
static inline void pita_set_sda_highz(struct peak_pciec_card *card)
{
u8 gp_outen = readb(card->cfg_base + PITA_GPOEN) & ~PITA_GPIN_SDA;
+
writeb(gp_outen, card->cfg_base + PITA_GPOEN);
}
@@ -230,9 +254,7 @@ static int pita_getscl(void *data)
return (readb(card->cfg_base + PITA_GPIN) & PITA_GPIN_SCL) ? 1 : 0;
}
-/*
- * write commands to the LED chip though the I2C-bus of the PCAN-PCIeC
- */
+/* write commands to the LED chip through the I2C-bus of the PCAN-PCIeC */
static int peak_pciec_write_pca9553(struct peak_pciec_card *card,
u8 offset, u8 data)
{
@@ -248,7 +270,7 @@ static int peak_pciec_write_pca9553(struct peak_pciec_card *card,
int ret;
/* cache led mask */
- if ((offset == 5) && (data == card->led_cache))
+ if (offset == 5 && data == card->led_cache)
return 0;
ret = i2c_transfer(&card->led_chip, &msg, 1);
@@ -261,9 +283,7 @@ static int peak_pciec_write_pca9553(struct peak_pciec_card *card,
return 0;
}
-/*
- * delayed work callback used to control the LEDs
- */
+/* delayed work callback used to control the LEDs */
static void peak_pciec_led_work(struct work_struct *work)
{
struct peak_pciec_card *card =
@@ -309,9 +329,7 @@ static void peak_pciec_led_work(struct work_struct *work)
schedule_delayed_work(&card->led_work, HZ);
}
-/*
- * set LEDs blinking state
- */
+/* set LEDs blinking state */
static void peak_pciec_set_leds(struct peak_pciec_card *card, u8 led_mask, u8 s)
{
u8 new_led = card->led_cache;
@@ -328,25 +346,19 @@ static void peak_pciec_set_leds(struct peak_pciec_card *card, u8 led_mask, u8 s)
peak_pciec_write_pca9553(card, 5, new_led);
}
-/*
- * start one second delayed work to control LEDs
- */
+/* start one second delayed work to control LEDs */
static void peak_pciec_start_led_work(struct peak_pciec_card *card)
{
schedule_delayed_work(&card->led_work, HZ);
}
-/*
- * stop LEDs delayed work
- */
+/* stop LEDs delayed work */
static void peak_pciec_stop_led_work(struct peak_pciec_card *card)
{
cancel_delayed_work_sync(&card->led_work);
}
-/*
- * initialize the PCA9553 4-bit I2C-bus LED chip
- */
+/* initialize the PCA9553 4-bit I2C-bus LED chip */
static int peak_pciec_init_leds(struct peak_pciec_card *card)
{
int err;
@@ -375,17 +387,14 @@ static int peak_pciec_init_leds(struct peak_pciec_card *card)
return peak_pciec_write_pca9553(card, 5, PCA9553_LS0_INIT);
}
-/*
- * restore LEDs state to off peak_pciec_leds_exit
- */
+/* restore LEDs state to off in peak_pciec_leds_exit */
static void peak_pciec_leds_exit(struct peak_pciec_card *card)
{
/* switch LEDs to off */
peak_pciec_write_pca9553(card, 5, PCA9553_LED_OFF_ALL);
}
-/*
- * normal write sja1000 register method overloaded to catch when controller
+/* normal write sja1000 register method overloaded to catch when controller
* is started or stopped, to control leds
*/
static void peak_pciec_write_reg(const struct sja1000_priv *priv,
@@ -443,7 +452,7 @@ static int peak_pciec_probe(struct pci_dev *pdev, struct net_device *dev)
/* channel is the first one: do the init part */
} else {
/* create the bit banging I2C adapter structure */
- card = kzalloc(sizeof(struct peak_pciec_card), GFP_KERNEL);
+ card = kzalloc(sizeof(*card), GFP_KERNEL);
if (!card)
return -ENOMEM;
@@ -506,9 +515,7 @@ static void peak_pciec_remove(struct peak_pciec_card *card)
#else /* CONFIG_CAN_PEAK_PCIEC */
-/*
- * Placebo functions when PCAN-ExpressCard support is not selected
- */
+/* Placebo functions when PCAN-ExpressCard support is not selected */
static inline int peak_pciec_probe(struct pci_dev *pdev, struct net_device *dev)
{
return -ENODEV;
@@ -549,6 +556,7 @@ static int peak_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
void __iomem *cfg_base, *reg_base;
u16 sub_sys_id, icr;
int i, err, channels;
+ char fw_str[14] = "";
err = pci_enable_device(pdev);
if (err)
@@ -602,6 +610,21 @@ static int peak_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Leave parport mux mode */
writeb(0x04, cfg_base + PITA_MISC + 3);
+ /* FPGA equipped card if not 0 */
+ if (readl(cfg_base + PEAK_VER_REG1)) {
+ /* FPGA card: display version of the running firmware */
+ u32 fw_ver = readl(cfg_base + PEAK_VER_REG2);
+
+ snprintf(fw_str, sizeof(fw_str), " FW v%u.%u.%u",
+ (fw_ver >> 12) & 0xf,
+ (fw_ver >> 8) & 0xf,
+ (fw_ver >> 4) & 0xf);
+ }
+
+ /* Display commercial name (and, if available, FW version) of the card */
+ dev_info(&pdev->dev, "%ux CAN %s%s\n",
+ channels, (const char *)ent->driver_data, fw_str);
+
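A worked example with a hypothetical register value: fw_ver = 0x3140 decodes to nibbles (3, 1, 4), so fw_str becomes " FW v3.1.4":

	u32 fw_ver = 0x3140;	/* hypothetical PEAK_VER_REG2 value */
	/* (fw_ver >> 12) & 0xf == 3, (fw_ver >> 8) & 0xf == 1, (fw_ver >> 4) & 0xf == 4 */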
icr = readw(cfg_base + PITA_ICR + 2);
for (i = 0; i < channels; i++) {
@@ -642,8 +665,7 @@ static int peak_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
chan->prev_dev = pci_get_drvdata(pdev);
pci_set_drvdata(pdev, dev);
- /*
- * PCAN-ExpressCard needs some additional i2c init.
+ /* PCAN-ExpressCard needs some additional i2c init.
* This must be done *before* register_sja1000dev() but
* *after* devices linkage
*/
@@ -709,7 +731,8 @@ failure_disable_pci:
/* pci_xxx_config_word() return positive PCIBIOS_xxx error codes while
* the probe() function must return a negative errno in case of failure
- * (err is unchanged if negative) */
+ * (err is unchanged if negative)
+ */
return pcibios_err_to_errno(err);
}
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
index 9ae48072b6c6..673861ab665a 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
@@ -15,10 +15,10 @@
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/device.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/pm_runtime.h>
+#include <linux/property.h>
#include <asm/unaligned.h>
@@ -1456,7 +1456,7 @@ mcp251xfd_rx_ring_update(const struct mcp251xfd_priv *priv,
}
static void
-mcp251xfd_hw_rx_obj_to_skb(struct mcp251xfd_priv *priv,
+mcp251xfd_hw_rx_obj_to_skb(const struct mcp251xfd_priv *priv,
const struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj,
struct sk_buff *skb)
{
@@ -2195,8 +2195,10 @@ static irqreturn_t mcp251xfd_irq(int irq, void *dev_id)
FIELD_GET(MCP251XFD_REG_INT_IE_MASK,
priv->regs_status.intf);
- if (!(intf_pending))
+ if (!(intf_pending)) {
+ can_rx_offload_threaded_irq_finish(&priv->offload);
return handled;
+ }
/* Some interrupts must be ACKed in the
* MCP251XFD_REG_INT register.
@@ -2296,6 +2298,8 @@ static irqreturn_t mcp251xfd_irq(int irq, void *dev_id)
} while (1);
out_fail:
+ can_rx_offload_threaded_irq_finish(&priv->offload);
+
netdev_err(priv->ndev, "IRQ handler returned %d (intf=0x%08x).\n",
err, priv->regs_status.intf);
mcp251xfd_dump(priv);
@@ -2524,8 +2528,8 @@ static int mcp251xfd_open(struct net_device *ndev)
can_rx_offload_enable(&priv->offload);
err = request_threaded_irq(spi->irq, NULL, mcp251xfd_irq,
- IRQF_ONESHOT, dev_name(&spi->dev),
- priv);
+ IRQF_SHARED | IRQF_ONESHOT,
+ dev_name(&spi->dev), priv);
if (err)
goto out_can_rx_offload_disable;
@@ -2857,7 +2861,7 @@ static int mcp251xfd_probe(struct spi_device *spi)
struct gpio_desc *rx_int;
struct regulator *reg_vdd, *reg_xceiver;
struct clk *clk;
- u32 freq;
+ u32 freq = 0;
int err;
if (!spi->irq)
@@ -2884,11 +2888,19 @@ static int mcp251xfd_probe(struct spi_device *spi)
return dev_err_probe(&spi->dev, PTR_ERR(reg_xceiver),
"Failed to get Transceiver regulator!\n");
- clk = devm_clk_get(&spi->dev, NULL);
+ clk = devm_clk_get_optional(&spi->dev, NULL);
if (IS_ERR(clk))
return dev_err_probe(&spi->dev, PTR_ERR(clk),
"Failed to get Oscillator (clock)!\n");
- freq = clk_get_rate(clk);
+ if (clk) {
+ freq = clk_get_rate(clk);
+ } else {
+ err = device_property_read_u32(&spi->dev, "clock-frequency",
+ &freq);
+ if (err)
+ return dev_err_probe(&spi->dev, err,
+ "Failed to get clock-frequency!\n");
+ }
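Unlike devm_clk_get(), devm_clk_get_optional() returns NULL rather than an error when no clock is described, which is what enables the "clock-frequency" property fallback. A minimal sketch of the same logic:

	struct clk *clk = devm_clk_get_optional(dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);	/* real error, e.g. -EPROBE_DEFER */
	if (clk)
		freq = clk_get_rate(clk);
	else
		err = device_property_read_u32(dev, "clock-frequency", &freq);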
/* Sanity check */
if (freq < MCP251XFD_SYSCLOCK_HZ_MIN ||
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c
index ed3169274d24..712e09186987 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c
@@ -13,7 +13,7 @@
static u64 mcp251xfd_timestamp_read(const struct cyclecounter *cc)
{
- struct mcp251xfd_priv *priv;
+ const struct mcp251xfd_priv *priv;
u32 timestamp = 0;
int err;
@@ -39,7 +39,7 @@ static void mcp251xfd_timestamp_work(struct work_struct *work)
MCP251XFD_TIMESTAMP_WORK_DELAY_SEC * HZ);
}
-void mcp251xfd_skb_set_timestamp(struct mcp251xfd_priv *priv,
+void mcp251xfd_skb_set_timestamp(const struct mcp251xfd_priv *priv,
struct sk_buff *skb, u32 timestamp)
{
struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
index 1002f3902ad2..0f322dabaf65 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
@@ -853,7 +853,7 @@ int mcp251xfd_regmap_init(struct mcp251xfd_priv *priv);
u16 mcp251xfd_crc16_compute2(const void *cmd, size_t cmd_size,
const void *data, size_t data_size);
u16 mcp251xfd_crc16_compute(const void *data, size_t data_size);
-void mcp251xfd_skb_set_timestamp(struct mcp251xfd_priv *priv,
+void mcp251xfd_skb_set_timestamp(const struct mcp251xfd_priv *priv,
struct sk_buff *skb, u32 timestamp);
void mcp251xfd_timestamp_init(struct mcp251xfd_priv *priv);
void mcp251xfd_timestamp_stop(struct mcp251xfd_priv *priv);
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 73245d8836a9..353062ead98f 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -786,6 +786,8 @@ static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id)
int_status = hecc_read(priv, HECC_CANGIF0);
}
+ can_rx_offload_irq_finish(&priv->offload);
+
return IRQ_HANDLED;
}
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index 66fa8b07c2e6..7370981e9b34 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -476,7 +476,7 @@ static void esd_usb2_write_bulk_callback(struct urb *urb)
netif_trans_update(netdev);
}
-static ssize_t show_firmware(struct device *d,
+static ssize_t firmware_show(struct device *d,
struct device_attribute *attr, char *buf)
{
struct usb_interface *intf = to_usb_interface(d);
@@ -487,9 +487,9 @@ static ssize_t show_firmware(struct device *d,
(dev->version >> 8) & 0xf,
dev->version & 0xff);
}
-static DEVICE_ATTR(firmware, 0444, show_firmware, NULL);
+static DEVICE_ATTR_RO(firmware);
-static ssize_t show_hardware(struct device *d,
+static ssize_t hardware_show(struct device *d,
struct device_attribute *attr, char *buf)
{
struct usb_interface *intf = to_usb_interface(d);
@@ -500,9 +500,9 @@ static ssize_t show_hardware(struct device *d,
(dev->version >> 24) & 0xf,
(dev->version >> 16) & 0xff);
}
-static DEVICE_ATTR(hardware, 0444, show_hardware, NULL);
+static DEVICE_ATTR_RO(hardware);
-static ssize_t show_nets(struct device *d,
+static ssize_t nets_show(struct device *d,
struct device_attribute *attr, char *buf)
{
struct usb_interface *intf = to_usb_interface(d);
@@ -510,7 +510,7 @@ static ssize_t show_nets(struct device *d,
return sprintf(buf, "%d", dev->net_count);
}
-static DEVICE_ATTR(nets, 0444, show_nets, NULL);
+static DEVICE_ATTR_RO(nets);
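DEVICE_ATTR_RO(name) expands to a read-only (0444) attribute wired to a callback named name_show(), which is why the callbacks above were renamed from the show_firmware() style. A minimal sketch with a hypothetical attribute:

	static ssize_t foo_show(struct device *d, struct device_attribute *a,
				char *buf)
	{
		return sprintf(buf, "bar\n");
	}
	static DEVICE_ATTR_RO(foo);	/* creates dev_attr_foo with .show = foo_show */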
static int esd_usb2_send_msg(struct esd_usb2 *dev, struct esd_usb2_msg *msg)
{
diff --git a/drivers/net/can/usb/etas_es58x/es581_4.c b/drivers/net/can/usb/etas_es58x/es581_4.c
index 1985f772fc3c..14e360c9f2c9 100644
--- a/drivers/net/can/usb/etas_es58x/es581_4.c
+++ b/drivers/net/can/usb/etas_es58x/es581_4.c
@@ -355,7 +355,7 @@ static int es581_4_tx_can_msg(struct es58x_priv *priv,
return -EMSGSIZE;
if (priv->tx_can_msg_cnt == 0) {
- msg_len = 1; /* struct es581_4_bulk_tx_can_msg:num_can_msg */
+ msg_len = sizeof(es581_4_urb_cmd->bulk_tx_can_msg.num_can_msg);
es581_4_fill_urb_header(urb_cmd, ES581_4_CAN_COMMAND_TYPE,
ES581_4_CMD_ID_TX_MSG,
priv->channel_idx, msg_len);
@@ -371,8 +371,7 @@ static int es581_4_tx_can_msg(struct es58x_priv *priv,
return ret;
/* Fill message contents. */
- tx_can_msg = (struct es581_4_tx_can_msg *)
- &es581_4_urb_cmd->bulk_tx_can_msg.tx_can_msg_buf[msg_len - 1];
+ tx_can_msg = (typeof(tx_can_msg))&es581_4_urb_cmd->raw_msg[msg_len];
put_unaligned_le32(es58x_get_raw_can_id(cf), &tx_can_msg->can_id);
put_unaligned_le32(priv->tx_head, &tx_can_msg->packet_idx);
put_unaligned_le16((u16)es58x_get_flags(skb), &tx_can_msg->flags);
diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.c b/drivers/net/can/usb/etas_es58x/es58x_core.c
index 8e9102482c52..96a13c770e4a 100644
--- a/drivers/net/can/usb/etas_es58x/es58x_core.c
+++ b/drivers/net/can/usb/etas_es58x/es58x_core.c
@@ -19,7 +19,7 @@
#include "es58x_core.h"
#define DRV_VERSION "1.00"
-MODULE_AUTHOR("Mailhol Vincent <mailhol.vincent@wanadoo.fr>");
+MODULE_AUTHOR("Vincent Mailhol <mailhol.vincent@wanadoo.fr>");
MODULE_AUTHOR("Arunachalam Santhanam <arunachalam.santhanam@in.bosch.com>");
MODULE_DESCRIPTION("Socket CAN driver for ETAS ES58X USB adapters");
MODULE_VERSION(DRV_VERSION);
@@ -70,7 +70,7 @@ MODULE_DEVICE_TABLE(usb, es58x_id_table);
* bytes (the start of frame) are skipped and the CRC calculation
* starts on the third byte.
*/
-#define ES58X_CRC_CALC_OFFSET 2
+#define ES58X_CRC_CALC_OFFSET sizeof_field(union es58x_urb_cmd, sof)
/**
* es58x_calculate_crc() - Compute the crc16 of a given URB.
@@ -2108,6 +2108,25 @@ static int es58x_init_netdev(struct es58x_device *es58x_dev, int channel_idx)
}
/**
+ * es58x_free_netdevs() - Release all network resources of the device.
+ * @es58x_dev: ES58X device.
+ */
+static void es58x_free_netdevs(struct es58x_device *es58x_dev)
+{
+ int i;
+
+ for (i = 0; i < es58x_dev->num_can_ch; i++) {
+ struct net_device *netdev = es58x_dev->netdev[i];
+
+ if (!netdev)
+ continue;
+ unregister_candev(netdev);
+ es58x_dev->netdev[i] = NULL;
+ free_candev(netdev);
+ }
+}
+
+/**
* es58x_get_product_info() - Get the product information and print them.
* @es58x_dev: ES58X device.
*
@@ -2152,14 +2171,13 @@ static int es58x_get_product_info(struct es58x_device *es58x_dev)
/**
* es58x_init_es58x_dev() - Initialize the ES58X device.
* @intf: USB interface.
- * @p_es58x_dev: pointer to the address of the ES58X device.
* @driver_info: Quirks of the device.
*
- * Return: zero on success, errno when any error occurs.
+ * Return: pointer to an ES58X device on success, error pointer when
+ * any error occurs.
*/
-static int es58x_init_es58x_dev(struct usb_interface *intf,
- struct es58x_device **p_es58x_dev,
- kernel_ulong_t driver_info)
+static struct es58x_device *es58x_init_es58x_dev(struct usb_interface *intf,
+ kernel_ulong_t driver_info)
{
struct device *dev = &intf->dev;
struct es58x_device *es58x_dev;
@@ -2176,7 +2194,7 @@ static int es58x_init_es58x_dev(struct usb_interface *intf,
ret = usb_find_common_endpoints(intf->cur_altsetting, &ep_in, &ep_out,
NULL, NULL);
if (ret)
- return ret;
+ return ERR_PTR(ret);
if (driver_info & ES58X_FD_FAMILY) {
param = &es58x_fd_param;
@@ -2186,9 +2204,10 @@ static int es58x_init_es58x_dev(struct usb_interface *intf,
ops = &es581_4_ops;
}
- es58x_dev = kzalloc(es58x_sizeof_es58x_device(param), GFP_KERNEL);
+ es58x_dev = devm_kzalloc(dev, es58x_sizeof_es58x_device(param),
+ GFP_KERNEL);
if (!es58x_dev)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
es58x_dev->param = param;
es58x_dev->ops = ops;
@@ -2213,9 +2232,7 @@ static int es58x_init_es58x_dev(struct usb_interface *intf,
ep_out->bEndpointAddress);
es58x_dev->rx_max_packet_size = le16_to_cpu(ep_in->wMaxPacketSize);
- *p_es58x_dev = es58x_dev;
-
- return 0;
+ return es58x_dev;
}
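The conversion uses the ERR_PTR convention: the negative errno travels inside the returned pointer itself, so no out-parameter is needed. A minimal sketch of the round trip:

	return ERR_PTR(-ENOMEM);		/* producer signals failure */

	es58x_dev = es58x_init_es58x_dev(intf, id->driver_info);
	if (IS_ERR(es58x_dev))
		return PTR_ERR(es58x_dev);	/* consumer recovers -ENOMEM */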
/**
@@ -2232,30 +2249,21 @@ static int es58x_probe(struct usb_interface *intf,
struct es58x_device *es58x_dev;
int ch_idx, ret;
- ret = es58x_init_es58x_dev(intf, &es58x_dev, id->driver_info);
- if (ret)
- return ret;
+ es58x_dev = es58x_init_es58x_dev(intf, id->driver_info);
+ if (IS_ERR(es58x_dev))
+ return PTR_ERR(es58x_dev);
ret = es58x_get_product_info(es58x_dev);
if (ret)
- goto cleanup_es58x_dev;
+ return ret;
for (ch_idx = 0; ch_idx < es58x_dev->num_can_ch; ch_idx++) {
ret = es58x_init_netdev(es58x_dev, ch_idx);
- if (ret)
- goto cleanup_candev;
- }
-
- return ret;
-
- cleanup_candev:
- for (ch_idx = 0; ch_idx < es58x_dev->num_can_ch; ch_idx++)
- if (es58x_dev->netdev[ch_idx]) {
- unregister_candev(es58x_dev->netdev[ch_idx]);
- free_candev(es58x_dev->netdev[ch_idx]);
+ if (ret) {
+ es58x_free_netdevs(es58x_dev);
+ return ret;
}
- cleanup_es58x_dev:
- kfree(es58x_dev);
+ }
return ret;
}
@@ -2270,24 +2278,12 @@ static int es58x_probe(struct usb_interface *intf,
static void es58x_disconnect(struct usb_interface *intf)
{
struct es58x_device *es58x_dev = usb_get_intfdata(intf);
- struct net_device *netdev;
- int i;
dev_info(&intf->dev, "Disconnecting %s %s\n",
es58x_dev->udev->manufacturer, es58x_dev->udev->product);
- for (i = 0; i < es58x_dev->num_can_ch; i++) {
- netdev = es58x_dev->netdev[i];
- if (!netdev)
- continue;
- unregister_candev(netdev);
- es58x_dev->netdev[i] = NULL;
- free_candev(netdev);
- }
-
+ es58x_free_netdevs(es58x_dev);
es58x_free_urbs(es58x_dev);
-
- kfree(es58x_dev);
usb_set_intfdata(intf, NULL);
}
diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.h b/drivers/net/can/usb/etas_es58x/es58x_core.h
index fcf219e727bf..826a15871573 100644
--- a/drivers/net/can/usb/etas_es58x/es58x_core.h
+++ b/drivers/net/can/usb/etas_es58x/es58x_core.h
@@ -287,7 +287,7 @@ struct es58x_priv {
* @rx_urb_cmd_max_len: Maximum length of a RX URB command.
* @fifo_mask: Bit mask to quickly convert the tx_tail and tx_head
* field of the struct es58x_priv into echo_skb
- * indexes. Properties: @fifo_mask = echos_skb_max - 1 where
+ * indexes. Properties: @fifo_mask = echo_skb_max - 1 where
* echo_skb_max must be a power of two. Also, echo_skb_max must
* not exceed the maximum size of the device internal TX FIFO
* length. This parameter is used to control the network queue
diff --git a/drivers/net/can/usb/etas_es58x/es58x_fd.c b/drivers/net/can/usb/etas_es58x/es58x_fd.c
index 1a2779d383a4..af042aa55f59 100644
--- a/drivers/net/can/usb/etas_es58x/es58x_fd.c
+++ b/drivers/net/can/usb/etas_es58x/es58x_fd.c
@@ -357,8 +357,7 @@ static int es58x_fd_tx_can_msg(struct es58x_priv *priv,
return ret;
/* Fill message contents. */
- tx_can_msg = (struct es58x_fd_tx_can_msg *)
- &es58x_fd_urb_cmd->tx_can_msg_buf[msg_len];
+ tx_can_msg = (typeof(tx_can_msg))&es58x_fd_urb_cmd->raw_msg[msg_len];
tx_can_msg->packet_idx = (u8)priv->tx_head;
put_unaligned_le32(es58x_get_raw_can_id(cf), &tx_can_msg->can_id);
tx_can_msg->flags = (u8)es58x_get_flags(skb);
@@ -463,9 +462,9 @@ static int es58x_fd_get_timestamp(struct es58x_device *es58x_dev)
}
/* Nominal bittiming constants for ES582.1 and ES584.1 as specified in
- * the microcontroller datasheet: "SAM E701/S70/V70/V71 Family"
- * section 49.6.8 "MCAN Nominal Bit Timing and Prescaler Register"
- * from Microchip.
+ * the microcontroller datasheet: "SAM E70/S70/V70/V71 Family" section
+ * 49.6.8 "MCAN Nominal Bit Timing and Prescaler Register" from
+ * Microchip.
*
* The values from the specification are the hardware register
* values. To convert them to the functional values, all ranges were
@@ -484,8 +483,8 @@ static const struct can_bittiming_const es58x_fd_nom_bittiming_const = {
};
/* Data bittiming constants for ES582.1 and ES584.1 as specified in
- * the microcontroller datasheet: "SAM E701/S70/V70/V71 Family"
- * section 49.6.4 "MCAN Data Bit Timing and Prescaler Register" from
+ * the microcontroller datasheet: "SAM E70/S70/V70/V71 Family" section
+ * 49.6.4 "MCAN Data Bit Timing and Prescaler Register" from
* Microchip.
*/
static const struct can_bittiming_const es58x_fd_data_bittiming_const = {
@@ -501,9 +500,9 @@ static const struct can_bittiming_const es58x_fd_data_bittiming_const = {
};
/* Transmission Delay Compensation constants for ES582.1 and ES584.1
- * as specified in the microcontroller datasheet: "SAM
- * E701/S70/V70/V71 Family" section 49.6.15 "MCAN Transmitter Delay
- * Compensation Register" from Microchip.
+ * as specified in the microcontroller datasheet: "SAM E70/S70/V70/V71
+ * Family" section 49.6.15 "MCAN Transmitter Delay Compensation
+ * Register" from Microchip.
*/
static const struct can_tdc_const es58x_tdc_const = {
.tdcv_max = 0, /* Manual mode not supported. */
diff --git a/drivers/net/can/usb/etas_es58x/es58x_fd.h b/drivers/net/can/usb/etas_es58x/es58x_fd.h
index ee18a87e40c0..a191891b8777 100644
--- a/drivers/net/can/usb/etas_es58x/es58x_fd.h
+++ b/drivers/net/can/usb/etas_es58x/es58x_fd.h
@@ -96,23 +96,14 @@ struct es58x_fd_bittiming {
* @ctrlmode: type enum es58x_fd_ctrlmode.
* @canfd_enabled: boolean (0: Classical CAN, 1: CAN and/or CANFD).
* @data_bittiming: Bittiming for flexible data-rate transmission.
- * @tdc_enabled: Transmitter Delay Compensation switch (0: disabled,
- * 1: enabled). On very high bitrates, the delay between when the
- * bit is sent and received on the CANTX and CANRX pins of the
- * transceiver start to be significant enough for errors to occur
- * and thus need to be compensated.
- * @tdco: Transmitter Delay Compensation Offset. Offset value, in time
- * quanta, defining the delay between the start of the bit
- * reception on the CANRX pin of the transceiver and the SSP
- * (Secondary Sample Point). Valid values: 0 to 127.
- * @tdcf: Transmitter Delay Compensation Filter window. Defines the
- * minimum value for the SSP position, in time quanta. The
- * feature is enabled when TDCF is configured to a value greater
- * than TDCO. Valid values: 0 to 127.
+ * @tdc_enabled: Transmitter Delay Compensation switch (0: TDC is
+ * disabled, 1: TDC is enabled).
+ * @tdco: Transmitter Delay Compensation Offset.
+ * @tdcf: Transmitter Delay Compensation Filter window.
*
- * Please refer to the microcontroller datasheet: "SAM
- * E701/S70/V70/V71 Family" section 49 "Controller Area Network
- * (MCAN)" for additional information.
+ * Please refer to the microcontroller datasheet: "SAM E70/S70/V70/V71
+ * Family" section 49 "Controller Area Network (MCAN)" for additional
+ * information.
*/
struct es58x_fd_tx_conf_msg {
struct es58x_fd_bittiming nominal_bittiming;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
index 899a3d21b77f..837b3fecd71e 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
@@ -63,6 +63,8 @@
#define PCAN_USB_MSG_HEADER_LEN 2
+#define PCAN_USB_MSG_TX_CAN 2 /* Tx msg is a CAN frame */
+
/* PCAN-USB adapter internal clock (MHz) */
#define PCAN_USB_CRYSTAL_HZ 16000000
@@ -73,6 +75,10 @@
#define PCAN_USB_STATUSLEN_RTR (1 << 4)
#define PCAN_USB_STATUSLEN_DLC (0xf)
+/* PCAN-USB 4.1 CAN Id tx extended flags */
+#define PCAN_USB_TX_SRR 0x01 /* SJA1000 SRR command */
+#define PCAN_USB_TX_AT 0x02 /* SJA1000 AT command */
+
/* PCAN-USB error flags */
#define PCAN_USB_ERROR_TXFULL 0x01
#define PCAN_USB_ERROR_RXQOVR 0x02
@@ -385,7 +391,8 @@ static int pcan_usb_get_device_id(struct peak_usb_device *dev, u32 *device_id)
if (err)
netdev_err(dev->netdev, "getting device id failure: %d\n", err);
- *device_id = args[0];
+ else
+ *device_id = args[0];
return err;
}
@@ -446,145 +453,65 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
{
struct sk_buff *skb;
struct can_frame *cf;
- enum can_state new_state;
+ enum can_state new_state = CAN_STATE_ERROR_ACTIVE;
/* ignore this error until 1st ts received */
if (n == PCAN_USB_ERROR_QOVR)
if (!mc->pdev->time_ref.tick_count)
return 0;
- new_state = mc->pdev->dev.can.state;
-
- switch (mc->pdev->dev.can.state) {
- case CAN_STATE_ERROR_ACTIVE:
- if (n & PCAN_USB_ERROR_BUS_LIGHT) {
- new_state = CAN_STATE_ERROR_WARNING;
- break;
- }
- fallthrough;
-
- case CAN_STATE_ERROR_WARNING:
- if (n & PCAN_USB_ERROR_BUS_HEAVY) {
- new_state = CAN_STATE_ERROR_PASSIVE;
- break;
- }
- if (n & PCAN_USB_ERROR_BUS_OFF) {
- new_state = CAN_STATE_BUS_OFF;
- break;
- }
- if (n & ~PCAN_USB_ERROR_BUS) {
- /*
- * trick to bypass next comparison and process other
- * errors
- */
- new_state = CAN_STATE_MAX;
- break;
- }
- if ((n & PCAN_USB_ERROR_BUS_LIGHT) == 0) {
- /* no error (back to active state) */
- new_state = CAN_STATE_ERROR_ACTIVE;
- break;
- }
- break;
-
- case CAN_STATE_ERROR_PASSIVE:
- if (n & PCAN_USB_ERROR_BUS_OFF) {
- new_state = CAN_STATE_BUS_OFF;
- break;
- }
- if (n & PCAN_USB_ERROR_BUS_LIGHT) {
- new_state = CAN_STATE_ERROR_WARNING;
- break;
- }
- if (n & ~PCAN_USB_ERROR_BUS) {
- /*
- * trick to bypass next comparison and process other
- * errors
- */
- new_state = CAN_STATE_MAX;
- break;
- }
-
- if ((n & PCAN_USB_ERROR_BUS_HEAVY) == 0) {
- /* no error (back to warning state) */
- new_state = CAN_STATE_ERROR_WARNING;
- break;
- }
- break;
-
- default:
- /* do nothing waiting for restart */
- return 0;
- }
-
- /* donot post any error if current state didn't change */
- if (mc->pdev->dev.can.state == new_state)
- return 0;
-
/* allocate an skb to store the error frame */
skb = alloc_can_err_skb(mc->netdev, &cf);
- if (!skb)
- return -ENOMEM;
-
- switch (new_state) {
- case CAN_STATE_BUS_OFF:
- cf->can_id |= CAN_ERR_BUSOFF;
- mc->pdev->dev.can.can_stats.bus_off++;
- can_bus_off(mc->netdev);
- break;
-
- case CAN_STATE_ERROR_PASSIVE:
- cf->can_id |= CAN_ERR_CRTL;
- cf->data[1] = (mc->pdev->bec.txerr > mc->pdev->bec.rxerr) ?
- CAN_ERR_CRTL_TX_PASSIVE :
- CAN_ERR_CRTL_RX_PASSIVE;
- cf->data[6] = mc->pdev->bec.txerr;
- cf->data[7] = mc->pdev->bec.rxerr;
-
- mc->pdev->dev.can.can_stats.error_passive++;
- break;
-
- case CAN_STATE_ERROR_WARNING:
- cf->can_id |= CAN_ERR_CRTL;
- cf->data[1] = (mc->pdev->bec.txerr > mc->pdev->bec.rxerr) ?
- CAN_ERR_CRTL_TX_WARNING :
- CAN_ERR_CRTL_RX_WARNING;
- cf->data[6] = mc->pdev->bec.txerr;
- cf->data[7] = mc->pdev->bec.rxerr;
-
- mc->pdev->dev.can.can_stats.error_warning++;
- break;
- case CAN_STATE_ERROR_ACTIVE:
- cf->can_id |= CAN_ERR_CRTL;
- cf->data[1] = CAN_ERR_CRTL_ACTIVE;
-
- /* sync local copies of rxerr/txerr counters */
- mc->pdev->bec.txerr = 0;
- mc->pdev->bec.rxerr = 0;
- break;
-
- default:
- /* CAN_STATE_MAX (trick to handle other errors) */
- if (n & PCAN_USB_ERROR_TXQFULL)
- netdev_dbg(mc->netdev, "device Tx queue full)\n");
-
- if (n & PCAN_USB_ERROR_RXQOVR) {
- netdev_dbg(mc->netdev, "data overrun interrupt\n");
+ if (n & PCAN_USB_ERROR_RXQOVR) {
+ /* data overrun interrupt */
+ netdev_dbg(mc->netdev, "data overrun interrupt\n");
+ mc->netdev->stats.rx_over_errors++;
+ mc->netdev->stats.rx_errors++;
+ if (cf) {
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
- mc->netdev->stats.rx_over_errors++;
- mc->netdev->stats.rx_errors++;
}
+ }
- cf->data[6] = mc->pdev->bec.txerr;
- cf->data[7] = mc->pdev->bec.rxerr;
+ if (n & PCAN_USB_ERROR_TXQFULL)
+ netdev_dbg(mc->netdev, "device Tx queue full\n");
- new_state = mc->pdev->dev.can.state;
- break;
+ if (n & PCAN_USB_ERROR_BUS_OFF) {
+ new_state = CAN_STATE_BUS_OFF;
+ } else if (n & PCAN_USB_ERROR_BUS_HEAVY) {
+ new_state = ((mc->pdev->bec.txerr >= 128) ||
+ (mc->pdev->bec.rxerr >= 128)) ?
+ CAN_STATE_ERROR_PASSIVE :
+ CAN_STATE_ERROR_WARNING;
+ } else {
+ new_state = CAN_STATE_ERROR_ACTIVE;
}
- mc->pdev->dev.can.state = new_state;
+ /* handle change of state */
+ if (new_state != mc->pdev->dev.can.state) {
+ enum can_state tx_state =
+ (mc->pdev->bec.txerr >= mc->pdev->bec.rxerr) ?
+ new_state : 0;
+ enum can_state rx_state =
+ (mc->pdev->bec.txerr <= mc->pdev->bec.rxerr) ?
+ new_state : 0;
+
+ can_change_state(mc->netdev, cf, tx_state, rx_state);
+
+ if (new_state == CAN_STATE_BUS_OFF) {
+ can_bus_off(mc->netdev);
+ } else if (cf && (cf->can_id & CAN_ERR_CRTL)) {
+ /* Supply TX/RX error counters in case of
+ * controller error.
+ */
+ cf->data[6] = mc->pdev->bec.txerr;
+ cf->data[7] = mc->pdev->bec.rxerr;
+ }
+ }
+
+ if (!skb)
+ return -ENOMEM;
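A worked example with hypothetical counters: txerr = 140 and rxerr = 20 with BUS_HEAVY set yield new_state = CAN_STATE_ERROR_PASSIVE; since txerr >= rxerr, the transition is attributed to the transmit side:

	/* hypothetical: txerr = 140, rxerr = 20, n has PCAN_USB_ERROR_BUS_HEAVY */
	/* -> tx_state = CAN_STATE_ERROR_PASSIVE, rx_state = 0 */
	can_change_state(mc->netdev, cf, tx_state, rx_state);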
if (status_len & PCAN_USB_STATUSLEN_TIMESTAMP) {
struct skb_shared_hwtstamps *hwts = skb_hwtstamps(skb);
@@ -706,6 +633,7 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len)
struct sk_buff *skb;
struct can_frame *cf;
struct skb_shared_hwtstamps *hwts;
+ u32 can_id_flags;
skb = alloc_can_skb(mc->netdev, &cf);
if (!skb)
@@ -715,13 +643,15 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len)
if ((mc->ptr + 4) > mc->end)
goto decode_failed;
- cf->can_id = get_unaligned_le32(mc->ptr) >> 3 | CAN_EFF_FLAG;
+ can_id_flags = get_unaligned_le32(mc->ptr);
+ cf->can_id = can_id_flags >> 3 | CAN_EFF_FLAG;
mc->ptr += 4;
} else {
if ((mc->ptr + 2) > mc->end)
goto decode_failed;
- cf->can_id = get_unaligned_le16(mc->ptr) >> 5;
+ can_id_flags = get_unaligned_le16(mc->ptr);
+ cf->can_id = can_id_flags >> 5;
mc->ptr += 2;
}
@@ -744,6 +674,10 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len)
memcpy(cf->data, mc->ptr, cf->len);
mc->ptr += rec_len;
+
+ /* Ignore next byte (client private id) if SRR bit is set */
+ if (can_id_flags & PCAN_USB_TX_SRR)
+ mc->ptr++;
}
/* convert timestamp into kernel time */
@@ -821,10 +755,11 @@ static int pcan_usb_encode_msg(struct peak_usb_device *dev, struct sk_buff *skb,
struct net_device *netdev = dev->netdev;
struct net_device_stats *stats = &netdev->stats;
struct can_frame *cf = (struct can_frame *)skb->data;
+ u32 can_id_flags = cf->can_id & CAN_ERR_MASK;
u8 *pc;
- obuf[0] = 2;
- obuf[1] = 1;
+ obuf[0] = PCAN_USB_MSG_TX_CAN;
+ obuf[1] = 1; /* only one CAN frame is stored in the packet */
pc = obuf + PCAN_USB_MSG_HEADER_LEN;
@@ -839,12 +774,28 @@ static int pcan_usb_encode_msg(struct peak_usb_device *dev, struct sk_buff *skb,
*pc |= PCAN_USB_STATUSLEN_EXT_ID;
pc++;
- put_unaligned_le32((cf->can_id & CAN_ERR_MASK) << 3, pc);
+ can_id_flags <<= 3;
+
+ if (dev->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
+ can_id_flags |= PCAN_USB_TX_SRR;
+
+ if (dev->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
+ can_id_flags |= PCAN_USB_TX_AT;
+
+ put_unaligned_le32(can_id_flags, pc);
pc += 4;
} else {
pc++;
- put_unaligned_le16((cf->can_id & CAN_ERR_MASK) << 5, pc);
+ can_id_flags <<= 5;
+
+ if (dev->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
+ can_id_flags |= PCAN_USB_TX_SRR;
+
+ if (dev->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
+ can_id_flags |= PCAN_USB_TX_AT;
+
+ put_unaligned_le16(can_id_flags, pc);
pc += 2;
}
@@ -854,6 +805,10 @@ static int pcan_usb_encode_msg(struct peak_usb_device *dev, struct sk_buff *skb,
pc += cf->len;
}
+ /* SRR bit needs a writer id (useless here) */
+ if (can_id_flags & PCAN_USB_TX_SRR)
+ *pc++ = 0x80;
+
obuf[(*size)-1] = (u8)(stats->tx_packets & 0xff);
return 0;
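A worked example, assuming a standard frame with hypothetical id 0x123 sent in loopback mode: the id is shifted left by 5 and the SRR flag is ORed in, and one writer-id byte (0x80) is appended after the data:

	/* hypothetical: cf->can_id = 0x123 (standard frame), loopback enabled */
	can_id_flags = 0x123 << 5;		/* 0x2460 */
	can_id_flags |= PCAN_USB_TX_SRR;	/* 0x2461 */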
@@ -928,6 +883,19 @@ static int pcan_usb_init(struct peak_usb_device *dev)
return err;
}
+ /* Since rev 4.1, PCAN-USB is able to make single-shot as well as
+ * looped back frames.
+ */
+ if (dev->device_rev >= 41) {
+ struct can_priv *priv = netdev_priv(dev->netdev);
+
+ priv->ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT |
+ CAN_CTRLMODE_LOOPBACK;
+ } else {
+ dev_info(dev->netdev->dev.parent,
+ "Firmware update available. Please contact support@peak-system.com\n");
+ }
+
dev_info(dev->netdev->dev.parent,
"PEAK-System %s adapter hwrev %u serial %08X (%u channel)\n",
pcan_usb.name, dev->device_rev, serial_number,
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index b23e3488695b..bd1417a66cbf 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -2016,15 +2016,6 @@ int b53_br_flags(struct dsa_switch *ds, int port,
}
EXPORT_SYMBOL(b53_br_flags);
-int b53_set_mrouter(struct dsa_switch *ds, int port, bool mrouter,
- struct netlink_ext_ack *extack)
-{
- b53_port_set_mcast_flood(ds->priv, port, mrouter);
-
- return 0;
-}
-EXPORT_SYMBOL(b53_set_mrouter);
-
static bool b53_possible_cpu_port(struct dsa_switch *ds, int port)
{
/* Broadcom switches will accept enabling Broadcom tags on the
@@ -2268,7 +2259,6 @@ static const struct dsa_switch_ops b53_switch_ops = {
.port_bridge_leave = b53_br_leave,
.port_pre_bridge_flags = b53_br_flags_pre,
.port_bridge_flags = b53_br_flags,
- .port_set_mrouter = b53_set_mrouter,
.port_stp_state_set = b53_br_set_stp_state,
.port_fast_age = b53_br_fast_age,
.port_vlan_filtering = b53_vlan_filtering,
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index 82700a5714c1..9bf8319342b0 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -328,8 +328,6 @@ int b53_br_flags_pre(struct dsa_switch *ds, int port,
int b53_br_flags(struct dsa_switch *ds, int port,
struct switchdev_brport_flags flags,
struct netlink_ext_ack *extack);
-int b53_set_mrouter(struct dsa_switch *ds, int port, bool mrouter,
- struct netlink_ext_ack *extack);
int b53_setup_devlink_resources(struct dsa_switch *ds);
void b53_port_event(struct dsa_switch *ds, int port);
void b53_phylink_validate(struct dsa_switch *ds, int port,
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 3b018fcf4412..6ce9ec1283e0 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -1199,7 +1199,6 @@ static const struct dsa_switch_ops bcm_sf2_ops = {
.port_pre_bridge_flags = b53_br_flags_pre,
.port_bridge_flags = b53_br_flags,
.port_stp_state_set = b53_br_set_stp_state,
- .port_set_mrouter = b53_set_mrouter,
.port_fast_age = b53_br_fast_age,
.port_vlan_filtering = b53_vlan_filtering,
.port_vlan_add = b53_vlan_add,
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 632f0fcc5aa7..d757d9dcba51 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -367,8 +367,8 @@ mt7530_fdb_write(struct mt7530_priv *priv, u16 vid,
int i;
reg[1] |= vid & CVID_MASK;
- if (vid > 1)
- reg[1] |= ATA2_IVL;
+ reg[1] |= ATA2_IVL;
+ reg[1] |= ATA2_FID(FID_BRIDGED);
reg[2] |= (aging & AGE_TIMER_MASK) << AGE_TIMER;
reg[2] |= (port_mask & PORT_MAP_MASK) << PORT_MAP;
/* STATIC_ENT indicate that entry is static wouldn't
@@ -1022,6 +1022,10 @@ mt753x_cpu_port_enable(struct dsa_switch *ds, int port)
mt7530_write(priv, MT7530_PCR_P(port),
PCR_MATRIX(dsa_user_ports(priv->ds)));
+ /* Set to fallback mode for independent VLAN learning */
+ mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
+ MT7530_PORT_FALLBACK_MODE);
+
return 0;
}
@@ -1144,7 +1148,8 @@ mt7530_stp_state_set(struct dsa_switch *ds, int port, u8 state)
break;
}
- mt7530_rmw(priv, MT7530_SSP_P(port), FID_PST_MASK, stp_state);
+ mt7530_rmw(priv, MT7530_SSP_P(port), FID_PST_MASK(FID_BRIDGED),
+ FID_PST(FID_BRIDGED, stp_state));
}
static int
@@ -1186,18 +1191,6 @@ mt7530_port_bridge_flags(struct dsa_switch *ds, int port,
}
static int
-mt7530_port_set_mrouter(struct dsa_switch *ds, int port, bool mrouter,
- struct netlink_ext_ack *extack)
-{
- struct mt7530_priv *priv = ds->priv;
-
- mt7530_rmw(priv, MT7530_MFC, UNM_FFP(BIT(port)),
- mrouter ? UNM_FFP(BIT(port)) : 0);
-
- return 0;
-}
-
-static int
mt7530_port_bridge_join(struct dsa_switch *ds, int port,
struct net_device *bridge)
{
@@ -1230,6 +1223,10 @@ mt7530_port_bridge_join(struct dsa_switch *ds, int port,
PCR_MATRIX_MASK, PCR_MATRIX(port_bitmap));
priv->ports[port].pm |= PCR_MATRIX(port_bitmap);
+ /* Set to fallback mode for independent VLAN learning */
+ mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
+ MT7530_PORT_FALLBACK_MODE);
+
mutex_unlock(&priv->reg_mutex);
return 0;
@@ -1242,15 +1239,22 @@ mt7530_port_set_vlan_unaware(struct dsa_switch *ds, int port)
bool all_user_ports_removed = true;
int i;
- /* When a port is removed from the bridge, the port would be set up
- * back to the default as is at initial boot which is a VLAN-unaware
- * port.
+ /* This is called after .port_bridge_leave when leaving a VLAN-aware
+ * bridge. Don't set standalone ports to fallback mode.
*/
- mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
- MT7530_PORT_MATRIX_MODE);
- mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK | PVC_EG_TAG_MASK,
+ if (dsa_to_port(ds, port)->bridge_dev)
+ mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
+ MT7530_PORT_FALLBACK_MODE);
+
+ mt7530_rmw(priv, MT7530_PVC_P(port),
+ VLAN_ATTR_MASK | PVC_EG_TAG_MASK | ACC_FRM_MASK,
VLAN_ATTR(MT7530_VLAN_TRANSPARENT) |
- PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
+ PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT) |
+ MT7530_VLAN_ACC_ALL);
+
+ /* Set PVID to 0 */
+ mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK,
+ G0_PORT_VID_DEF);
for (i = 0; i < MT7530_NUM_PORTS; i++) {
if (dsa_is_user_port(ds, i) &&
@@ -1277,15 +1281,19 @@ mt7530_port_set_vlan_aware(struct dsa_switch *ds, int port)
struct mt7530_priv *priv = ds->priv;
/* Trapped into security mode allows packet forwarding through VLAN
- * table lookup. CPU port is set to fallback mode to let untagged
- * frames pass through.
+ * table lookup.
*/
- if (dsa_is_cpu_port(ds, port))
- mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
- MT7530_PORT_FALLBACK_MODE);
- else
+ if (dsa_is_user_port(ds, port)) {
mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
MT7530_PORT_SECURITY_MODE);
+ mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK,
+ G0_PORT_VID(priv->ports[port].pvid));
+
+ /* Only accept tagged frames if PVID is not set */
+ if (!priv->ports[port].pvid)
+ mt7530_rmw(priv, MT7530_PVC_P(port), ACC_FRM_MASK,
+ MT7530_VLAN_ACC_TAGGED);
+ }
/* Set the port as a user port which is to be able to recognize VID
* from incoming packets before fetching entry within the VLAN table.
@@ -1308,11 +1316,8 @@ mt7530_port_bridge_leave(struct dsa_switch *ds, int port,
/* Remove this port from the port matrix of the other ports
* in the same bridge. If the port is disabled, port matrix
* is kept and not being setup until the port becomes enabled.
- * And the other port's port matrix cannot be broken when the
- * other port is still a VLAN-aware port.
*/
- if (dsa_is_user_port(ds, i) && i != port &&
- !dsa_port_is_vlan_filtering(dsa_to_port(ds, i))) {
+ if (dsa_is_user_port(ds, i) && i != port) {
if (dsa_to_port(ds, i)->bridge_dev != bridge)
continue;
if (priv->ports[i].enable)
@@ -1330,6 +1335,13 @@ mt7530_port_bridge_leave(struct dsa_switch *ds, int port,
PCR_MATRIX(BIT(MT7530_CPU_PORT)));
priv->ports[port].pm = PCR_MATRIX(BIT(MT7530_CPU_PORT));
+ /* When a port is removed from the bridge, the port would be set up
+ * back to the default as is at initial boot which is a VLAN-unaware
+ * port.
+ */
+ mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
+ MT7530_PORT_MATRIX_MODE);
+
mutex_unlock(&priv->reg_mutex);
}
@@ -1512,7 +1524,8 @@ mt7530_hw_vlan_add(struct mt7530_priv *priv,
/* Validate the entry with independent learning, create egress tag per
* VLAN and joining the port as one of the port members.
*/
- val = IVL_MAC | VTAG_EN | PORT_MEM(new_members) | VLAN_VALID;
+ val = IVL_MAC | VTAG_EN | PORT_MEM(new_members) | FID(FID_BRIDGED) |
+ VLAN_VALID;
mt7530_write(priv, MT7530_VAWD1, val);
/* Decide whether adding tag or not for those outgoing packets from the
@@ -1602,9 +1615,28 @@ mt7530_port_vlan_add(struct dsa_switch *ds, int port,
mt7530_hw_vlan_update(priv, vlan->vid, &new_entry, mt7530_hw_vlan_add);
if (pvid) {
- mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK,
- G0_PORT_VID(vlan->vid));
priv->ports[port].pvid = vlan->vid;
+
+ /* Accept all frames if PVID is set */
+ mt7530_rmw(priv, MT7530_PVC_P(port), ACC_FRM_MASK,
+ MT7530_VLAN_ACC_ALL);
+
+ /* Only configure PVID if VLAN filtering is enabled */
+ if (dsa_port_is_vlan_filtering(dsa_to_port(ds, port)))
+ mt7530_rmw(priv, MT7530_PPBV1_P(port),
+ G0_PORT_VID_MASK,
+ G0_PORT_VID(vlan->vid));
+ } else if (vlan->vid && priv->ports[port].pvid == vlan->vid) {
+ /* This VLAN is overwritten without PVID, so unset it */
+ priv->ports[port].pvid = G0_PORT_VID_DEF;
+
+ /* Only accept tagged frames if the port is VLAN-aware */
+ if (dsa_port_is_vlan_filtering(dsa_to_port(ds, port)))
+ mt7530_rmw(priv, MT7530_PVC_P(port), ACC_FRM_MASK,
+ MT7530_VLAN_ACC_TAGGED);
+
+ mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK,
+ G0_PORT_VID_DEF);
}
mutex_unlock(&priv->reg_mutex);
@@ -1618,11 +1650,9 @@ mt7530_port_vlan_del(struct dsa_switch *ds, int port,
{
struct mt7530_hw_vlan_entry target_entry;
struct mt7530_priv *priv = ds->priv;
- u16 pvid;
mutex_lock(&priv->reg_mutex);
- pvid = priv->ports[port].pvid;
mt7530_hw_vlan_entry_init(&target_entry, port, 0);
mt7530_hw_vlan_update(priv, vlan->vid, &target_entry,
mt7530_hw_vlan_del);
@@ -1630,11 +1660,18 @@ mt7530_port_vlan_del(struct dsa_switch *ds, int port,
/* PVID is being restored to the default whenever the PVID port
* is being removed from the VLAN.
*/
- if (pvid == vlan->vid)
- pvid = G0_PORT_VID_DEF;
+ if (priv->ports[port].pvid == vlan->vid) {
+ priv->ports[port].pvid = G0_PORT_VID_DEF;
+
+ /* Only accept tagged frames if the port is VLAN-aware */
+ if (dsa_port_is_vlan_filtering(dsa_to_port(ds, port)))
+ mt7530_rmw(priv, MT7530_PVC_P(port), ACC_FRM_MASK,
+ MT7530_VLAN_ACC_TAGGED);
+
+ mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK,
+ G0_PORT_VID_DEF);
+ }
- mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK, pvid);
- priv->ports[port].pvid = pvid;
mutex_unlock(&priv->reg_mutex);
@@ -1718,15 +1755,7 @@ static enum dsa_tag_protocol
mtk_get_tag_protocol(struct dsa_switch *ds, int port,
enum dsa_tag_protocol mp)
{
- struct mt7530_priv *priv = ds->priv;
-
- if (port != MT7530_CPU_PORT) {
- dev_warn(priv->dev,
- "port not matched with tagging CPU port\n");
- return DSA_TAG_PROTO_NONE;
- } else {
- return DSA_TAG_PROTO_MTK;
- }
+ return DSA_TAG_PROTO_MTK;
}
#ifdef CONFIG_GPIOLIB
@@ -2055,6 +2084,7 @@ mt7530_setup(struct dsa_switch *ds)
* as two netdev instances.
*/
dn = dsa_to_port(ds, MT7530_CPU_PORT)->master->dev.of_node->parent;
+ ds->assisted_learning_on_cpu_port = true;
ds->mtu_enforcement_ingress = true;
if (priv->id == ID_MT7530) {
@@ -2125,6 +2155,9 @@ mt7530_setup(struct dsa_switch *ds)
mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK,
PCR_MATRIX_CLR);
+ /* Disable learning by default on all ports */
+ mt7530_set(priv, MT7530_PSC_P(i), SA_DIS);
+
if (dsa_is_cpu_port(ds, i)) {
ret = mt753x_cpu_port_enable(ds, i);
if (ret)
@@ -2132,8 +2165,9 @@ mt7530_setup(struct dsa_switch *ds)
} else {
mt7530_port_disable(ds, i);
- /* Disable learning by default on all user ports */
- mt7530_set(priv, MT7530_PSC_P(i), SA_DIS);
+ /* Set default PVID to 0 on all user ports */
+ mt7530_rmw(priv, MT7530_PPBV1_P(i), G0_PORT_VID_MASK,
+ G0_PORT_VID_DEF);
}
/* Enable consistent egress tag */
mt7530_rmw(priv, MT7530_PVC_P(i), PVC_EG_TAG_MASK,
@@ -2290,6 +2324,9 @@ mt7531_setup(struct dsa_switch *ds)
mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK,
PCR_MATRIX_CLR);
+ /* Disable learning by default on all ports */
+ mt7530_set(priv, MT7530_PSC_P(i), SA_DIS);
+
mt7530_set(priv, MT7531_DBG_CNT(i), MT7531_DIS_CLR);
if (dsa_is_cpu_port(ds, i)) {
@@ -2299,8 +2336,9 @@ mt7531_setup(struct dsa_switch *ds)
} else {
mt7530_port_disable(ds, i);
- /* Disable learning by default on all user ports */
- mt7530_set(priv, MT7530_PSC_P(i), SA_DIS);
+ /* Set default PVID to 0 on all user ports */
+ mt7530_rmw(priv, MT7530_PPBV1_P(i), G0_PORT_VID_MASK,
+ G0_PORT_VID_DEF);
}
/* Enable consistent egress tag */
@@ -2308,6 +2346,7 @@ mt7531_setup(struct dsa_switch *ds)
PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
}
+ ds->assisted_learning_on_cpu_port = true;
ds->mtu_enforcement_ingress = true;
/* Flush the FDB table */
@@ -3061,7 +3100,6 @@ static const struct dsa_switch_ops mt7530_switch_ops = {
.port_stp_state_set = mt7530_stp_state_set,
.port_pre_bridge_flags = mt7530_port_pre_bridge_flags,
.port_bridge_flags = mt7530_port_bridge_flags,
- .port_set_mrouter = mt7530_port_set_mrouter,
.port_bridge_join = mt7530_port_bridge_join,
.port_bridge_leave = mt7530_port_bridge_leave,
.port_fdb_add = mt7530_port_fdb_add,
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index b19b389ff10a..fe4cd2ac26d0 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -80,6 +80,7 @@ enum mt753x_bpdu_port_fw {
#define STATIC_ENT 3
#define MT7530_ATA2 0x78
#define ATA2_IVL BIT(15)
+#define ATA2_FID(x) (((x) & 0x7) << 12)
/* Register for address table write data */
#define MT7530_ATWD 0x7c
@@ -148,11 +149,18 @@ enum mt7530_vlan_cmd {
#define VTAG_EN BIT(28)
/* VLAN Member Control */
#define PORT_MEM(x) (((x) & 0xff) << 16)
+/* Filter ID */
+#define FID(x) (((x) & 0x7) << 1)
/* VLAN Entry Valid */
#define VLAN_VALID BIT(0)
#define PORT_MEM_SHFT 16
#define PORT_MEM_MASK 0xff
+enum mt7530_fid {
+ FID_STANDALONE = 0,
+ FID_BRIDGED = 1,
+};
+
#define MT7530_VAWD2 0x98
/* Egress Tag Control */
#define ETAG_CTRL_P(p, x) (((x) & 0x3) << ((p) << 1))
@@ -179,8 +187,8 @@ enum mt7530_vlan_egress_attr {
/* Register for port STP state control */
#define MT7530_SSP_P(x) (0x2000 + ((x) * 0x100))
-#define FID_PST(x) ((x) & 0x3)
-#define FID_PST_MASK FID_PST(0x3)
+#define FID_PST(fid, state) (((state) & 0x3) << ((fid) * 2))
+#define FID_PST_MASK(fid) FID_PST(fid, 0x3)
enum mt7530_stp_state {
MT7530_STP_DISABLED = 0,
@@ -230,6 +238,7 @@ enum mt7530_port_mode {
#define PVC_EG_TAG_MASK PVC_EG_TAG(7)
#define VLAN_ATTR(x) (((x) & 0x3) << 6)
#define VLAN_ATTR_MASK VLAN_ATTR(3)
+#define ACC_FRM_MASK GENMASK(1, 0)
enum mt7530_vlan_port_eg_tag {
MT7530_VLAN_EG_DISABLED = 0,
@@ -241,13 +250,19 @@ enum mt7530_vlan_port_attr {
MT7530_VLAN_TRANSPARENT = 3,
};
+enum mt7530_vlan_port_acc_frm {
+ MT7530_VLAN_ACC_ALL = 0,
+ MT7530_VLAN_ACC_TAGGED = 1,
+ MT7530_VLAN_ACC_UNTAGGED = 2,
+};
+
#define STAG_VPID (((x) & 0xffff) << 16)
/* Register for port port-and-protocol based vlan 1 control */
#define MT7530_PPBV1_P(x) (0x2014 + ((x) * 0x100))
#define G0_PORT_VID(x) (((x) & 0xfff) << 0)
#define G0_PORT_VID_MASK G0_PORT_VID(0xfff)
-#define G0_PORT_VID_DEF G0_PORT_VID(1)
+#define G0_PORT_VID_DEF G0_PORT_VID(0)
/* Register for port MAC control register */
#define MT7530_PMCR_P(x) (0x3000 + ((x) * 0x100))
diff --git a/drivers/net/dsa/mv88e6xxx/Kconfig b/drivers/net/dsa/mv88e6xxx/Kconfig
index 634a48e6616b..7a2445a34eb7 100644
--- a/drivers/net/dsa/mv88e6xxx/Kconfig
+++ b/drivers/net/dsa/mv88e6xxx/Kconfig
@@ -2,6 +2,7 @@
config NET_DSA_MV88E6XXX
tristate "Marvell 88E6xxx Ethernet switch fabric support"
depends on NET_DSA
+ depends on PTP_1588_CLOCK_OPTIONAL
select IRQ_DOMAIN
select NET_DSA_TAG_EDSA
select NET_DSA_TAG_DSA
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 272b0535d946..c45ca2473743 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -1221,14 +1221,36 @@ static u16 mv88e6xxx_port_vlan(struct mv88e6xxx_chip *chip, int dev, int port)
bool found = false;
u16 pvlan;
- list_for_each_entry(dp, &dst->ports, list) {
- if (dp->ds->index == dev && dp->index == port) {
+ /* dev is a physical switch */
+ if (dev <= dst->last_switch) {
+ list_for_each_entry(dp, &dst->ports, list) {
+ if (dp->ds->index == dev && dp->index == port) {
+ /* dp might be a DSA link or a user port, so it
+ * might or might not have a bridge_dev
+ * pointer. Use the "found" variable for both
+ * cases.
+ */
+ br = dp->bridge_dev;
+ found = true;
+ break;
+ }
+ }
+ /* dev is a virtual bridge */
+ } else {
+ list_for_each_entry(dp, &dst->ports, list) {
+ if (dp->bridge_num < 0)
+ continue;
+
+ if (dp->bridge_num + 1 + dst->last_switch != dev)
+ continue;
+
+ br = dp->bridge_dev;
found = true;
break;
}
}
- /* Prevent frames from unknown switch or port */
+ /* Prevent frames from unknown switch or virtual bridge */
if (!found)
return 0;
@@ -1236,7 +1258,6 @@ static u16 mv88e6xxx_port_vlan(struct mv88e6xxx_chip *chip, int dev, int port)
if (dp->type == DSA_PORT_TYPE_CPU || dp->type == DSA_PORT_TYPE_DSA)
return mv88e6xxx_port_mask(chip);
- br = dp->bridge_dev;
pvlan = 0;
/* Frames from user ports can egress any local DSA links and CPU ports,
@@ -2422,6 +2443,44 @@ static void mv88e6xxx_crosschip_bridge_leave(struct dsa_switch *ds,
mv88e6xxx_reg_unlock(chip);
}
+/* Treat the software bridge as a virtual single-port switch behind the
+ * CPU and map it in the PVT. PVT device numbers 0..dst->last_switch are
+ * taken by the physical switches, so number virtual bridges starting
+ * beyond that range.
+ */
+static int mv88e6xxx_map_virtual_bridge_to_pvt(struct dsa_switch *ds,
+ int bridge_num)
+{
+ u8 dev = bridge_num + ds->dst->last_switch + 1;
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
+ mv88e6xxx_reg_lock(chip);
+ err = mv88e6xxx_pvt_map(chip, dev, 0);
+ mv88e6xxx_reg_unlock(chip);
+
+ return err;
+}
+
+static int mv88e6xxx_bridge_tx_fwd_offload(struct dsa_switch *ds, int port,
+ struct net_device *br,
+ int bridge_num)
+{
+ return mv88e6xxx_map_virtual_bridge_to_pvt(ds, bridge_num);
+}
+
+static void mv88e6xxx_bridge_tx_fwd_unoffload(struct dsa_switch *ds, int port,
+ struct net_device *br,
+ int bridge_num)
+{
+ int err;
+
+ err = mv88e6xxx_map_virtual_bridge_to_pvt(ds, bridge_num);
+ if (err) {
+ dev_err(ds->dev, "failed to remap cross-chip Port VLAN: %pe\n",
+ ERR_PTR(err));
+ }
+}
+
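Worked example of the numbering (hypothetical values): in a tree with two
physical switches, indices 0 and 1, dst->last_switch == 1, so:

	u8 dev0 = 0 + 1 + 1;	/* bridge_num 0 -> PVT device number 2 */
	u8 dev1 = 1 + 1 + 1;	/* bridge_num 1 -> PVT device number 3 */

Physical switches keep PVT device numbers 0 and 1; virtual bridges are mapped
past them, matching the dp->bridge_num + 1 + dst->last_switch test in
mv88e6xxx_port_vlan() above.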
static int mv88e6xxx_software_reset(struct mv88e6xxx_chip *chip)
{
if (chip->info->ops->reset)
@@ -3025,6 +3084,15 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
chip->ds = ds;
ds->slave_mii_bus = mv88e6xxx_default_mdio_bus(chip);
+ /* Since virtual bridges are mapped in the PVT, the number we support
+ * depends on the physical switch topology. We need to let DSA figure
+ * that out and therefore we cannot set this at dsa_register_switch()
+ * time.
+ */
+ if (mv88e6xxx_has_pvt(chip))
+ ds->num_fwd_offloading_bridges = MV88E6XXX_MAX_PVT_SWITCHES -
+ ds->dst->last_switch - 1;
+
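Arithmetic check (assuming MV88E6XXX_MAX_PVT_SWITCHES is 32, its usual value
in this driver): a two-switch tree, dst->last_switch == 1, would advertise
32 - 1 - 1 == 30 offloadable bridges.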
mv88e6xxx_reg_lock(chip);
if (chip->info->ops->setup_errata) {
@@ -5729,7 +5797,6 @@ static int mv88e6xxx_port_bridge_flags(struct dsa_switch *ds, int port,
struct netlink_ext_ack *extack)
{
struct mv88e6xxx_chip *chip = ds->priv;
- bool do_fast_age = false;
int err = -EOPNOTSUPP;
mv88e6xxx_reg_lock(chip);
@@ -5741,9 +5808,6 @@ static int mv88e6xxx_port_bridge_flags(struct dsa_switch *ds, int port,
err = mv88e6xxx_port_set_assoc_vector(chip, port, pav);
if (err)
goto out;
-
- if (!learning)
- do_fast_age = true;
}
if (flags.mask & BR_FLOOD) {
@@ -5775,26 +5839,6 @@ static int mv88e6xxx_port_bridge_flags(struct dsa_switch *ds, int port,
out:
mv88e6xxx_reg_unlock(chip);
- if (do_fast_age)
- mv88e6xxx_port_fast_age(ds, port);
-
- return err;
-}
-
-static int mv88e6xxx_port_set_mrouter(struct dsa_switch *ds, int port,
- bool mrouter,
- struct netlink_ext_ack *extack)
-{
- struct mv88e6xxx_chip *chip = ds->priv;
- int err;
-
- if (!chip->info->ops->port_set_mcast_flood)
- return -EOPNOTSUPP;
-
- mv88e6xxx_reg_lock(chip);
- err = chip->info->ops->port_set_mcast_flood(chip, port, mrouter);
- mv88e6xxx_reg_unlock(chip);
-
return err;
}
@@ -6099,7 +6143,6 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
.port_bridge_leave = mv88e6xxx_port_bridge_leave,
.port_pre_bridge_flags = mv88e6xxx_port_pre_bridge_flags,
.port_bridge_flags = mv88e6xxx_port_bridge_flags,
- .port_set_mrouter = mv88e6xxx_port_set_mrouter,
.port_stp_state_set = mv88e6xxx_port_stp_state_set,
.port_fast_age = mv88e6xxx_port_fast_age,
.port_vlan_filtering = mv88e6xxx_port_vlan_filtering,
@@ -6128,6 +6171,8 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
.crosschip_lag_change = mv88e6xxx_crosschip_lag_change,
.crosschip_lag_join = mv88e6xxx_crosschip_lag_join,
.crosschip_lag_leave = mv88e6xxx_crosschip_lag_leave,
+ .port_bridge_tx_fwd_offload = mv88e6xxx_bridge_tx_fwd_offload,
+ .port_bridge_tx_fwd_unoffload = mv88e6xxx_bridge_tx_fwd_unoffload,
};
static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip)
diff --git a/drivers/net/dsa/ocelot/Kconfig b/drivers/net/dsa/ocelot/Kconfig
index 932b6b6fe817..9948544ba1c4 100644
--- a/drivers/net/dsa/ocelot/Kconfig
+++ b/drivers/net/dsa/ocelot/Kconfig
@@ -5,6 +5,7 @@ config NET_DSA_MSCC_FELIX
depends on NET_VENDOR_MICROSEMI
depends on NET_VENDOR_FREESCALE
depends on HAS_IOMEM
+ depends on PTP_1588_CLOCK_OPTIONAL
select MSCC_OCELOT_SWITCH_LIB
select NET_DSA_TAG_OCELOT_8021Q
select NET_DSA_TAG_OCELOT
@@ -19,6 +20,7 @@ config NET_DSA_MSCC_SEVILLE
depends on NET_DSA
depends on NET_VENDOR_MICROSEMI
depends on HAS_IOMEM
+ depends on PTP_1588_CLOCK_OPTIONAL
select MSCC_OCELOT_SWITCH_LIB
select NET_DSA_TAG_OCELOT_8021Q
select NET_DSA_TAG_OCELOT
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index a2a15919b960..cbe23b20f3fa 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -231,11 +231,6 @@ static int felix_tag_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid)
return 0;
}
-static const struct dsa_8021q_ops felix_tag_8021q_ops = {
- .vlan_add = felix_tag_8021q_vlan_add,
- .vlan_del = felix_tag_8021q_vlan_del,
-};
-
/* Alternatively to using the NPI functionality, that same hardware MAC
* connected internally to the enetc or fman DSA master can be configured to
* use the software-defined tag_8021q frame format. As far as the hardware is
@@ -425,29 +420,18 @@ static int felix_setup_tag_8021q(struct dsa_switch *ds, int cpu)
ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_MC);
ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_BC);
- felix->dsa_8021q_ctx = kzalloc(sizeof(*felix->dsa_8021q_ctx),
- GFP_KERNEL);
- if (!felix->dsa_8021q_ctx)
- return -ENOMEM;
-
- felix->dsa_8021q_ctx->ops = &felix_tag_8021q_ops;
- felix->dsa_8021q_ctx->proto = htons(ETH_P_8021AD);
- felix->dsa_8021q_ctx->ds = ds;
-
- err = dsa_8021q_setup(felix->dsa_8021q_ctx, true);
+ err = dsa_tag_8021q_register(ds, htons(ETH_P_8021AD));
if (err)
- goto out_free_dsa_8021_ctx;
+ return err;
err = felix_setup_mmio_filtering(felix);
if (err)
- goto out_teardown_dsa_8021q;
+ goto out_tag_8021q_unregister;
return 0;
-out_teardown_dsa_8021q:
- dsa_8021q_setup(felix->dsa_8021q_ctx, false);
-out_free_dsa_8021_ctx:
- kfree(felix->dsa_8021q_ctx);
+out_tag_8021q_unregister:
+ dsa_tag_8021q_unregister(ds);
return err;
}
@@ -462,11 +446,7 @@ static void felix_teardown_tag_8021q(struct dsa_switch *ds, int cpu)
dev_err(ds->dev, "felix_teardown_mmio_filtering returned %d",
err);
- err = dsa_8021q_setup(felix->dsa_8021q_ctx, false);
- if (err)
- dev_err(ds->dev, "dsa_8021q_setup returned %d", err);
-
- kfree(felix->dsa_8021q_ctx);
+ dsa_tag_8021q_unregister(ds);
for (port = 0; port < ds->num_ports; port++) {
if (dsa_is_unused_port(ds, port))
@@ -816,23 +796,6 @@ static int felix_vlan_del(struct dsa_switch *ds, int port,
return ocelot_vlan_del(ocelot, port, vlan->vid);
}
-static int felix_port_enable(struct dsa_switch *ds, int port,
- struct phy_device *phy)
-{
- struct ocelot *ocelot = ds->priv;
-
- ocelot_port_enable(ocelot, port, phy);
-
- return 0;
-}
-
-static void felix_port_disable(struct dsa_switch *ds, int port)
-{
- struct ocelot *ocelot = ds->priv;
-
- return ocelot_port_disable(ocelot, port);
-}
-
static void felix_phylink_validate(struct dsa_switch *ds, int port,
unsigned long *supported,
struct phylink_link_state *state)
@@ -861,25 +824,9 @@ static void felix_phylink_mac_link_down(struct dsa_switch *ds, int port,
phy_interface_t interface)
{
struct ocelot *ocelot = ds->priv;
- struct ocelot_port *ocelot_port = ocelot->ports[port];
- int err;
-
- ocelot_port_rmwl(ocelot_port, 0, DEV_MAC_ENA_CFG_RX_ENA,
- DEV_MAC_ENA_CFG);
- ocelot_fields_write(ocelot, port, QSYS_SWITCH_PORT_MODE_PORT_ENA, 0);
-
- err = ocelot_port_flush(ocelot, port);
- if (err)
- dev_err(ocelot->dev, "failed to flush port %d: %d\n",
- port, err);
-
- /* Put the port in reset. */
- ocelot_port_writel(ocelot_port,
- DEV_CLOCK_CFG_MAC_TX_RST |
- DEV_CLOCK_CFG_MAC_RX_RST |
- DEV_CLOCK_CFG_LINK_SPEED(OCELOT_SPEED_1000),
- DEV_CLOCK_CFG);
+ ocelot_phylink_mac_link_down(ocelot, port, link_an_mode, interface,
+ FELIX_MAC_QUIRKS);
}
static void felix_phylink_mac_link_up(struct dsa_switch *ds, int port,
@@ -890,75 +837,11 @@ static void felix_phylink_mac_link_up(struct dsa_switch *ds, int port,
bool tx_pause, bool rx_pause)
{
struct ocelot *ocelot = ds->priv;
- struct ocelot_port *ocelot_port = ocelot->ports[port];
struct felix *felix = ocelot_to_felix(ocelot);
- u32 mac_fc_cfg;
-
- /* Take port out of reset by clearing the MAC_TX_RST, MAC_RX_RST and
- * PORT_RST bits in DEV_CLOCK_CFG. Note that the way this system is
- * integrated is that the MAC speed is fixed and it's the PCS who is
- * performing the rate adaptation, so we have to write "1000Mbps" into
- * the LINK_SPEED field of DEV_CLOCK_CFG (which is also its default
- * value).
- */
- ocelot_port_writel(ocelot_port,
- DEV_CLOCK_CFG_LINK_SPEED(OCELOT_SPEED_1000),
- DEV_CLOCK_CFG);
-
- switch (speed) {
- case SPEED_10:
- mac_fc_cfg = SYS_MAC_FC_CFG_FC_LINK_SPEED(3);
- break;
- case SPEED_100:
- mac_fc_cfg = SYS_MAC_FC_CFG_FC_LINK_SPEED(2);
- break;
- case SPEED_1000:
- case SPEED_2500:
- mac_fc_cfg = SYS_MAC_FC_CFG_FC_LINK_SPEED(1);
- break;
- default:
- dev_err(ocelot->dev, "Unsupported speed on port %d: %d\n",
- port, speed);
- return;
- }
-
- /* handle Rx pause in all cases, with 2500base-X this is used for rate
- * adaptation.
- */
- mac_fc_cfg |= SYS_MAC_FC_CFG_RX_FC_ENA;
-
- if (tx_pause)
- mac_fc_cfg |= SYS_MAC_FC_CFG_TX_FC_ENA |
- SYS_MAC_FC_CFG_PAUSE_VAL_CFG(0xffff) |
- SYS_MAC_FC_CFG_FC_LATENCY_CFG(0x7) |
- SYS_MAC_FC_CFG_ZERO_PAUSE_ENA;
-
- /* Flow control. Link speed is only used here to evaluate the time
- * specification in incoming pause frames.
- */
- ocelot_write_rix(ocelot, mac_fc_cfg, SYS_MAC_FC_CFG, port);
-
- ocelot_write_rix(ocelot, 0, ANA_POL_FLOWC, port);
-
- ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, tx_pause);
-
- /* Undo the effects of felix_phylink_mac_link_down:
- * enable MAC module
- */
- ocelot_port_writel(ocelot_port, DEV_MAC_ENA_CFG_RX_ENA |
- DEV_MAC_ENA_CFG_TX_ENA, DEV_MAC_ENA_CFG);
-
- /* Enable receiving frames on the port, and activate auto-learning of
- * MAC addresses.
- */
- ocelot_write_gix(ocelot, ANA_PORT_PORT_CFG_LEARNAUTO |
- ANA_PORT_PORT_CFG_RECV_ENA |
- ANA_PORT_PORT_CFG_PORTID_VAL(port),
- ANA_PORT_PORT_CFG, port);
- /* Core: Enable port for frame transfer */
- ocelot_fields_write(ocelot, port,
- QSYS_SWITCH_PORT_MODE_PORT_ENA, 1);
+ ocelot_phylink_mac_link_up(ocelot, port, phydev, link_an_mode,
+ interface, speed, duplex, tx_pause, rx_pause,
+ FELIX_MAC_QUIRKS);
if (felix->info->port_sched_speed_set)
felix->info->port_sched_speed_set(ocelot, port, speed);
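The deleted block is not lost: the register sequence (fixed 1000Mbps
LINK_SPEED because the PCS performs the rate adaptation, flow-control setup,
MAC enable, learning, port enable) moves into the shared
ocelot_phylink_mac_link_down/up() helpers, keyed by FELIX_MAC_QUIRKS from
felix.h below. A sketch of the speed to SYS_MAC_FC_CFG mapping the removed
code implemented:

	static u32 felix_fc_link_speed(int speed)
	{
		switch (speed) {
		case SPEED_10:
			return SYS_MAC_FC_CFG_FC_LINK_SPEED(3);
		case SPEED_100:
			return SYS_MAC_FC_CFG_FC_LINK_SPEED(2);
		case SPEED_1000:
		case SPEED_2500:
			/* the PCS rate-adapts 2500base-X to the MAC */
			return SYS_MAC_FC_CFG_FC_LINK_SPEED(1);
		default:
			return 0;	/* unsupported speed, caller bails */
		}
	}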
@@ -1635,8 +1518,6 @@ const struct dsa_switch_ops felix_switch_ops = {
.phylink_mac_config = felix_phylink_mac_config,
.phylink_mac_link_down = felix_phylink_mac_link_down,
.phylink_mac_link_up = felix_phylink_mac_link_up,
- .port_enable = felix_port_enable,
- .port_disable = felix_port_disable,
.port_fdb_dump = felix_fdb_dump,
.port_fdb_add = felix_fdb_add,
.port_fdb_del = felix_fdb_del,
@@ -1679,6 +1560,8 @@ const struct dsa_switch_ops felix_switch_ops = {
.port_mrp_del = felix_mrp_del,
.port_mrp_add_ring_role = felix_mrp_add_ring_role,
.port_mrp_del_ring_role = felix_mrp_del_ring_role,
+ .tag_8021q_vlan_add = felix_tag_8021q_vlan_add,
+ .tag_8021q_vlan_del = felix_tag_8021q_vlan_del,
};
struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port)
diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h
index 4d96cad815d5..5854bab43327 100644
--- a/drivers/net/dsa/ocelot/felix.h
+++ b/drivers/net/dsa/ocelot/felix.h
@@ -5,6 +5,7 @@
#define _MSCC_FELIX_H
#define ocelot_to_felix(o) container_of((o), struct felix, ocelot)
+#define FELIX_MAC_QUIRKS OCELOT_QUIRK_PCS_PERFORMS_RATE_ADAPTATION
/* Platform-specific information */
struct felix_info {
@@ -60,7 +61,6 @@ struct felix {
struct lynx_pcs **pcs;
resource_size_t switch_base;
resource_size_t imdio_base;
- struct dsa_8021q_context *dsa_8021q_ctx;
enum dsa_tag_protocol tag_proto;
};
diff --git a/drivers/net/dsa/sja1105/Kconfig b/drivers/net/dsa/sja1105/Kconfig
index b29d41e5e1e7..1291bba3f3b6 100644
--- a/drivers/net/dsa/sja1105/Kconfig
+++ b/drivers/net/dsa/sja1105/Kconfig
@@ -2,6 +2,7 @@
config NET_DSA_SJA1105
tristate "NXP SJA1105 Ethernet switch family support"
depends on NET_DSA && SPI
+ depends on PTP_1588_CLOCK_OPTIONAL
select NET_DSA_TAG_SJA1105
select PCS_XPCS
select PACKING
diff --git a/drivers/net/dsa/sja1105/sja1105.h b/drivers/net/dsa/sja1105/sja1105.h
index 221c7abdef0e..2e899c9f036d 100644
--- a/drivers/net/dsa/sja1105/sja1105.h
+++ b/drivers/net/dsa/sja1105/sja1105.h
@@ -226,28 +226,13 @@ struct sja1105_flow_block {
int num_virtual_links;
};
-struct sja1105_bridge_vlan {
- struct list_head list;
- int port;
- u16 vid;
- bool pvid;
- bool untagged;
-};
-
-enum sja1105_vlan_state {
- SJA1105_VLAN_UNAWARE,
- SJA1105_VLAN_BEST_EFFORT,
- SJA1105_VLAN_FILTERING_FULL,
-};
-
struct sja1105_private {
struct sja1105_static_config static_config;
bool rgmii_rx_delay[SJA1105_MAX_NUM_PORTS];
bool rgmii_tx_delay[SJA1105_MAX_NUM_PORTS];
phy_interface_t phy_mode[SJA1105_MAX_NUM_PORTS];
bool fixed_link[SJA1105_MAX_NUM_PORTS];
- bool best_effort_vlan_filtering;
- unsigned long learn_ena;
+ bool vlan_aware;
unsigned long ucast_egress_floods;
unsigned long bcast_egress_floods;
const struct sja1105_info *info;
@@ -255,16 +240,14 @@ struct sja1105_private {
struct gpio_desc *reset_gpio;
struct spi_device *spidev;
struct dsa_switch *ds;
- struct list_head dsa_8021q_vlans;
- struct list_head bridge_vlans;
+ u16 bridge_pvid[SJA1105_MAX_NUM_PORTS];
+ u16 tag_8021q_pvid[SJA1105_MAX_NUM_PORTS];
struct sja1105_flow_block flow_block;
struct sja1105_port ports[SJA1105_MAX_NUM_PORTS];
/* Serializes transmission of management frames so that
* the switch doesn't confuse them with one another.
*/
struct mutex mgmt_lock;
- struct dsa_8021q_context *dsa_8021q_ctx;
- enum sja1105_vlan_state vlan_state;
struct devlink_region **regions;
struct sja1105_cbs_entry *cbs;
struct mii_bus *mdio_base_t1;
@@ -311,10 +294,6 @@ int sja1110_pcs_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val);
/* From sja1105_devlink.c */
int sja1105_devlink_setup(struct dsa_switch *ds);
void sja1105_devlink_teardown(struct dsa_switch *ds);
-int sja1105_devlink_param_get(struct dsa_switch *ds, u32 id,
- struct devlink_param_gset_ctx *ctx);
-int sja1105_devlink_param_set(struct dsa_switch *ds, u32 id,
- struct devlink_param_gset_ctx *ctx);
int sja1105_devlink_info_get(struct dsa_switch *ds,
struct devlink_info_req *req,
struct netlink_ext_ack *extack);
diff --git a/drivers/net/dsa/sja1105/sja1105_devlink.c b/drivers/net/dsa/sja1105/sja1105_devlink.c
index b6a4a16b8c7e..05c7f4ca3b1a 100644
--- a/drivers/net/dsa/sja1105/sja1105_devlink.c
+++ b/drivers/net/dsa/sja1105/sja1105_devlink.c
@@ -115,105 +115,6 @@ static void sja1105_teardown_devlink_regions(struct dsa_switch *ds)
kfree(priv->regions);
}
-static int sja1105_best_effort_vlan_filtering_get(struct sja1105_private *priv,
- bool *be_vlan)
-{
- *be_vlan = priv->best_effort_vlan_filtering;
-
- return 0;
-}
-
-static int sja1105_best_effort_vlan_filtering_set(struct sja1105_private *priv,
- bool be_vlan)
-{
- struct dsa_switch *ds = priv->ds;
- bool vlan_filtering;
- int port;
- int rc;
-
- priv->best_effort_vlan_filtering = be_vlan;
-
- rtnl_lock();
- for (port = 0; port < ds->num_ports; port++) {
- struct dsa_port *dp;
-
- if (!dsa_is_user_port(ds, port))
- continue;
-
- dp = dsa_to_port(ds, port);
- vlan_filtering = dsa_port_is_vlan_filtering(dp);
-
- rc = sja1105_vlan_filtering(ds, port, vlan_filtering, NULL);
- if (rc)
- break;
- }
- rtnl_unlock();
-
- return rc;
-}
-
-enum sja1105_devlink_param_id {
- SJA1105_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
- SJA1105_DEVLINK_PARAM_ID_BEST_EFFORT_VLAN_FILTERING,
-};
-
-int sja1105_devlink_param_get(struct dsa_switch *ds, u32 id,
- struct devlink_param_gset_ctx *ctx)
-{
- struct sja1105_private *priv = ds->priv;
- int err;
-
- switch (id) {
- case SJA1105_DEVLINK_PARAM_ID_BEST_EFFORT_VLAN_FILTERING:
- err = sja1105_best_effort_vlan_filtering_get(priv,
- &ctx->val.vbool);
- break;
- default:
- err = -EOPNOTSUPP;
- break;
- }
-
- return err;
-}
-
-int sja1105_devlink_param_set(struct dsa_switch *ds, u32 id,
- struct devlink_param_gset_ctx *ctx)
-{
- struct sja1105_private *priv = ds->priv;
- int err;
-
- switch (id) {
- case SJA1105_DEVLINK_PARAM_ID_BEST_EFFORT_VLAN_FILTERING:
- err = sja1105_best_effort_vlan_filtering_set(priv,
- ctx->val.vbool);
- break;
- default:
- err = -EOPNOTSUPP;
- break;
- }
-
- return err;
-}
-
-static const struct devlink_param sja1105_devlink_params[] = {
- DSA_DEVLINK_PARAM_DRIVER(SJA1105_DEVLINK_PARAM_ID_BEST_EFFORT_VLAN_FILTERING,
- "best_effort_vlan_filtering",
- DEVLINK_PARAM_TYPE_BOOL,
- BIT(DEVLINK_PARAM_CMODE_RUNTIME)),
-};
-
-static int sja1105_setup_devlink_params(struct dsa_switch *ds)
-{
- return dsa_devlink_params_register(ds, sja1105_devlink_params,
- ARRAY_SIZE(sja1105_devlink_params));
-}
-
-static void sja1105_teardown_devlink_params(struct dsa_switch *ds)
-{
- dsa_devlink_params_unregister(ds, sja1105_devlink_params,
- ARRAY_SIZE(sja1105_devlink_params));
-}
-
int sja1105_devlink_info_get(struct dsa_switch *ds,
struct devlink_info_req *req,
struct netlink_ext_ack *extack)
@@ -233,23 +134,10 @@ int sja1105_devlink_info_get(struct dsa_switch *ds,
int sja1105_devlink_setup(struct dsa_switch *ds)
{
- int rc;
-
- rc = sja1105_setup_devlink_params(ds);
- if (rc)
- return rc;
-
- rc = sja1105_setup_devlink_regions(ds);
- if (rc < 0) {
- sja1105_teardown_devlink_params(ds);
- return rc;
- }
-
- return 0;
+ return sja1105_setup_devlink_regions(ds);
}
void sja1105_devlink_teardown(struct dsa_switch *ds)
{
- sja1105_teardown_devlink_params(ds);
sja1105_teardown_devlink_regions(ds);
}
diff --git a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
index 147709131c13..f2049f52833c 100644
--- a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
+++ b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
@@ -1355,14 +1355,14 @@ u8 sja1105et_fdb_hash(struct sja1105_private *priv, const u8 *addr, u16 vid)
{
struct sja1105_l2_lookup_params_entry *l2_lookup_params =
priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS].entries;
- u64 poly_koopman = l2_lookup_params->poly;
+ u64 input, poly_koopman = l2_lookup_params->poly;
/* Convert polynomial from Koopman to 'normal' notation */
u8 poly = (u8)(1 + (poly_koopman << 1));
- u64 vlanid = l2_lookup_params->shared_learn ? 0 : vid;
- u64 input = (vlanid << 48) | ether_addr_to_u64(addr);
u8 crc = 0; /* seed */
int i;
+ input = ((u64)vid << 48) | ether_addr_to_u64(addr);
+
/* Mask the eight bytes starting from MSB one at a time */
for (i = 56; i >= 0; i -= 8) {
u8 byte = (input & (0xffull << i)) >> i;
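Worked example of the conversion (sketch, the poly value is hypothetical):
Koopman notation keeps the x^8 term in bit 7 and leaves the x^0 term
implicit, so e.g. poly_koopman == 0x97 becomes (0x97 << 1) + 1 == 0x12f,
which truncates to the 'normal' 8-bit polynomial 0x2f. One byte of the
MSB-first CRC-8 that the loop above computes, in an equivalent standalone
form:

	static u8 crc8_msb_byte(u8 crc, u8 byte, u8 poly)
	{
		int bit;

		crc ^= byte;
		for (bit = 0; bit < 8; bit++)
			crc = (crc & 0x80) ? (crc << 1) ^ poly : crc << 1;

		return crc;
	}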
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index 49eb0ac41b7d..05ba65042b5f 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -28,8 +28,6 @@
#define SJA1105_UNKNOWN_MULTICAST 0x010000000000ull
#define SJA1105_DEFAULT_VLAN (VLAN_N_VID - 1)
-static const struct dsa_switch_ops sja1105_switch_ops;
-
static void sja1105_hw_reset(struct gpio_desc *gpio, unsigned int pulse_len,
unsigned int startup_delay)
{
@@ -57,6 +55,81 @@ static bool sja1105_can_forward(struct sja1105_l2_forwarding_entry *l2_fwd,
return !!(l2_fwd[from].reach_port & BIT(to));
}
+static int sja1105_is_vlan_configured(struct sja1105_private *priv, u16 vid)
+{
+ struct sja1105_vlan_lookup_entry *vlan;
+ int count, i;
+
+ vlan = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entries;
+ count = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entry_count;
+
+ for (i = 0; i < count; i++)
+ if (vlan[i].vlanid == vid)
+ return i;
+
+ /* Return an invalid entry index if not found */
+ return -1;
+}
+
+static int sja1105_drop_untagged(struct dsa_switch *ds, int port, bool drop)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_mac_config_entry *mac;
+
+ mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
+
+ if (mac[port].drpuntag == drop)
+ return 0;
+
+ mac[port].drpuntag = drop;
+
+ return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
+ &mac[port], true);
+}
+
+static int sja1105_pvid_apply(struct sja1105_private *priv, int port, u16 pvid)
+{
+ struct sja1105_mac_config_entry *mac;
+
+ mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
+
+ if (mac[port].vlanid == pvid)
+ return 0;
+
+ mac[port].vlanid = pvid;
+
+ return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
+ &mac[port], true);
+}
+
+static int sja1105_commit_pvid(struct dsa_switch *ds, int port)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_vlan_lookup_entry *vlan;
+ bool drop_untagged = false;
+ int match, rc;
+ u16 pvid;
+
+ if (dp->bridge_dev && br_vlan_enabled(dp->bridge_dev))
+ pvid = priv->bridge_pvid[port];
+ else
+ pvid = priv->tag_8021q_pvid[port];
+
+ rc = sja1105_pvid_apply(priv, port, pvid);
+ if (rc)
+ return rc;
+
+ vlan = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entries;
+
+ match = sja1105_is_vlan_configured(priv, pvid);
+
+ if (match < 0 || !(vlan[match].vmemb_port & BIT(port)))
+ drop_untagged = true;
+
+ return sja1105_drop_untagged(ds, port, drop_untagged);
+}
+
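Reading the three helpers above together (a sketch of the intent, not spelled
out in the patch): under a VLAN-aware bridge the bridge PVID is committed to
the MAC configuration table, otherwise the private tag_8021q PVID is, and
drpuntag is raised only while the committed PVID is missing from the VLAN
table or does not list the port as a member:

	/* Hypothetical sequence on port 2 of a vlan_filtering bridge:
	 *   "bridge vlan del dev sw0p2 vid 1" removes the PVID VLAN, so
	 *   sja1105_commit_pvid() sets drpuntag = true;
	 *   "bridge vlan add dev sw0p2 vid 1 pvid untagged" restores the
	 *   membership and drpuntag drops back to false.
	 */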
static int sja1105_init_mac_settings(struct sja1105_private *priv)
{
struct sja1105_mac_config_entry default_mac = {
@@ -101,7 +174,7 @@ static int sja1105_init_mac_settings(struct sja1105_private *priv)
struct sja1105_mac_config_entry *mac;
struct dsa_switch *ds = priv->ds;
struct sja1105_table *table;
- int i;
+ struct dsa_port *dp;
table = &priv->static_config.tables[BLK_IDX_MAC_CONFIG];
@@ -120,14 +193,21 @@ static int sja1105_init_mac_settings(struct sja1105_private *priv)
mac = table->entries;
- for (i = 0; i < ds->num_ports; i++) {
- mac[i] = default_mac;
+ list_for_each_entry(dp, &ds->dst->ports, list) {
+ if (dp->ds != ds)
+ continue;
+
+ mac[dp->index] = default_mac;
/* Let sja1105_bridge_stp_state_set() keep address learning
- * enabled for the CPU port.
+ * enabled for the DSA ports. CPU ports use software-assisted
+ * learning to ensure that only FDB entries belonging to the
+ * bridge are learned, and that they are learned towards all
+ * CPU ports in a cross-chip topology if multiple CPU ports
+ * exist.
*/
- if (dsa_is_cpu_port(ds, i))
- priv->learn_ena |= BIT(i);
+ if (dsa_port_is_dsa(dp))
+ dp->learning = true;
}
return 0;
@@ -378,8 +458,6 @@ static int sja1105_init_static_vlan(struct sja1105_private *priv)
table->entry_count = 1;
for (port = 0; port < ds->num_ports; port++) {
- struct sja1105_bridge_vlan *v;
-
if (dsa_is_unused_port(ds, port))
continue;
@@ -387,22 +465,10 @@ static int sja1105_init_static_vlan(struct sja1105_private *priv)
pvid.vlan_bc |= BIT(port);
pvid.tag_port &= ~BIT(port);
- v = kzalloc(sizeof(*v), GFP_KERNEL);
- if (!v)
- return -ENOMEM;
-
- v->port = port;
- v->vid = SJA1105_DEFAULT_VLAN;
- v->untagged = true;
- if (dsa_is_cpu_port(ds, port))
- v->pvid = true;
- list_add(&v->list, &priv->dsa_8021q_vlans);
-
- v = kmemdup(v, sizeof(*v), GFP_KERNEL);
- if (!v)
- return -ENOMEM;
-
- list_add(&v->list, &priv->bridge_vlans);
+ if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
+ priv->tag_8021q_pvid[port] = SJA1105_DEFAULT_VLAN;
+ priv->bridge_pvid[port] = SJA1105_DEFAULT_VLAN;
+ }
}
((struct sja1105_vlan_lookup_entry *)table->entries)[0] = pvid;
@@ -413,8 +479,11 @@ static int sja1105_init_l2_forwarding(struct sja1105_private *priv)
{
struct sja1105_l2_forwarding_entry *l2fwd;
struct dsa_switch *ds = priv->ds;
+ struct dsa_switch_tree *dst;
struct sja1105_table *table;
- int i, j;
+ struct dsa_link *dl;
+ int port, tc;
+ int from, to;
table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING];
@@ -432,47 +501,109 @@ static int sja1105_init_l2_forwarding(struct sja1105_private *priv)
l2fwd = table->entries;
- /* First 5 entries define the forwarding rules */
- for (i = 0; i < ds->num_ports; i++) {
- unsigned int upstream = dsa_upstream_port(priv->ds, i);
+ /* First 5 entries in the L2 Forwarding Table define the forwarding
+ * rules and the VLAN PCP to ingress queue mapping.
+ * Set up the ingress queue mapping first.
+ */
+ for (port = 0; port < ds->num_ports; port++) {
+ if (dsa_is_unused_port(ds, port))
+ continue;
+
+ for (tc = 0; tc < SJA1105_NUM_TC; tc++)
+ l2fwd[port].vlan_pmap[tc] = tc;
+ }
- if (dsa_is_unused_port(ds, i))
+ /* Then manage the forwarding domain for user ports. These can forward
+ * only to the always-on domain (CPU port and DSA links)
+ */
+ for (from = 0; from < ds->num_ports; from++) {
+ if (!dsa_is_user_port(ds, from))
continue;
- for (j = 0; j < SJA1105_NUM_TC; j++)
- l2fwd[i].vlan_pmap[j] = j;
+ for (to = 0; to < ds->num_ports; to++) {
+ if (!dsa_is_cpu_port(ds, to) &&
+ !dsa_is_dsa_port(ds, to))
+ continue;
- /* All ports start up with egress flooding enabled,
- * including the CPU port.
- */
- priv->ucast_egress_floods |= BIT(i);
- priv->bcast_egress_floods |= BIT(i);
+ l2fwd[from].bc_domain |= BIT(to);
+ l2fwd[from].fl_domain |= BIT(to);
+
+ sja1105_port_allow_traffic(l2fwd, from, to, true);
+ }
+ }
+
+ /* Then manage the forwarding domain for DSA links and CPU ports (the
+ * always-on domain). These can send packets to any enabled port except
+ * themselves.
+ */
+ for (from = 0; from < ds->num_ports; from++) {
+ if (!dsa_is_cpu_port(ds, from) && !dsa_is_dsa_port(ds, from))
+ continue;
+
+ for (to = 0; to < ds->num_ports; to++) {
+ if (dsa_is_unused_port(ds, to))
+ continue;
- if (i == upstream)
+ if (from == to)
+ continue;
+
+ l2fwd[from].bc_domain |= BIT(to);
+ l2fwd[from].fl_domain |= BIT(to);
+
+ sja1105_port_allow_traffic(l2fwd, from, to, true);
+ }
+ }
+
+ /* In odd topologies ("H" connections where there is a DSA link to
+ * another switch which also has its own CPU port), TX packets can loop
+ * back into the system (they are flooded from CPU port 1 to the DSA
+ * link, and from there to CPU port 2). Prevent this from happening by
+ * cutting RX from DSA links towards our CPU port, if the remote switch
+ * has its own CPU port and therefore doesn't need ours for network
+ * stack termination.
+ */
+ dst = ds->dst;
+
+ list_for_each_entry(dl, &dst->rtable, list) {
+ if (dl->dp->ds != ds || dl->link_dp->cpu_dp == dl->dp->cpu_dp)
continue;
- sja1105_port_allow_traffic(l2fwd, i, upstream, true);
- sja1105_port_allow_traffic(l2fwd, upstream, i, true);
+ from = dl->dp->index;
+ to = dsa_upstream_port(ds, from);
- l2fwd[i].bc_domain = BIT(upstream);
- l2fwd[i].fl_domain = BIT(upstream);
+ dev_warn(ds->dev,
+ "H topology detected, cutting RX from DSA link %d to CPU port %d to prevent TX packet loops\n",
+ from, to);
+
+ sja1105_port_allow_traffic(l2fwd, from, to, false);
+
+ l2fwd[from].bc_domain &= ~BIT(to);
+ l2fwd[from].fl_domain &= ~BIT(to);
+ }
+
+ /* Finally, manage the egress flooding domain. All ports start up with
+ * flooding enabled, including the CPU port and DSA links.
+ */
+ for (port = 0; port < ds->num_ports; port++) {
+ if (dsa_is_unused_port(ds, port))
+ continue;
- l2fwd[upstream].bc_domain |= BIT(i);
- l2fwd[upstream].fl_domain |= BIT(i);
+ priv->ucast_egress_floods |= BIT(port);
+ priv->bcast_egress_floods |= BIT(port);
}
/* Next 8 entries define VLAN PCP mapping from ingress to egress.
* Create a one-to-one mapping.
*/
- for (i = 0; i < SJA1105_NUM_TC; i++) {
- for (j = 0; j < ds->num_ports; j++) {
- if (dsa_is_unused_port(ds, j))
+ for (tc = 0; tc < SJA1105_NUM_TC; tc++) {
+ for (port = 0; port < ds->num_ports; port++) {
+ if (dsa_is_unused_port(ds, port))
continue;
- l2fwd[ds->num_ports + i].vlan_pmap[j] = i;
+ l2fwd[ds->num_ports + tc].vlan_pmap[port] = tc;
}
- l2fwd[ds->num_ports + i].type_egrpcp2outputq = true;
+ l2fwd[ds->num_ports + tc].type_egrpcp2outputq = true;
}
return 0;
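With a hypothetical port layout (ports 0-2 user, port 3 a DSA link, port 4
the CPU port), the loops above produce:

	/* user ports reach only the always-on domain */
	l2fwd[0].bc_domain == (BIT(3) | BIT(4));
	/* always-on ports reach everything except themselves */
	l2fwd[3].bc_domain == (BIT(0) | BIT(1) | BIT(2) | BIT(4));
	l2fwd[4].bc_domain == (BIT(0) | BIT(1) | BIT(2) | BIT(3));

and if port 3 leads to a switch that terminates its own network stack (an
"H" topology), the rtable loop strips BIT(4) back out of l2fwd[3], so flooded
TX packets cannot re-enter through our CPU port.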
@@ -551,18 +682,11 @@ void sja1105_frame_memory_partitioning(struct sja1105_private *priv)
{
struct sja1105_l2_forwarding_params_entry *l2_fwd_params;
struct sja1105_vl_forwarding_params_entry *vl_fwd_params;
- int max_mem = priv->info->max_frame_mem;
struct sja1105_table *table;
- /* VLAN retagging is implemented using a loopback port that consumes
- * frame buffers. That leaves less for us.
- */
- if (priv->vlan_state == SJA1105_VLAN_BEST_EFFORT)
- max_mem -= SJA1105_FRAME_MEMORY_RETAGGING_OVERHEAD;
-
table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING_PARAMS];
l2_fwd_params = table->entries;
- l2_fwd_params->part_spc[0] = max_mem;
+ l2_fwd_params->part_spc[0] = SJA1105_MAX_FRAME_MEMORY;
/* If we have any critical-traffic virtual links, we need to reserve
* some frame buffer memory for them. At the moment, hardcode the value
@@ -634,6 +758,72 @@ static void sja1110_select_tdmaconfigidx(struct sja1105_private *priv)
general_params->tdmaconfigidx = tdmaconfigidx;
}
+static int sja1105_init_topology(struct sja1105_private *priv,
+ struct sja1105_general_params_entry *general_params)
+{
+ struct dsa_switch *ds = priv->ds;
+ int port;
+
+ /* The host port is the destination for traffic matching mac_fltres1
+ * and mac_fltres0 on all ports except itself. Default to an invalid
+ * value.
+ */
+ general_params->host_port = ds->num_ports;
+
+ /* Link-local traffic received on casc_port will be forwarded
+ * to host_port without embedding the source port and device ID
+ * info in the destination MAC address, and no RX timestamps will be
+ * taken either (presumably because it is a cascaded port and a
+ * downstream SJA switch already did that).
+ * To disable the feature, we need to do different things depending on
+ * switch generation. On SJA1105 we need to set an invalid port, while
+ * on SJA1110 which support multiple cascaded ports, this field is a
+ * bitmask so it must be left zero.
+ */
+ if (!priv->info->multiple_cascade_ports)
+ general_params->casc_port = ds->num_ports;
+
+ for (port = 0; port < ds->num_ports; port++) {
+ bool is_upstream = dsa_is_upstream_port(ds, port);
+ bool is_dsa_link = dsa_is_dsa_port(ds, port);
+
+ /* Upstream ports can be dedicated CPU ports or
+ * upstream-facing DSA links
+ */
+ if (is_upstream) {
+ if (general_params->host_port == ds->num_ports) {
+ general_params->host_port = port;
+ } else {
+ dev_err(ds->dev,
+ "Port %llu is already a host port, configuring %d as one too is not supported\n",
+ general_params->host_port, port);
+ return -EINVAL;
+ }
+ }
+
+ /* Cascade ports are downstream-facing DSA links */
+ if (is_dsa_link && !is_upstream) {
+ if (priv->info->multiple_cascade_ports) {
+ general_params->casc_port |= BIT(port);
+ } else if (general_params->casc_port == ds->num_ports) {
+ general_params->casc_port = port;
+ } else {
+ dev_err(ds->dev,
+ "Port %llu is already a cascade port, configuring %d as one too is not supported\n",
+ general_params->casc_port, port);
+ return -EINVAL;
+ }
+ }
+ }
+
+ if (general_params->host_port == ds->num_ports) {
+ dev_err(ds->dev, "No host port configured\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
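Example topology for the function above (hypothetical, two chips
daisy-chained):

	/*
	 *   CPU -- [sw0.p4 ... sw0.p3] -- [sw1.p3 ... user ports]
	 *
	 * sw0: p4 is upstream              -> host_port = 4
	 *      p3 is a downstream DSA link -> cascade port
	 * sw1: p3 is an upstream DSA link  -> host_port = 3, no cascade
	 */

On SJA1105, casc_port holds a single port number, so only one cascade port is
accepted; on SJA1110 it is a bitmask, which is why the code ORs in BIT(port)
when multiple_cascade_ports is set.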
static int sja1105_init_general_params(struct sja1105_private *priv)
{
struct sja1105_general_params_entry default_general_params = {
@@ -652,12 +842,6 @@ static int sja1105_init_general_params(struct sja1105_private *priv)
.mac_flt0 = SJA1105_LINKLOCAL_FILTER_B_MASK,
.incl_srcpt0 = false,
.send_meta0 = false,
- /* The destination for traffic matching mac_fltres1 and
- * mac_fltres0 on all ports except host_port. Such traffic
- * receieved on host_port itself would be dropped, except
- * by installing a temporary 'management route'
- */
- .host_port = priv->ds->num_ports,
/* Default to an invalid value */
.mirr_port = priv->ds->num_ports,
/* No TTEthernet */
@@ -677,16 +861,12 @@ static int sja1105_init_general_params(struct sja1105_private *priv)
.header_type = ETH_P_SJA1110,
};
struct sja1105_general_params_entry *general_params;
- struct dsa_switch *ds = priv->ds;
struct sja1105_table *table;
- int port;
+ int rc;
- for (port = 0; port < ds->num_ports; port++) {
- if (dsa_is_cpu_port(ds, port)) {
- default_general_params.host_port = port;
- break;
- }
- }
+ rc = sja1105_init_topology(priv, &default_general_params);
+ if (rc)
+ return rc;
table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
@@ -709,19 +889,6 @@ static int sja1105_init_general_params(struct sja1105_private *priv)
sja1110_select_tdmaconfigidx(priv);
- /* Link-local traffic received on casc_port will be forwarded
- * to host_port without embedding the source port and device ID
- * info in the destination MAC address, and no RX timestamps will be
- * taken either (presumably because it is a cascaded port and a
- * downstream SJA switch already did that).
- * To disable the feature, we need to do different things depending on
- * switch generation. On SJA1105 we need to set an invalid port, while
- * on SJA1110 which support multiple cascaded ports, this field is a
- * bitmask so it must be left zero.
- */
- if (!priv->info->multiple_cascade_ports)
- general_params->casc_port = ds->num_ports;
-
return 0;
}
@@ -849,7 +1016,7 @@ static int sja1105_init_l2_policing(struct sja1105_private *priv)
for (port = 0; port < ds->num_ports; port++) {
int mtu = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN;
- if (dsa_is_cpu_port(priv->ds, port))
+ if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))
mtu += VLAN_HLEN;
policing[port].smax = 65535; /* Burst size in bytes */
@@ -1568,18 +1735,6 @@ static int sja1105_fdb_add(struct dsa_switch *ds, int port,
{
struct sja1105_private *priv = ds->priv;
- /* dsa_8021q is in effect when the bridge's vlan_filtering isn't,
- * so the switch still does some VLAN processing internally.
- * But Shared VLAN Learning (SVL) is also active, and it will take
- * care of autonomous forwarding between the unique pvid's of each
- * port. Here we just make sure that users can't add duplicate FDB
- * entries when in this mode - the actual VID doesn't matter except
- * for what gets printed in 'bridge fdb show'. In the case of zero,
- * no VID gets printed at all.
- */
- if (priv->vlan_state != SJA1105_VLAN_FILTERING_FULL)
- vid = 0;
-
return priv->info->fdb_add_cmd(ds, port, addr, vid);
}
@@ -1588,9 +1743,6 @@ static int sja1105_fdb_del(struct dsa_switch *ds, int port,
{
struct sja1105_private *priv = ds->priv;
- if (priv->vlan_state != SJA1105_VLAN_FILTERING_FULL)
- vid = 0;
-
return priv->info->fdb_del_cmd(ds, port, addr, vid);
}
@@ -1633,7 +1785,7 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
u64_to_ether_addr(l2_lookup.macaddr, macaddr);
/* We need to hide the dsa_8021q VLANs from the user. */
- if (priv->vlan_state == SJA1105_VLAN_UNAWARE)
+ if (!priv->vlan_aware)
l2_lookup.vlanid = 0;
rc = cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data);
if (rc)
@@ -1642,6 +1794,46 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
return 0;
}
+static void sja1105_fast_age(struct dsa_switch *ds, int port)
+{
+ struct sja1105_private *priv = ds->priv;
+ int i;
+
+ for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
+ struct sja1105_l2_lookup_entry l2_lookup = {0};
+ u8 macaddr[ETH_ALEN];
+ int rc;
+
+ rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
+ i, &l2_lookup);
+ /* No fdb entry at i, not an issue */
+ if (rc == -ENOENT)
+ continue;
+ if (rc) {
+ dev_err(ds->dev, "Failed to read FDB: %pe\n",
+ ERR_PTR(rc));
+ return;
+ }
+
+ if (!(l2_lookup.destports & BIT(port)))
+ continue;
+
+ /* Don't delete static FDB entries */
+ if (l2_lookup.lockeds)
+ continue;
+
+ u64_to_ether_addr(l2_lookup.macaddr, macaddr);
+
+ rc = sja1105_fdb_del(ds, port, macaddr, l2_lookup.vlanid);
+ if (rc) {
+ dev_err(ds->dev,
+ "Failed to delete FDB entry %pM vid %lld: %pe\n",
+ macaddr, l2_lookup.vlanid, ERR_PTR(rc));
+ return;
+ }
+ }
+}
+
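There appears to be no single "flush dynamic entries for a port" command on
this family, hence the walk over all SJA1105_MAX_L2_LOOKUP_COUNT bins above,
skipping holes (-ENOENT), entries whose destports mask excludes the port, and
static (lockeds) entries. Presumably this is wired up as the .port_fast_age
DSA operation so the bridge layer can age a port out on STP transitions.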
static int sja1105_mdb_add(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_mdb *mdb)
{
@@ -1740,12 +1932,17 @@ static int sja1105_bridge_member(struct dsa_switch *ds, int port,
if (rc)
return rc;
+ rc = sja1105_commit_pvid(ds, port);
+ if (rc)
+ return rc;
+
return sja1105_manage_flood_domains(priv);
}
static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port,
u8 state)
{
+ struct dsa_port *dp = dsa_to_port(ds, port);
struct sja1105_private *priv = ds->priv;
struct sja1105_mac_config_entry *mac;
@@ -1771,12 +1968,12 @@ static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port,
case BR_STATE_LEARNING:
mac[port].ingress = true;
mac[port].egress = false;
- mac[port].dyn_learn = !!(priv->learn_ena & BIT(port));
+ mac[port].dyn_learn = dp->learning;
break;
case BR_STATE_FORWARDING:
mac[port].ingress = true;
mac[port].egress = true;
- mac[port].dyn_learn = !!(priv->learn_ena & BIT(port));
+ mac[port].dyn_learn = dp->learning;
break;
default:
dev_err(ds->dev, "invalid STP state: %d\n", state);
@@ -2039,97 +2236,6 @@ out:
return rc;
}
-static int sja1105_pvid_apply(struct sja1105_private *priv, int port, u16 pvid)
-{
- struct sja1105_mac_config_entry *mac;
-
- mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
-
- mac[port].vlanid = pvid;
-
- return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
- &mac[port], true);
-}
-
-static int sja1105_crosschip_bridge_join(struct dsa_switch *ds,
- int tree_index, int sw_index,
- int other_port, struct net_device *br)
-{
- struct dsa_switch *other_ds = dsa_switch_find(tree_index, sw_index);
- struct sja1105_private *other_priv = other_ds->priv;
- struct sja1105_private *priv = ds->priv;
- int port, rc;
-
- if (other_ds->ops != &sja1105_switch_ops)
- return 0;
-
- for (port = 0; port < ds->num_ports; port++) {
- if (!dsa_is_user_port(ds, port))
- continue;
- if (dsa_to_port(ds, port)->bridge_dev != br)
- continue;
-
- rc = dsa_8021q_crosschip_bridge_join(priv->dsa_8021q_ctx,
- port,
- other_priv->dsa_8021q_ctx,
- other_port);
- if (rc)
- return rc;
-
- rc = dsa_8021q_crosschip_bridge_join(other_priv->dsa_8021q_ctx,
- other_port,
- priv->dsa_8021q_ctx,
- port);
- if (rc)
- return rc;
- }
-
- return 0;
-}
-
-static void sja1105_crosschip_bridge_leave(struct dsa_switch *ds,
- int tree_index, int sw_index,
- int other_port,
- struct net_device *br)
-{
- struct dsa_switch *other_ds = dsa_switch_find(tree_index, sw_index);
- struct sja1105_private *other_priv = other_ds->priv;
- struct sja1105_private *priv = ds->priv;
- int port;
-
- if (other_ds->ops != &sja1105_switch_ops)
- return;
-
- for (port = 0; port < ds->num_ports; port++) {
- if (!dsa_is_user_port(ds, port))
- continue;
- if (dsa_to_port(ds, port)->bridge_dev != br)
- continue;
-
- dsa_8021q_crosschip_bridge_leave(priv->dsa_8021q_ctx, port,
- other_priv->dsa_8021q_ctx,
- other_port);
-
- dsa_8021q_crosschip_bridge_leave(other_priv->dsa_8021q_ctx,
- other_port,
- priv->dsa_8021q_ctx, port);
- }
-}
-
-static int sja1105_setup_8021q_tagging(struct dsa_switch *ds, bool enabled)
-{
- struct sja1105_private *priv = ds->priv;
- int rc;
-
- rc = dsa_8021q_setup(priv->dsa_8021q_ctx, enabled);
- if (rc)
- return rc;
-
- dev_info(ds->dev, "%s switch tagging\n",
- enabled ? "Enabled" : "Disabled");
- return 0;
-}
-
static enum dsa_tag_protocol
sja1105_get_tag_protocol(struct dsa_switch *ds, int port,
enum dsa_tag_protocol mp)
@@ -2139,669 +2245,6 @@ sja1105_get_tag_protocol(struct dsa_switch *ds, int port,
return priv->info->tag_proto;
}
-static int sja1105_find_free_subvlan(u16 *subvlan_map, bool pvid)
-{
- int subvlan;
-
- if (pvid)
- return 0;
-
- for (subvlan = 1; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
- if (subvlan_map[subvlan] == VLAN_N_VID)
- return subvlan;
-
- return -1;
-}
-
-static int sja1105_find_subvlan(u16 *subvlan_map, u16 vid)
-{
- int subvlan;
-
- for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
- if (subvlan_map[subvlan] == vid)
- return subvlan;
-
- return -1;
-}
-
-static int sja1105_find_committed_subvlan(struct sja1105_private *priv,
- int port, u16 vid)
-{
- struct sja1105_port *sp = &priv->ports[port];
-
- return sja1105_find_subvlan(sp->subvlan_map, vid);
-}
-
-static void sja1105_init_subvlan_map(u16 *subvlan_map)
-{
- int subvlan;
-
- for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
- subvlan_map[subvlan] = VLAN_N_VID;
-}
-
-static void sja1105_commit_subvlan_map(struct sja1105_private *priv, int port,
- u16 *subvlan_map)
-{
- struct sja1105_port *sp = &priv->ports[port];
- int subvlan;
-
- for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
- sp->subvlan_map[subvlan] = subvlan_map[subvlan];
-}
-
-static int sja1105_is_vlan_configured(struct sja1105_private *priv, u16 vid)
-{
- struct sja1105_vlan_lookup_entry *vlan;
- int count, i;
-
- vlan = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entries;
- count = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entry_count;
-
- for (i = 0; i < count; i++)
- if (vlan[i].vlanid == vid)
- return i;
-
- /* Return an invalid entry index if not found */
- return -1;
-}
-
-static int
-sja1105_find_retagging_entry(struct sja1105_retagging_entry *retagging,
- int count, int from_port, u16 from_vid,
- u16 to_vid)
-{
- int i;
-
- for (i = 0; i < count; i++)
- if (retagging[i].ing_port == BIT(from_port) &&
- retagging[i].vlan_ing == from_vid &&
- retagging[i].vlan_egr == to_vid)
- return i;
-
- /* Return an invalid entry index if not found */
- return -1;
-}
-
-static int sja1105_commit_vlans(struct sja1105_private *priv,
- struct sja1105_vlan_lookup_entry *new_vlan,
- struct sja1105_retagging_entry *new_retagging,
- int num_retagging)
-{
- struct sja1105_retagging_entry *retagging;
- struct sja1105_vlan_lookup_entry *vlan;
- struct sja1105_table *table;
- int num_vlans = 0;
- int rc, i, k = 0;
-
- /* VLAN table */
- table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
- vlan = table->entries;
-
- for (i = 0; i < VLAN_N_VID; i++) {
- int match = sja1105_is_vlan_configured(priv, i);
-
- if (new_vlan[i].vlanid != VLAN_N_VID)
- num_vlans++;
-
- if (new_vlan[i].vlanid == VLAN_N_VID && match >= 0) {
- /* Was there before, no longer is. Delete */
- dev_dbg(priv->ds->dev, "Deleting VLAN %d\n", i);
- rc = sja1105_dynamic_config_write(priv,
- BLK_IDX_VLAN_LOOKUP,
- i, &vlan[match], false);
- if (rc < 0)
- return rc;
- } else if (new_vlan[i].vlanid != VLAN_N_VID) {
- /* Nothing changed, don't do anything */
- if (match >= 0 &&
- vlan[match].vlanid == new_vlan[i].vlanid &&
- vlan[match].tag_port == new_vlan[i].tag_port &&
- vlan[match].vlan_bc == new_vlan[i].vlan_bc &&
- vlan[match].vmemb_port == new_vlan[i].vmemb_port)
- continue;
- /* Update entry */
- dev_dbg(priv->ds->dev, "Updating VLAN %d\n", i);
- rc = sja1105_dynamic_config_write(priv,
- BLK_IDX_VLAN_LOOKUP,
- i, &new_vlan[i],
- true);
- if (rc < 0)
- return rc;
- }
- }
-
- if (table->entry_count)
- kfree(table->entries);
-
- table->entries = kcalloc(num_vlans, table->ops->unpacked_entry_size,
- GFP_KERNEL);
- if (!table->entries)
- return -ENOMEM;
-
- table->entry_count = num_vlans;
- vlan = table->entries;
-
- for (i = 0; i < VLAN_N_VID; i++) {
- if (new_vlan[i].vlanid == VLAN_N_VID)
- continue;
- vlan[k++] = new_vlan[i];
- }
-
- /* VLAN Retagging Table */
- table = &priv->static_config.tables[BLK_IDX_RETAGGING];
- retagging = table->entries;
-
- for (i = 0; i < table->entry_count; i++) {
- rc = sja1105_dynamic_config_write(priv, BLK_IDX_RETAGGING,
- i, &retagging[i], false);
- if (rc)
- return rc;
- }
-
- if (table->entry_count)
- kfree(table->entries);
-
- table->entries = kcalloc(num_retagging, table->ops->unpacked_entry_size,
- GFP_KERNEL);
- if (!table->entries)
- return -ENOMEM;
-
- table->entry_count = num_retagging;
- retagging = table->entries;
-
- for (i = 0; i < num_retagging; i++) {
- retagging[i] = new_retagging[i];
-
- /* Update entry */
- rc = sja1105_dynamic_config_write(priv, BLK_IDX_RETAGGING,
- i, &retagging[i], true);
- if (rc < 0)
- return rc;
- }
-
- return 0;
-}
-
-struct sja1105_crosschip_vlan {
- struct list_head list;
- u16 vid;
- bool untagged;
- int port;
- int other_port;
- struct dsa_8021q_context *other_ctx;
-};
-
-struct sja1105_crosschip_switch {
- struct list_head list;
- struct dsa_8021q_context *other_ctx;
-};
-
-static int sja1105_commit_pvid(struct sja1105_private *priv)
-{
- struct sja1105_bridge_vlan *v;
- struct list_head *vlan_list;
- int rc = 0;
-
- if (priv->vlan_state == SJA1105_VLAN_FILTERING_FULL)
- vlan_list = &priv->bridge_vlans;
- else
- vlan_list = &priv->dsa_8021q_vlans;
-
- list_for_each_entry(v, vlan_list, list) {
- if (v->pvid) {
- rc = sja1105_pvid_apply(priv, v->port, v->vid);
- if (rc)
- break;
- }
- }
-
- return rc;
-}
-
-static int
-sja1105_build_bridge_vlans(struct sja1105_private *priv,
- struct sja1105_vlan_lookup_entry *new_vlan)
-{
- struct sja1105_bridge_vlan *v;
-
- if (priv->vlan_state == SJA1105_VLAN_UNAWARE)
- return 0;
-
- list_for_each_entry(v, &priv->bridge_vlans, list) {
- int match = v->vid;
-
- new_vlan[match].vlanid = v->vid;
- new_vlan[match].vmemb_port |= BIT(v->port);
- new_vlan[match].vlan_bc |= BIT(v->port);
- if (!v->untagged)
- new_vlan[match].tag_port |= BIT(v->port);
- new_vlan[match].type_entry = SJA1110_VLAN_D_TAG;
- }
-
- return 0;
-}
-
-static int
-sja1105_build_dsa_8021q_vlans(struct sja1105_private *priv,
- struct sja1105_vlan_lookup_entry *new_vlan)
-{
- struct sja1105_bridge_vlan *v;
-
- if (priv->vlan_state == SJA1105_VLAN_FILTERING_FULL)
- return 0;
-
- list_for_each_entry(v, &priv->dsa_8021q_vlans, list) {
- int match = v->vid;
-
- new_vlan[match].vlanid = v->vid;
- new_vlan[match].vmemb_port |= BIT(v->port);
- new_vlan[match].vlan_bc |= BIT(v->port);
- if (!v->untagged)
- new_vlan[match].tag_port |= BIT(v->port);
- new_vlan[match].type_entry = SJA1110_VLAN_D_TAG;
- }
-
- return 0;
-}
-
-static int sja1105_build_subvlans(struct sja1105_private *priv,
- u16 subvlan_map[][DSA_8021Q_N_SUBVLAN],
- struct sja1105_vlan_lookup_entry *new_vlan,
- struct sja1105_retagging_entry *new_retagging,
- int *num_retagging)
-{
- struct sja1105_bridge_vlan *v;
- int k = *num_retagging;
-
- if (priv->vlan_state != SJA1105_VLAN_BEST_EFFORT)
- return 0;
-
- list_for_each_entry(v, &priv->bridge_vlans, list) {
- int upstream = dsa_upstream_port(priv->ds, v->port);
- int match, subvlan;
- u16 rx_vid;
-
- /* Only sub-VLANs on user ports need to be applied.
- * Bridge VLANs also include VLANs added automatically
- * by DSA on the CPU port.
- */
- if (!dsa_is_user_port(priv->ds, v->port))
- continue;
-
- subvlan = sja1105_find_subvlan(subvlan_map[v->port],
- v->vid);
- if (subvlan < 0) {
- subvlan = sja1105_find_free_subvlan(subvlan_map[v->port],
- v->pvid);
- if (subvlan < 0) {
- dev_err(priv->ds->dev, "No more free subvlans\n");
- return -ENOSPC;
- }
- }
-
- rx_vid = dsa_8021q_rx_vid_subvlan(priv->ds, v->port, subvlan);
-
- /* @v->vid on @v->port needs to be retagged to @rx_vid
- * on @upstream. Assume @v->vid on @v->port and on
- * @upstream was already configured by the previous
- * iteration over bridge_vlans.
- */
- match = rx_vid;
- new_vlan[match].vlanid = rx_vid;
- new_vlan[match].vmemb_port |= BIT(v->port);
- new_vlan[match].vmemb_port |= BIT(upstream);
- new_vlan[match].vlan_bc |= BIT(v->port);
- new_vlan[match].vlan_bc |= BIT(upstream);
- /* The "untagged" flag is set the same as for the
- * original VLAN
- */
- if (!v->untagged)
- new_vlan[match].tag_port |= BIT(v->port);
- /* But it's always tagged towards the CPU */
- new_vlan[match].tag_port |= BIT(upstream);
- new_vlan[match].type_entry = SJA1110_VLAN_D_TAG;
-
- /* The Retagging Table generates packet *clones* with
- * the new VLAN. This is a very odd hardware quirk
- * which we need to suppress by dropping the original
- * packet.
- * Deny egress of the original VLAN towards the CPU
- * port. This will force the switch to drop it, and
- * we'll see only the retagged packets.
- */
- match = v->vid;
- new_vlan[match].vlan_bc &= ~BIT(upstream);
-
- /* And the retagging itself */
- new_retagging[k].vlan_ing = v->vid;
- new_retagging[k].vlan_egr = rx_vid;
- new_retagging[k].ing_port = BIT(v->port);
- new_retagging[k].egr_port = BIT(upstream);
- if (k++ == SJA1105_MAX_RETAGGING_COUNT) {
- dev_err(priv->ds->dev, "No more retagging rules\n");
- return -ENOSPC;
- }
-
- subvlan_map[v->port][subvlan] = v->vid;
- }
-
- *num_retagging = k;
-
- return 0;
-}
-
-/* Sadly, in crosschip scenarios where the CPU port is also the link to another
- * switch, we should retag backwards (the dsa_8021q vid to the original vid) on
- * the CPU port of neighbour switches.
- */
-static int
-sja1105_build_crosschip_subvlans(struct sja1105_private *priv,
- struct sja1105_vlan_lookup_entry *new_vlan,
- struct sja1105_retagging_entry *new_retagging,
- int *num_retagging)
-{
- struct sja1105_crosschip_vlan *tmp, *pos;
- struct dsa_8021q_crosschip_link *c;
- struct sja1105_bridge_vlan *v, *w;
- struct list_head crosschip_vlans;
- int k = *num_retagging;
- int rc = 0;
-
- if (priv->vlan_state != SJA1105_VLAN_BEST_EFFORT)
- return 0;
-
- INIT_LIST_HEAD(&crosschip_vlans);
-
- list_for_each_entry(c, &priv->dsa_8021q_ctx->crosschip_links, list) {
- struct sja1105_private *other_priv = c->other_ctx->ds->priv;
-
- if (other_priv->vlan_state == SJA1105_VLAN_FILTERING_FULL)
- continue;
-
- /* Crosschip links are also added to the CPU ports.
- * Ignore those.
- */
- if (!dsa_is_user_port(priv->ds, c->port))
- continue;
- if (!dsa_is_user_port(c->other_ctx->ds, c->other_port))
- continue;
-
- /* Search for VLANs on the remote port */
- list_for_each_entry(v, &other_priv->bridge_vlans, list) {
- bool already_added = false;
- bool we_have_it = false;
-
- if (v->port != c->other_port)
- continue;
-
- /* If @v is a pvid on @other_ds, it does not need
- * re-retagging, because its SVL field is 0 and we
- * already allow that, via the dsa_8021q crosschip
- * links.
- */
- if (v->pvid)
- continue;
-
- /* Search for the VLAN on our local port */
- list_for_each_entry(w, &priv->bridge_vlans, list) {
- if (w->port == c->port && w->vid == v->vid) {
- we_have_it = true;
- break;
- }
- }
-
- if (!we_have_it)
- continue;
-
- list_for_each_entry(tmp, &crosschip_vlans, list) {
- if (tmp->vid == v->vid &&
- tmp->untagged == v->untagged &&
- tmp->port == c->port &&
- tmp->other_port == v->port &&
- tmp->other_ctx == c->other_ctx) {
- already_added = true;
- break;
- }
- }
-
- if (already_added)
- continue;
-
- tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
- if (!tmp) {
- dev_err(priv->ds->dev, "Failed to allocate memory\n");
- rc = -ENOMEM;
- goto out;
- }
- tmp->vid = v->vid;
- tmp->port = c->port;
- tmp->other_port = v->port;
- tmp->other_ctx = c->other_ctx;
- tmp->untagged = v->untagged;
- list_add(&tmp->list, &crosschip_vlans);
- }
- }
-
- list_for_each_entry(tmp, &crosschip_vlans, list) {
- struct sja1105_private *other_priv = tmp->other_ctx->ds->priv;
- int upstream = dsa_upstream_port(priv->ds, tmp->port);
- int match, subvlan;
- u16 rx_vid;
-
- subvlan = sja1105_find_committed_subvlan(other_priv,
- tmp->other_port,
- tmp->vid);
- /* If this happens, it's a bug. The neighbour switch does not
- * have a subvlan for tmp->vid on tmp->other_port, but it
- * should, since we already checked for its vlan_state.
- */
- if (WARN_ON(subvlan < 0)) {
- rc = -EINVAL;
- goto out;
- }
-
- rx_vid = dsa_8021q_rx_vid_subvlan(tmp->other_ctx->ds,
- tmp->other_port,
- subvlan);
-
- /* The @rx_vid retagged from @tmp->vid on
- * {@tmp->other_ds, @tmp->other_port} needs to be
- * re-retagged to @tmp->vid on the way back to us.
- *
- * Assume the original @tmp->vid is already configured
- * on this local switch, otherwise we wouldn't be
- * retagging its subvlan on the other switch in the
- * first place. We just need to add a reverse retagging
- * rule for @rx_vid and install @rx_vid on our ports.
- */
- match = rx_vid;
- new_vlan[match].vlanid = rx_vid;
- new_vlan[match].vmemb_port |= BIT(tmp->port);
- new_vlan[match].vmemb_port |= BIT(upstream);
- /* The "untagged" flag is set the same as for the
- * original VLAN. And towards the CPU, it doesn't
- * really matter, because @rx_vid will only receive
- * traffic on that port. For consistency with other dsa_8021q
- * VLANs, we'll keep the CPU port tagged.
- */
- if (!tmp->untagged)
- new_vlan[match].tag_port |= BIT(tmp->port);
- new_vlan[match].tag_port |= BIT(upstream);
- new_vlan[match].type_entry = SJA1110_VLAN_D_TAG;
- /* Deny egress of @rx_vid towards our front-panel port.
- * This will force the switch to drop it, and we'll see
- * only the re-retagged packets (having the original,
- * pre-initial-retagging, VLAN @tmp->vid).
- */
- new_vlan[match].vlan_bc &= ~BIT(tmp->port);
-
- /* On reverse retagging, the same ingress VLAN goes to multiple
- * ports. So we have an opportunity to create composite rules
- * to not waste the limited space in the retagging table.
- */
- k = sja1105_find_retagging_entry(new_retagging, *num_retagging,
- upstream, rx_vid, tmp->vid);
- if (k < 0) {
- if (*num_retagging == SJA1105_MAX_RETAGGING_COUNT) {
- dev_err(priv->ds->dev, "No more retagging rules\n");
- rc = -ENOSPC;
- goto out;
- }
- k = (*num_retagging)++;
- }
- /* And the retagging itself */
- new_retagging[k].vlan_ing = rx_vid;
- new_retagging[k].vlan_egr = tmp->vid;
- new_retagging[k].ing_port = BIT(upstream);
- new_retagging[k].egr_port |= BIT(tmp->port);
- }
-
-out:
- list_for_each_entry_safe(tmp, pos, &crosschip_vlans, list) {
- list_del(&tmp->list);
- kfree(tmp);
- }
-
- return rc;
-}
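The deleted crosschip helper above compresses multiple reverse-retagging rules into shared entries: a rule is reused whenever the same (ingress port, ingress VID, egress VID) triple already exists, and only the egress port mask grows. A minimal sketch of that find-or-allocate idiom, using hypothetical retag_rule/retag_add names rather than driver types:

#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/types.h>

struct retag_rule {
	u16 vlan_ing;
	u16 vlan_egr;
	unsigned long ing_ports;
	unsigned long egr_ports;
};

static int retag_add(struct retag_rule *tab, int *n, int max,
		     int ing_port, u16 vin, u16 veg, int egr_port)
{
	int k;

	/* Reuse an existing rule with the same ingress port and VID pair */
	for (k = 0; k < *n; k++)
		if (tab[k].ing_ports == BIT(ing_port) &&
		    tab[k].vlan_ing == vin && tab[k].vlan_egr == veg)
			goto found;

	if (*n == max)
		return -ENOSPC;

	k = (*n)++;
	tab[k].vlan_ing = vin;
	tab[k].vlan_egr = veg;
	tab[k].ing_ports = BIT(ing_port);
	tab[k].egr_ports = 0;
found:
	/* New and reused rules alike only grow the egress port mask */
	tab[k].egr_ports |= BIT(egr_port);
	return 0;
}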
-
-static int sja1105_build_vlan_table(struct sja1105_private *priv, bool notify);
-
-static int sja1105_notify_crosschip_switches(struct sja1105_private *priv)
-{
- struct sja1105_crosschip_switch *s, *pos;
- struct list_head crosschip_switches;
- struct dsa_8021q_crosschip_link *c;
- int rc = 0;
-
- INIT_LIST_HEAD(&crosschip_switches);
-
- list_for_each_entry(c, &priv->dsa_8021q_ctx->crosschip_links, list) {
- bool already_added = false;
-
- list_for_each_entry(s, &crosschip_switches, list) {
- if (s->other_ctx == c->other_ctx) {
- already_added = true;
- break;
- }
- }
-
- if (already_added)
- continue;
-
- s = kzalloc(sizeof(*s), GFP_KERNEL);
- if (!s) {
- dev_err(priv->ds->dev, "Failed to allocate memory\n");
- rc = -ENOMEM;
- goto out;
- }
- s->other_ctx = c->other_ctx;
- list_add(&s->list, &crosschip_switches);
- }
-
- list_for_each_entry(s, &crosschip_switches, list) {
- struct sja1105_private *other_priv = s->other_ctx->ds->priv;
-
- rc = sja1105_build_vlan_table(other_priv, false);
- if (rc)
- goto out;
- }
-
-out:
- list_for_each_entry_safe(s, pos, &crosschip_switches, list) {
- list_del(&s->list);
- kfree(s);
- }
-
- return rc;
-}
-
-static int sja1105_build_vlan_table(struct sja1105_private *priv, bool notify)
-{
- u16 subvlan_map[SJA1105_MAX_NUM_PORTS][DSA_8021Q_N_SUBVLAN];
- struct sja1105_retagging_entry *new_retagging;
- struct sja1105_vlan_lookup_entry *new_vlan;
- struct sja1105_table *table;
- int i, num_retagging = 0;
- int rc;
-
- table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
- new_vlan = kcalloc(VLAN_N_VID,
- table->ops->unpacked_entry_size, GFP_KERNEL);
- if (!new_vlan)
- return -ENOMEM;
-
- table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
- new_retagging = kcalloc(SJA1105_MAX_RETAGGING_COUNT,
- table->ops->unpacked_entry_size, GFP_KERNEL);
- if (!new_retagging) {
- kfree(new_vlan);
- return -ENOMEM;
- }
-
- for (i = 0; i < VLAN_N_VID; i++)
- new_vlan[i].vlanid = VLAN_N_VID;
-
- for (i = 0; i < SJA1105_MAX_RETAGGING_COUNT; i++)
- new_retagging[i].vlan_ing = VLAN_N_VID;
-
- for (i = 0; i < priv->ds->num_ports; i++)
- sja1105_init_subvlan_map(subvlan_map[i]);
-
- /* Bridge VLANs */
- rc = sja1105_build_bridge_vlans(priv, new_vlan);
- if (rc)
- goto out;
-
- /* VLANs necessary for dsa_8021q operation, given to us by tag_8021q.c:
- * - RX VLANs
- * - TX VLANs
- * - Crosschip links
- */
- rc = sja1105_build_dsa_8021q_vlans(priv, new_vlan);
- if (rc)
- goto out;
-
- /* Private VLANs necessary for dsa_8021q operation, which we need to
- * determine on our own:
- * - Sub-VLANs
- * - Sub-VLANs of crosschip switches
- */
- rc = sja1105_build_subvlans(priv, subvlan_map, new_vlan, new_retagging,
- &num_retagging);
- if (rc)
- goto out;
-
- rc = sja1105_build_crosschip_subvlans(priv, new_vlan, new_retagging,
- &num_retagging);
- if (rc)
- goto out;
-
- rc = sja1105_commit_vlans(priv, new_vlan, new_retagging, num_retagging);
- if (rc)
- goto out;
-
- rc = sja1105_commit_pvid(priv);
- if (rc)
- goto out;
-
- for (i = 0; i < priv->ds->num_ports; i++)
- sja1105_commit_subvlan_map(priv, i, subvlan_map[i]);
-
- if (notify) {
- rc = sja1105_notify_crosschip_switches(priv);
- if (rc)
- goto out;
- }
-
-out:
- kfree(new_vlan);
- kfree(new_retagging);
-
- return rc;
-}
-
/* The TPID setting belongs to the General Parameters table,
* which can only be partially reconfigured at runtime (and not the TPID).
* So a switch reset is required.
@@ -2812,10 +2255,8 @@ int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
struct sja1105_l2_lookup_params_entry *l2_lookup_params;
struct sja1105_general_params_entry *general_params;
struct sja1105_private *priv = ds->priv;
- enum sja1105_vlan_state state;
struct sja1105_table *table;
struct sja1105_rule *rule;
- bool want_tagging;
u16 tpid, tpid2;
int rc;
@@ -2846,19 +2287,10 @@ int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
sp->xmit_tpid = ETH_P_SJA1105;
}
- if (!enabled)
- state = SJA1105_VLAN_UNAWARE;
- else if (priv->best_effort_vlan_filtering)
- state = SJA1105_VLAN_BEST_EFFORT;
- else
- state = SJA1105_VLAN_FILTERING_FULL;
-
- if (priv->vlan_state == state)
+ if (priv->vlan_aware == enabled)
return 0;
- priv->vlan_state = state;
- want_tagging = (state == SJA1105_VLAN_UNAWARE ||
- state == SJA1105_VLAN_BEST_EFFORT);
+ priv->vlan_aware = enabled;
table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
general_params = table->entries;
@@ -2872,8 +2304,6 @@ int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
general_params->incl_srcpt1 = enabled;
general_params->incl_srcpt0 = enabled;
- want_tagging = priv->best_effort_vlan_filtering || !enabled;
-
/* VLAN filtering => independent VLAN learning.
* No VLAN filtering (or best effort) => shared VLAN learning.
*
@@ -2894,132 +2324,143 @@ int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
*/
table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
l2_lookup_params = table->entries;
- l2_lookup_params->shared_learn = want_tagging;
+ l2_lookup_params->shared_learn = !priv->vlan_aware;
- sja1105_frame_memory_partitioning(priv);
+ for (port = 0; port < ds->num_ports; port++) {
+ if (dsa_is_unused_port(ds, port))
+ continue;
- rc = sja1105_build_vlan_table(priv, false);
- if (rc)
- return rc;
+ rc = sja1105_commit_pvid(ds, port);
+ if (rc)
+ return rc;
+ }
rc = sja1105_static_config_reload(priv, SJA1105_VLAN_FILTERING);
if (rc)
NL_SET_ERR_MSG_MOD(extack, "Failed to change VLAN Ethertype");
- /* Switch port identification based on 802.1Q is only passable
- * if we are not under a vlan_filtering bridge. So make sure
- * the two configurations are mutually exclusive (of course, the
- * user may know better, i.e. best_effort_vlan_filtering).
- */
- return sja1105_setup_8021q_tagging(ds, want_tagging);
+ return rc;
}
-/* Returns number of VLANs added (0 or 1) on success,
- * or a negative error code.
- */
-static int sja1105_vlan_add_one(struct dsa_switch *ds, int port, u16 vid,
- u16 flags, struct list_head *vlan_list)
-{
- bool untagged = flags & BRIDGE_VLAN_INFO_UNTAGGED;
- bool pvid = flags & BRIDGE_VLAN_INFO_PVID;
- struct sja1105_bridge_vlan *v;
-
- list_for_each_entry(v, vlan_list, list) {
- if (v->port == port && v->vid == vid) {
- /* Already added */
- if (v->untagged == untagged && v->pvid == pvid)
- /* Nothing changed */
- return 0;
-
- /* It's the same VLAN, but some of the flags changed
- * and the user did not bother to delete it first.
- * Update it and trigger sja1105_build_vlan_table.
- */
- v->untagged = untagged;
- v->pvid = pvid;
- return 1;
- }
- }
+static int sja1105_vlan_add(struct sja1105_private *priv, int port, u16 vid,
+ u16 flags)
+{
+ struct sja1105_vlan_lookup_entry *vlan;
+ struct sja1105_table *table;
+ int match, rc;
- v = kzalloc(sizeof(*v), GFP_KERNEL);
- if (!v) {
- dev_err(ds->dev, "Out of memory while storing VLAN\n");
- return -ENOMEM;
+ table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
+
+ match = sja1105_is_vlan_configured(priv, vid);
+ if (match < 0) {
+ rc = sja1105_table_resize(table, table->entry_count + 1);
+ if (rc)
+ return rc;
+ match = table->entry_count - 1;
}
- v->port = port;
- v->vid = vid;
- v->untagged = untagged;
- v->pvid = pvid;
- list_add(&v->list, vlan_list);
+ /* Assign pointer after the resize (it's new memory) */
+ vlan = table->entries;
- return 1;
+ vlan[match].type_entry = SJA1110_VLAN_D_TAG;
+ vlan[match].vlanid = vid;
+ vlan[match].vlan_bc |= BIT(port);
+ vlan[match].vmemb_port |= BIT(port);
+ if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
+ vlan[match].tag_port &= ~BIT(port);
+ else
+ vlan[match].tag_port |= BIT(port);
+
+ return sja1105_dynamic_config_write(priv, BLK_IDX_VLAN_LOOKUP, vid,
+ &vlan[match], true);
}
-/* Returns number of VLANs deleted (0 or 1) */
-static int sja1105_vlan_del_one(struct dsa_switch *ds, int port, u16 vid,
- struct list_head *vlan_list)
+static int sja1105_vlan_del(struct sja1105_private *priv, int port, u16 vid)
{
- struct sja1105_bridge_vlan *v, *n;
+ struct sja1105_vlan_lookup_entry *vlan;
+ struct sja1105_table *table;
+ bool keep = true;
+ int match, rc;
- list_for_each_entry_safe(v, n, vlan_list, list) {
- if (v->port == port && v->vid == vid) {
- list_del(&v->list);
- kfree(v);
- return 1;
- }
- }
+ table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
+
+ match = sja1105_is_vlan_configured(priv, vid);
+ /* Can't delete a missing entry. */
+ if (match < 0)
+ return 0;
+
+	/* The lookup above succeeded, so the entry exists */
+ vlan = table->entries;
+
+ vlan[match].vlanid = vid;
+ vlan[match].vlan_bc &= ~BIT(port);
+ vlan[match].vmemb_port &= ~BIT(port);
+ /* Also unset tag_port, just so we don't have a confusing bitmap
+ * (no practical purpose).
+ */
+ vlan[match].tag_port &= ~BIT(port);
+
+ /* If there's no port left as member of this VLAN,
+ * it's time for it to go.
+ */
+ if (!vlan[match].vmemb_port)
+ keep = false;
+
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_VLAN_LOOKUP, vid,
+ &vlan[match], keep);
+ if (rc < 0)
+ return rc;
+
+ if (!keep)
+ return sja1105_table_delete_entry(table, match);
return 0;
}
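To make the new helpers' contract concrete, here is a hedged, hypothetical caller (example_vlan_roundtrip, port 2 and VID 100 are illustrative; BRIDGE_VLAN_INFO_UNTAGGED is the real uapi flag from include/uapi/linux/if_bridge.h):

#include <linux/if_bridge.h>

/* Hypothetical caller, not driver code: install VID 100 as
 * egress-untagged on port 2, then remove it again. Deleting the last
 * member port makes sja1105_vlan_del() drop the hardware entry too
 * (keep == false in the dynamic config write above).
 */
static int example_vlan_roundtrip(struct sja1105_private *priv)
{
	int rc;

	rc = sja1105_vlan_add(priv, 2, 100, BRIDGE_VLAN_INFO_UNTAGGED);
	if (rc)
		return rc;

	return sja1105_vlan_del(priv, 2, 100);
}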
-static int sja1105_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan,
- struct netlink_ext_ack *extack)
+static int sja1105_bridge_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
{
struct sja1105_private *priv = ds->priv;
- bool vlan_table_changed = false;
+ u16 flags = vlan->flags;
int rc;
- /* If the user wants best-effort VLAN filtering (aka vlan_filtering
- * bridge plus tagging), be sure to at least deny alterations to the
- * configuration done by dsa_8021q.
+ /* Be sure to deny alterations to the configuration done by tag_8021q.
*/
- if (priv->vlan_state != SJA1105_VLAN_FILTERING_FULL &&
- vid_is_dsa_8021q(vlan->vid)) {
+ if (vid_is_dsa_8021q(vlan->vid)) {
NL_SET_ERR_MSG_MOD(extack,
"Range 1024-3071 reserved for dsa_8021q operation");
return -EBUSY;
}
- rc = sja1105_vlan_add_one(ds, port, vlan->vid, vlan->flags,
- &priv->bridge_vlans);
- if (rc < 0)
+ /* Always install bridge VLANs as egress-tagged on CPU and DSA ports */
+ if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))
+ flags = 0;
+
+ rc = sja1105_vlan_add(priv, port, vlan->vid, flags);
+ if (rc)
return rc;
- if (rc > 0)
- vlan_table_changed = true;
- if (!vlan_table_changed)
- return 0;
+ if (vlan->flags & BRIDGE_VLAN_INFO_PVID)
+ priv->bridge_pvid[port] = vlan->vid;
- return sja1105_build_vlan_table(priv, true);
+ return sja1105_commit_pvid(ds, port);
}
-static int sja1105_vlan_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
+static int sja1105_bridge_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
{
struct sja1105_private *priv = ds->priv;
- bool vlan_table_changed = false;
int rc;
- rc = sja1105_vlan_del_one(ds, port, vlan->vid, &priv->bridge_vlans);
- if (rc > 0)
- vlan_table_changed = true;
-
- if (!vlan_table_changed)
- return 0;
+ rc = sja1105_vlan_del(priv, port, vlan->vid);
+ if (rc)
+ return rc;
- return sja1105_build_vlan_table(priv, true);
+ /* In case the pvid was deleted, make sure that untagged packets will
+ * be dropped.
+ */
+ return sja1105_commit_pvid(ds, port);
}
static int sja1105_dsa_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid,
@@ -3028,180 +2469,48 @@ static int sja1105_dsa_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid,
struct sja1105_private *priv = ds->priv;
int rc;
- rc = sja1105_vlan_add_one(ds, port, vid, flags, &priv->dsa_8021q_vlans);
- if (rc <= 0)
+ rc = sja1105_vlan_add(priv, port, vid, flags);
+ if (rc)
return rc;
- return sja1105_build_vlan_table(priv, true);
+ if (flags & BRIDGE_VLAN_INFO_PVID)
+ priv->tag_8021q_pvid[port] = vid;
+
+ return sja1105_commit_pvid(ds, port);
}
static int sja1105_dsa_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid)
{
struct sja1105_private *priv = ds->priv;
- int rc;
-
- rc = sja1105_vlan_del_one(ds, port, vid, &priv->dsa_8021q_vlans);
- if (!rc)
- return 0;
- return sja1105_build_vlan_table(priv, true);
+ return sja1105_vlan_del(priv, port, vid);
}
-static const struct dsa_8021q_ops sja1105_dsa_8021q_ops = {
- .vlan_add = sja1105_dsa_8021q_vlan_add,
- .vlan_del = sja1105_dsa_8021q_vlan_del,
-};
-
-/* The programming model for the SJA1105 switch is "all-at-once" via static
- * configuration tables. Some of these can be dynamically modified at runtime,
- * but not the xMII mode parameters table.
- * Furthermode, some PHYs may not have crystals for generating their clocks
- * (e.g. RMII). Instead, their 50MHz clock is supplied via the SJA1105 port's
- * ref_clk pin. So port clocking needs to be initialized early, before
- * connecting to PHYs is attempted, otherwise they won't respond through MDIO.
- * Setting correct PHY link speed does not matter now.
- * But dsa_slave_phy_setup is called later than sja1105_setup, so the PHY
- * bindings are not yet parsed by DSA core. We need to parse early so that we
- * can populate the xMII mode parameters table.
- */
-static int sja1105_setup(struct dsa_switch *ds)
+static int sja1105_prechangeupper(struct dsa_switch *ds, int port,
+ struct netdev_notifier_changeupper_info *info)
{
- struct sja1105_private *priv = ds->priv;
- int rc;
+ struct netlink_ext_ack *extack = info->info.extack;
+ struct net_device *upper = info->upper_dev;
+ struct dsa_switch_tree *dst = ds->dst;
+ struct dsa_port *dp;
- rc = sja1105_parse_dt(priv);
- if (rc < 0) {
- dev_err(ds->dev, "Failed to parse DT: %d\n", rc);
- return rc;
- }
-
- /* Error out early if internal delays are required through DT
- * and we can't apply them.
- */
- rc = sja1105_parse_rgmii_delays(priv);
- if (rc < 0) {
- dev_err(ds->dev, "RGMII delay not supported\n");
- return rc;
- }
-
- rc = sja1105_ptp_clock_register(ds);
- if (rc < 0) {
- dev_err(ds->dev, "Failed to register PTP clock: %d\n", rc);
- return rc;
- }
-
- rc = sja1105_mdiobus_register(ds);
- if (rc < 0) {
- dev_err(ds->dev, "Failed to register MDIO bus: %pe\n",
- ERR_PTR(rc));
- goto out_ptp_clock_unregister;
- }
-
- if (priv->info->disable_microcontroller) {
- rc = priv->info->disable_microcontroller(priv);
- if (rc < 0) {
- dev_err(ds->dev,
- "Failed to disable microcontroller: %pe\n",
- ERR_PTR(rc));
- goto out_mdiobus_unregister;
- }
- }
-
- /* Create and send configuration down to device */
- rc = sja1105_static_config_load(priv);
- if (rc < 0) {
- dev_err(ds->dev, "Failed to load static config: %d\n", rc);
- goto out_mdiobus_unregister;
+ if (is_vlan_dev(upper)) {
+ NL_SET_ERR_MSG_MOD(extack, "8021q uppers are not supported");
+ return -EBUSY;
}
- /* Configure the CGU (PHY link modes and speeds) */
- if (priv->info->clocking_setup) {
- rc = priv->info->clocking_setup(priv);
- if (rc < 0) {
- dev_err(ds->dev,
- "Failed to configure MII clocking: %pe\n",
- ERR_PTR(rc));
- goto out_static_config_free;
+ if (netif_is_bridge_master(upper)) {
+ list_for_each_entry(dp, &dst->ports, list) {
+ if (dp->bridge_dev && dp->bridge_dev != upper &&
+ br_vlan_enabled(dp->bridge_dev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only one VLAN-aware bridge is supported");
+ return -EBUSY;
+ }
}
}
- /* On SJA1105, VLAN filtering per se is always enabled in hardware.
- * The only thing we can do to disable it is lie about what the 802.1Q
- * EtherType is.
- * So it will still try to apply VLAN filtering, but all ingress
- * traffic (except frames received with EtherType of ETH_P_SJA1105)
- * will be internally tagged with a distorted VLAN header where the
- * TPID is ETH_P_SJA1105, and the VLAN ID is the port pvid.
- */
- ds->vlan_filtering_is_global = true;
-
- /* Advertise the 8 egress queues */
- ds->num_tx_queues = SJA1105_NUM_TC;
-
- ds->mtu_enforcement_ingress = true;
-
- priv->best_effort_vlan_filtering = true;
-
- rc = sja1105_devlink_setup(ds);
- if (rc < 0)
- goto out_static_config_free;
-
- /* The DSA/switchdev model brings up switch ports in standalone mode by
- * default, and that means vlan_filtering is 0 since they're not under
- * a bridge, so it's safe to set up switch tagging at this time.
- */
- rtnl_lock();
- rc = sja1105_setup_8021q_tagging(ds, true);
- rtnl_unlock();
- if (rc)
- goto out_devlink_teardown;
-
return 0;
-
-out_devlink_teardown:
- sja1105_devlink_teardown(ds);
-out_mdiobus_unregister:
- sja1105_mdiobus_unregister(ds);
-out_ptp_clock_unregister:
- sja1105_ptp_clock_unregister(ds);
-out_static_config_free:
- sja1105_static_config_free(&priv->static_config);
-
- return rc;
-}
-
-static void sja1105_teardown(struct dsa_switch *ds)
-{
- struct sja1105_private *priv = ds->priv;
- struct sja1105_bridge_vlan *v, *n;
- int port;
-
- for (port = 0; port < ds->num_ports; port++) {
- struct sja1105_port *sp = &priv->ports[port];
-
- if (!dsa_is_user_port(ds, port))
- continue;
-
- if (sp->xmit_worker)
- kthread_destroy_worker(sp->xmit_worker);
- }
-
- sja1105_devlink_teardown(ds);
- sja1105_mdiobus_unregister(ds);
- sja1105_flower_teardown(ds);
- sja1105_tas_teardown(ds);
- sja1105_ptp_clock_unregister(ds);
- sja1105_static_config_free(&priv->static_config);
-
- list_for_each_entry_safe(v, n, &priv->dsa_8021q_vlans, list) {
- list_del(&v->list);
- kfree(v);
- }
-
- list_for_each_entry_safe(v, n, &priv->bridge_vlans, list) {
- list_del(&v->list);
- kfree(v);
- }
}
static void sja1105_port_disable(struct dsa_switch *ds, int port)
@@ -3337,7 +2646,7 @@ static int sja1105_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
new_mtu += VLAN_ETH_HLEN + ETH_FCS_LEN;
- if (dsa_is_cpu_port(ds, port))
+ if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))
new_mtu += VLAN_HLEN;
policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
@@ -3484,23 +2793,13 @@ static int sja1105_port_set_learning(struct sja1105_private *priv, int port,
bool enabled)
{
struct sja1105_mac_config_entry *mac;
- int rc;
mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
mac[port].dyn_learn = enabled;
- rc = sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
- &mac[port], true);
- if (rc)
- return rc;
-
- if (enabled)
- priv->learn_ena |= BIT(port);
- else
- priv->learn_ena &= ~BIT(port);
-
- return 0;
+ return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
+ &mac[port], true);
}
static int sja1105_port_ucast_bcast_flood(struct sja1105_private *priv, int to,
@@ -3616,7 +2915,190 @@ static int sja1105_port_bridge_flags(struct dsa_switch *ds, int port,
return 0;
}
-static const struct dsa_switch_ops sja1105_switch_ops = {
+static void sja1105_teardown_ports(struct sja1105_private *priv)
+{
+ struct dsa_switch *ds = priv->ds;
+ int port;
+
+ for (port = 0; port < ds->num_ports; port++) {
+ struct sja1105_port *sp = &priv->ports[port];
+
+ if (sp->xmit_worker)
+ kthread_destroy_worker(sp->xmit_worker);
+ }
+}
+
+static int sja1105_setup_ports(struct sja1105_private *priv)
+{
+ struct sja1105_tagger_data *tagger_data = &priv->tagger_data;
+ struct dsa_switch *ds = priv->ds;
+ int port, rc;
+
+ /* Connections between dsa_port and sja1105_port */
+ for (port = 0; port < ds->num_ports; port++) {
+ struct sja1105_port *sp = &priv->ports[port];
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct kthread_worker *worker;
+ struct net_device *slave;
+
+ if (!dsa_port_is_user(dp))
+ continue;
+
+ dp->priv = sp;
+ sp->dp = dp;
+ sp->data = tagger_data;
+ slave = dp->slave;
+ kthread_init_work(&sp->xmit_work, sja1105_port_deferred_xmit);
+ worker = kthread_create_worker(0, "%s_xmit", slave->name);
+ if (IS_ERR(worker)) {
+ rc = PTR_ERR(worker);
+ dev_err(ds->dev,
+ "failed to create deferred xmit thread: %d\n",
+ rc);
+ goto out_destroy_workers;
+ }
+ sp->xmit_worker = worker;
+ skb_queue_head_init(&sp->xmit_queue);
+ sp->xmit_tpid = ETH_P_SJA1105;
+ }
+
+ return 0;
+
+out_destroy_workers:
+ sja1105_teardown_ports(priv);
+ return rc;
+}
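The per-port worker above is the standard kthread_worker pattern from <linux/kthread.h>. A generic, self-contained sketch of that pattern, with foo_* placeholder names rather than driver symbols:

#include <linux/err.h>
#include <linux/kthread.h>

struct foo_port {
	struct kthread_worker *xmit_worker;
	struct kthread_work xmit_work;
};

static void foo_deferred_xmit(struct kthread_work *work)
{
	/* Runs in the dedicated kernel thread; may sleep */
}

static int foo_port_init(struct foo_port *fp, const char *name)
{
	kthread_init_work(&fp->xmit_work, foo_deferred_xmit);
	fp->xmit_worker = kthread_create_worker(0, "%s_xmit", name);
	if (IS_ERR(fp->xmit_worker))
		return PTR_ERR(fp->xmit_worker);
	return 0;
}

static void foo_port_exit(struct foo_port *fp)
{
	if (fp->xmit_worker)
		kthread_destroy_worker(fp->xmit_worker);
}

Work is then queued from the hot path with kthread_queue_work(), which is how a deferred-xmit path hands packets to this thread.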
+
+/* The programming model for the SJA1105 switch is "all-at-once" via static
+ * configuration tables. Some of these can be dynamically modified at runtime,
+ * but not the xMII mode parameters table.
+ * Furthermore, some PHYs may not have crystals for generating their clocks
+ * (e.g. RMII). Instead, their 50MHz clock is supplied via the SJA1105 port's
+ * ref_clk pin. So port clocking needs to be initialized early, before
+ * connecting to PHYs is attempted, otherwise they won't respond through MDIO.
+ * Setting the correct PHY link speed does not matter at this point.
+ * But dsa_slave_phy_setup is called later than sja1105_setup, so the PHY
+ * bindings are not yet parsed by DSA core. We need to parse early so that we
+ * can populate the xMII mode parameters table.
+ */
+static int sja1105_setup(struct dsa_switch *ds)
+{
+ struct sja1105_private *priv = ds->priv;
+ int rc;
+
+ if (priv->info->disable_microcontroller) {
+ rc = priv->info->disable_microcontroller(priv);
+ if (rc < 0) {
+ dev_err(ds->dev,
+ "Failed to disable microcontroller: %pe\n",
+ ERR_PTR(rc));
+ return rc;
+ }
+ }
+
+ /* Create and send configuration down to device */
+ rc = sja1105_static_config_load(priv);
+ if (rc < 0) {
+ dev_err(ds->dev, "Failed to load static config: %d\n", rc);
+ return rc;
+ }
+
+ /* Configure the CGU (PHY link modes and speeds) */
+ if (priv->info->clocking_setup) {
+ rc = priv->info->clocking_setup(priv);
+ if (rc < 0) {
+ dev_err(ds->dev,
+ "Failed to configure MII clocking: %pe\n",
+ ERR_PTR(rc));
+ goto out_static_config_free;
+ }
+ }
+
+ rc = sja1105_setup_ports(priv);
+ if (rc)
+ goto out_static_config_free;
+
+ sja1105_tas_setup(ds);
+ sja1105_flower_setup(ds);
+
+ rc = sja1105_ptp_clock_register(ds);
+ if (rc < 0) {
+ dev_err(ds->dev, "Failed to register PTP clock: %d\n", rc);
+ goto out_flower_teardown;
+ }
+
+ rc = sja1105_mdiobus_register(ds);
+ if (rc < 0) {
+ dev_err(ds->dev, "Failed to register MDIO bus: %pe\n",
+ ERR_PTR(rc));
+ goto out_ptp_clock_unregister;
+ }
+
+ rc = sja1105_devlink_setup(ds);
+ if (rc < 0)
+ goto out_mdiobus_unregister;
+
+ rtnl_lock();
+ rc = dsa_tag_8021q_register(ds, htons(ETH_P_8021Q));
+ rtnl_unlock();
+ if (rc)
+ goto out_devlink_teardown;
+
+ /* On SJA1105, VLAN filtering per se is always enabled in hardware.
+ * The only thing we can do to disable it is lie about what the 802.1Q
+ * EtherType is.
+ * So it will still try to apply VLAN filtering, but all ingress
+ * traffic (except frames received with EtherType of ETH_P_SJA1105)
+ * will be internally tagged with a distorted VLAN header where the
+ * TPID is ETH_P_SJA1105, and the VLAN ID is the port pvid.
+ */
+ ds->vlan_filtering_is_global = true;
+ ds->untag_bridge_pvid = true;
+ /* tag_8021q has 3 bits for the VBID, and the value 0 is reserved */
+ ds->num_fwd_offloading_bridges = 7;
+
+ /* Advertise the 8 egress queues */
+ ds->num_tx_queues = SJA1105_NUM_TC;
+
+ ds->mtu_enforcement_ingress = true;
+ ds->assisted_learning_on_cpu_port = true;
+
+ return 0;
+
+out_devlink_teardown:
+ sja1105_devlink_teardown(ds);
+out_mdiobus_unregister:
+ sja1105_mdiobus_unregister(ds);
+out_ptp_clock_unregister:
+ sja1105_ptp_clock_unregister(ds);
+out_flower_teardown:
+ sja1105_flower_teardown(ds);
+ sja1105_tas_teardown(ds);
+ sja1105_teardown_ports(priv);
+out_static_config_free:
+ sja1105_static_config_free(&priv->static_config);
+
+ return rc;
+}
+
+static void sja1105_teardown(struct dsa_switch *ds)
+{
+ struct sja1105_private *priv = ds->priv;
+
+ rtnl_lock();
+ dsa_tag_8021q_unregister(ds);
+ rtnl_unlock();
+
+ sja1105_devlink_teardown(ds);
+ sja1105_mdiobus_unregister(ds);
+ sja1105_ptp_clock_unregister(ds);
+ sja1105_flower_teardown(ds);
+ sja1105_tas_teardown(ds);
+ sja1105_teardown_ports(priv);
+ sja1105_static_config_free(&priv->static_config);
+}
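sja1105_teardown() releases everything in the reverse order that sja1105_setup() acquired it, and setup's goto labels form the matching unwind ladder. The idiom in the abstract, with placeholder acquire_*/release_* functions:

static int acquire_a(void), acquire_b(void), acquire_c(void);
static void release_a(void), release_b(void), release_c(void);

static int example_setup(void)
{
	int rc;

	rc = acquire_a();
	if (rc)
		return rc;

	rc = acquire_b();
	if (rc)
		goto out_release_a;

	rc = acquire_c();
	if (rc)
		goto out_release_b;

	return 0;

out_release_b:
	release_b();
out_release_a:
	release_a();
	return rc;
}

/* Teardown then runs the complete ladder, newest resource first */
static void example_teardown(void)
{
	release_c();
	release_b();
	release_a();
}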
+
+const struct dsa_switch_ops sja1105_switch_ops = {
.get_tag_protocol = sja1105_get_tag_protocol,
.setup = sja1105_setup,
.teardown = sja1105_teardown,
@@ -3635,14 +3117,15 @@ static const struct dsa_switch_ops sja1105_switch_ops = {
.port_fdb_dump = sja1105_fdb_dump,
.port_fdb_add = sja1105_fdb_add,
.port_fdb_del = sja1105_fdb_del,
+ .port_fast_age = sja1105_fast_age,
.port_bridge_join = sja1105_bridge_join,
.port_bridge_leave = sja1105_bridge_leave,
.port_pre_bridge_flags = sja1105_port_pre_bridge_flags,
.port_bridge_flags = sja1105_port_bridge_flags,
.port_stp_state_set = sja1105_bridge_stp_state_set,
.port_vlan_filtering = sja1105_vlan_filtering,
- .port_vlan_add = sja1105_vlan_add,
- .port_vlan_del = sja1105_vlan_del,
+ .port_vlan_add = sja1105_bridge_vlan_add,
+ .port_vlan_del = sja1105_bridge_vlan_del,
.port_mdb_add = sja1105_mdb_add,
.port_mdb_del = sja1105_mdb_del,
.port_hwtstamp_get = sja1105_hwtstamp_get,
@@ -3657,12 +3140,14 @@ static const struct dsa_switch_ops sja1105_switch_ops = {
.cls_flower_add = sja1105_cls_flower_add,
.cls_flower_del = sja1105_cls_flower_del,
.cls_flower_stats = sja1105_cls_flower_stats,
- .crosschip_bridge_join = sja1105_crosschip_bridge_join,
- .crosschip_bridge_leave = sja1105_crosschip_bridge_leave,
- .devlink_param_get = sja1105_devlink_param_get,
- .devlink_param_set = sja1105_devlink_param_set,
.devlink_info_get = sja1105_devlink_info_get,
+ .tag_8021q_vlan_add = sja1105_dsa_8021q_vlan_add,
+ .tag_8021q_vlan_del = sja1105_dsa_8021q_vlan_del,
+ .port_prechangeupper = sja1105_prechangeupper,
+ .port_bridge_tx_fwd_offload = dsa_tag_8021q_bridge_tx_fwd_offload,
+ .port_bridge_tx_fwd_unoffload = dsa_tag_8021q_bridge_tx_fwd_unoffload,
};
+EXPORT_SYMBOL_GPL(sja1105_switch_ops);
static const struct of_device_id sja1105_dt_ids[];
@@ -3715,12 +3200,11 @@ static int sja1105_check_device_id(struct sja1105_private *priv)
static int sja1105_probe(struct spi_device *spi)
{
- struct sja1105_tagger_data *tagger_data;
struct device *dev = &spi->dev;
struct sja1105_private *priv;
size_t max_xfer, max_msg;
struct dsa_switch *ds;
- int rc, port;
+ int rc;
if (!dev->of_node) {
dev_err(dev, "No DTS bindings for SJA1105 driver\n");
@@ -3800,95 +3284,42 @@ static int sja1105_probe(struct spi_device *spi)
ds->priv = priv;
priv->ds = ds;
- tagger_data = &priv->tagger_data;
-
mutex_init(&priv->ptp_data.lock);
mutex_init(&priv->mgmt_lock);
- priv->dsa_8021q_ctx = devm_kzalloc(dev, sizeof(*priv->dsa_8021q_ctx),
- GFP_KERNEL);
- if (!priv->dsa_8021q_ctx)
- return -ENOMEM;
-
- priv->dsa_8021q_ctx->ops = &sja1105_dsa_8021q_ops;
- priv->dsa_8021q_ctx->proto = htons(ETH_P_8021Q);
- priv->dsa_8021q_ctx->ds = ds;
-
- INIT_LIST_HEAD(&priv->dsa_8021q_ctx->crosschip_links);
- INIT_LIST_HEAD(&priv->bridge_vlans);
- INIT_LIST_HEAD(&priv->dsa_8021q_vlans);
-
- sja1105_tas_setup(ds);
- sja1105_flower_setup(ds);
+ rc = sja1105_parse_dt(priv);
+ if (rc < 0) {
+ dev_err(ds->dev, "Failed to parse DT: %d\n", rc);
+ return rc;
+ }
- rc = dsa_register_switch(priv->ds);
- if (rc)
+ /* Error out early if internal delays are required through DT
+ * and we can't apply them.
+ */
+ rc = sja1105_parse_rgmii_delays(priv);
+ if (rc < 0) {
+ dev_err(ds->dev, "RGMII delay not supported\n");
return rc;
+ }
if (IS_ENABLED(CONFIG_NET_SCH_CBS)) {
priv->cbs = devm_kcalloc(dev, priv->info->num_cbs_shapers,
sizeof(struct sja1105_cbs_entry),
GFP_KERNEL);
- if (!priv->cbs) {
- rc = -ENOMEM;
- goto out_unregister_switch;
- }
- }
-
- /* Connections between dsa_port and sja1105_port */
- for (port = 0; port < ds->num_ports; port++) {
- struct sja1105_port *sp = &priv->ports[port];
- struct dsa_port *dp = dsa_to_port(ds, port);
- struct net_device *slave;
- int subvlan;
-
- if (!dsa_is_user_port(ds, port))
- continue;
-
- dp->priv = sp;
- sp->dp = dp;
- sp->data = tagger_data;
- slave = dp->slave;
- kthread_init_work(&sp->xmit_work, sja1105_port_deferred_xmit);
- sp->xmit_worker = kthread_create_worker(0, "%s_xmit",
- slave->name);
- if (IS_ERR(sp->xmit_worker)) {
- rc = PTR_ERR(sp->xmit_worker);
- dev_err(ds->dev,
- "failed to create deferred xmit thread: %d\n",
- rc);
- goto out_destroy_workers;
- }
- skb_queue_head_init(&sp->xmit_queue);
- sp->xmit_tpid = ETH_P_SJA1105;
-
- for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
- sp->subvlan_map[subvlan] = VLAN_N_VID;
- }
-
- return 0;
-
-out_destroy_workers:
- while (port-- > 0) {
- struct sja1105_port *sp = &priv->ports[port];
-
- if (!dsa_is_user_port(ds, port))
- continue;
-
- kthread_destroy_worker(sp->xmit_worker);
+ if (!priv->cbs)
+ return -ENOMEM;
}
-out_unregister_switch:
- dsa_unregister_switch(ds);
-
- return rc;
+ return dsa_register_switch(priv->ds);
}
static int sja1105_remove(struct spi_device *spi)
{
struct sja1105_private *priv = spi_get_drvdata(spi);
+ struct dsa_switch *ds = priv->ds;
+
+ dsa_unregister_switch(ds);
- dsa_unregister_switch(priv->ds);
return 0;
}
diff --git a/drivers/net/dsa/sja1105/sja1105_vl.c b/drivers/net/dsa/sja1105/sja1105_vl.c
index f6e13e6c6a18..ec7b65daec20 100644
--- a/drivers/net/dsa/sja1105/sja1105_vl.c
+++ b/drivers/net/dsa/sja1105/sja1105_vl.c
@@ -496,14 +496,11 @@ int sja1105_vl_redirect(struct sja1105_private *priv, int port,
struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
int rc;
- if (priv->vlan_state == SJA1105_VLAN_UNAWARE &&
- key->type != SJA1105_KEY_VLAN_UNAWARE_VL) {
+ if (!priv->vlan_aware && key->type != SJA1105_KEY_VLAN_UNAWARE_VL) {
NL_SET_ERR_MSG_MOD(extack,
"Can only redirect based on DMAC");
return -EOPNOTSUPP;
- } else if ((priv->vlan_state == SJA1105_VLAN_BEST_EFFORT ||
- priv->vlan_state == SJA1105_VLAN_FILTERING_FULL) &&
- key->type != SJA1105_KEY_VLAN_AWARE_VL) {
+ } else if (priv->vlan_aware && key->type != SJA1105_KEY_VLAN_AWARE_VL) {
NL_SET_ERR_MSG_MOD(extack,
"Can only redirect based on {DMAC, VID, PCP}");
return -EOPNOTSUPP;
@@ -595,14 +592,11 @@ int sja1105_vl_gate(struct sja1105_private *priv, int port,
return -ERANGE;
}
- if (priv->vlan_state == SJA1105_VLAN_UNAWARE &&
- key->type != SJA1105_KEY_VLAN_UNAWARE_VL) {
+ if (!priv->vlan_aware && key->type != SJA1105_KEY_VLAN_UNAWARE_VL) {
NL_SET_ERR_MSG_MOD(extack,
"Can only gate based on DMAC");
return -EOPNOTSUPP;
- } else if ((priv->vlan_state == SJA1105_VLAN_BEST_EFFORT ||
- priv->vlan_state == SJA1105_VLAN_FILTERING_FULL) &&
- key->type != SJA1105_KEY_VLAN_AWARE_VL) {
+ } else if (priv->vlan_aware && key->type != SJA1105_KEY_VLAN_AWARE_VL) {
NL_SET_ERR_MSG_MOD(extack,
"Can only gate based on {DMAC, VID, PCP}");
return -EOPNOTSUPP;
diff --git a/drivers/net/eql.c b/drivers/net/eql.c
index 74263f8efe1a..8ef34901c2d8 100644
--- a/drivers/net/eql.c
+++ b/drivers/net/eql.c
@@ -113,6 +113,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/compat.h>
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/kernel.h>
@@ -131,7 +132,8 @@
static int eql_open(struct net_device *dev);
static int eql_close(struct net_device *dev);
-static int eql_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
+static int eql_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
+ void __user *data, int cmd);
static netdev_tx_t eql_slave_xmit(struct sk_buff *skb, struct net_device *dev);
#define eql_is_slave(dev) ((dev->flags & IFF_SLAVE) == IFF_SLAVE)
@@ -170,7 +172,7 @@ static const char version[] __initconst =
static const struct net_device_ops eql_netdev_ops = {
.ndo_open = eql_open,
.ndo_stop = eql_close,
- .ndo_do_ioctl = eql_ioctl,
+ .ndo_siocdevprivate = eql_siocdevprivate,
.ndo_start_xmit = eql_slave_xmit,
};
@@ -268,25 +270,29 @@ static int eql_s_slave_cfg(struct net_device *dev, slave_config_t __user *sc);
static int eql_g_master_cfg(struct net_device *dev, master_config_t __user *mc);
static int eql_s_master_cfg(struct net_device *dev, master_config_t __user *mc);
-static int eql_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+static int eql_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
+ void __user *data, int cmd)
{
if (cmd != EQL_GETMASTRCFG && cmd != EQL_GETSLAVECFG &&
!capable(CAP_NET_ADMIN))
return -EPERM;
+ if (in_compat_syscall()) /* to be implemented */
+ return -EOPNOTSUPP;
+
switch (cmd) {
case EQL_ENSLAVE:
- return eql_enslave(dev, ifr->ifr_data);
+ return eql_enslave(dev, data);
case EQL_EMANCIPATE:
- return eql_emancipate(dev, ifr->ifr_data);
+ return eql_emancipate(dev, data);
case EQL_GETSLAVECFG:
- return eql_g_slave_cfg(dev, ifr->ifr_data);
+ return eql_g_slave_cfg(dev, data);
case EQL_SETSLAVECFG:
- return eql_s_slave_cfg(dev, ifr->ifr_data);
+ return eql_s_slave_cfg(dev, data);
case EQL_GETMASTRCFG:
- return eql_g_master_cfg(dev, ifr->ifr_data);
+ return eql_g_master_cfg(dev, data);
case EQL_SETMASTRCFG:
- return eql_s_master_cfg(dev, ifr->ifr_data);
+ return eql_s_master_cfg(dev, data);
default:
return -EOPNOTSUPP;
}
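The eql conversion above illustrates the ndo_siocdevprivate migration: the core now passes the SIOCDEVPRIVATE argument as a void __user *data parameter, so compat translation can live in one place instead of every driver dereferencing ifr->ifr_data. A minimal handler shape under assumed names (foo_siocdevprivate and struct foo_cfg are hypothetical):

#include <linux/netdevice.h>
#include <linux/sockios.h>
#include <linux/uaccess.h>

struct foo_cfg {
	u32 mode;
};

static int foo_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
			      void __user *data, int cmd)
{
	struct foo_cfg cfg;

	/* Only SIOCDEVPRIVATE..SIOCDEVPRIVATE + 15 are routed here */
	if (cmd < SIOCDEVPRIVATE || cmd > SIOCDEVPRIVATE + 15)
		return -EOPNOTSUPP;

	if (copy_from_user(&cfg, data, sizeof(cfg)))
		return -EFAULT;

	/* ... act on cfg ... */
	return 0;
}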
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c
index 96cc5fc36eb5..87c906e744fb 100644
--- a/drivers/net/ethernet/3com/3c509.c
+++ b/drivers/net/ethernet/3com/3c509.c
@@ -302,7 +302,6 @@ static int el3_isa_match(struct device *pdev, unsigned int ndev)
return -ENOMEM;
SET_NETDEV_DEV(dev, pdev);
- netdev_boot_setup_check(dev);
if (!request_region(ioaddr, EL3_IO_EXTENT, "3c509-isa")) {
free_netdev(dev);
@@ -421,7 +420,6 @@ static int el3_pnp_probe(struct pnp_dev *pdev, const struct pnp_device_id *id)
return -ENOMEM;
}
SET_NETDEV_DEV(dev, &pdev->dev);
- netdev_boot_setup_check(dev);
el3_dev_fill(dev, phys_addr, ioaddr, irq, if_port, EL3_PNP);
pnp_set_drvdata(pdev, dev);
@@ -514,7 +512,9 @@ static int el3_common_init(struct net_device *dev)
{
struct el3_private *lp = netdev_priv(dev);
int err;
- const char *if_names[] = {"10baseT", "AUI", "undefined", "BNC"};
+ static const char * const if_names[] = {
+ "10baseT", "AUI", "undefined", "BNC"
+ };
spin_lock_init(&lp->lock);
@@ -588,7 +588,6 @@ static int el3_eisa_probe(struct device *device)
}
SET_NETDEV_DEV(dev, device);
- netdev_boot_setup_check(dev);
el3_dev_fill(dev, phys_addr, ioaddr, irq, if_port, EL3_EISA);
eisa_set_drvdata (edev, dev);
diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c
index 47b4215bb93b..8d90fed5d33e 100644
--- a/drivers/net/ethernet/3com/3c515.c
+++ b/drivers/net/ethernet/3com/3c515.c
@@ -407,7 +407,7 @@ MODULE_PARM_DESC(max_interrupt_work, "3c515 maximum events handled per interrupt
/* we will need locking (and refcounting) if we ever use it for more */
static LIST_HEAD(root_corkscrew_dev);
-int init_module(void)
+static int corkscrew_init_module(void)
{
int found = 0;
if (debug >= 0)
@@ -416,6 +416,7 @@ int init_module(void)
found++;
return found ? 0 : -ENODEV;
}
+module_init(corkscrew_init_module);
#else
struct net_device *tc515_probe(int unit)
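Several of the following hunks repeat one conversion: the legacy global init_module()/cleanup_module() pair becomes static, uniquely named functions registered via module_init()/module_exit(). The target shape, generically (foo_* names are placeholders):

#include <linux/module.h>

static int __init foo_init_module(void)
{
	/* Probe and register devices; return 0 or a -errno value */
	return 0;
}
module_init(foo_init_module);

static void __exit foo_cleanup_module(void)
{
	/* Unregister and free devices */
}
module_exit(foo_cleanup_module);

MODULE_LICENSE("GPL");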
diff --git a/drivers/net/ethernet/3com/3c574_cs.c b/drivers/net/ethernet/3com/3c574_cs.c
index f66e7fb9a2bb..dd4d3c48b98d 100644
--- a/drivers/net/ethernet/3com/3c574_cs.c
+++ b/drivers/net/ethernet/3com/3c574_cs.c
@@ -252,7 +252,7 @@ static const struct net_device_ops el3_netdev_ops = {
.ndo_start_xmit = el3_start_xmit,
.ndo_tx_timeout = el3_tx_timeout,
.ndo_get_stats = el3_get_stats,
- .ndo_do_ioctl = el3_ioctl,
+ .ndo_eth_ioctl = el3_ioctl,
.ndo_set_rx_mode = set_multicast_list,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
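The recurring .ndo_do_ioctl to .ndo_eth_ioctl rename moves the MII PHY ioctls (SIOCGMIIPHY, SIOCGMIIREG, SIOCSMIIREG) onto a dedicated hook with an unchanged signature. For phylib drivers the handler usually reduces to delegation, which is why several hunks simply wire up phy_do_ioctl or phy_do_ioctl_running; a hand-rolled equivalent sketch (foo_eth_ioctl is a placeholder):

#include <linux/netdevice.h>
#include <linux/phy.h>

static int foo_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	if (!netif_running(dev) || !dev->phydev)
		return -EINVAL;

	/* phy_mii_ioctl() handles SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG */
	return phy_mii_ioctl(dev->phydev, ifr, cmd);
}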
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 7d7d3ffe25c3..17c16333a412 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -1052,7 +1052,7 @@ static const struct net_device_ops boomrang_netdev_ops = {
.ndo_tx_timeout = vortex_tx_timeout,
.ndo_get_stats = vortex_get_stats,
#ifdef CONFIG_PCI
- .ndo_do_ioctl = vortex_ioctl,
+ .ndo_eth_ioctl = vortex_ioctl,
#endif
.ndo_set_rx_mode = set_rx_mode,
.ndo_set_mac_address = eth_mac_addr,
@@ -1069,7 +1069,7 @@ static const struct net_device_ops vortex_netdev_ops = {
.ndo_tx_timeout = vortex_tx_timeout,
.ndo_get_stats = vortex_get_stats,
#ifdef CONFIG_PCI
- .ndo_do_ioctl = vortex_ioctl,
+ .ndo_eth_ioctl = vortex_ioctl,
#endif
.ndo_set_rx_mode = set_rx_mode,
.ndo_set_mac_address = eth_mac_addr,
diff --git a/drivers/net/ethernet/3com/Kconfig b/drivers/net/ethernet/3com/Kconfig
index a52a3740f0c9..706bd59bf645 100644
--- a/drivers/net/ethernet/3com/Kconfig
+++ b/drivers/net/ethernet/3com/Kconfig
@@ -34,6 +34,7 @@ config EL3
config 3C515
tristate "3c515 ISA \"Fast EtherLink\""
depends on ISA && ISA_DMA_API && !PPC32
+ select NETDEV_LEGACY_INIT
help
If you have a 3Com ISA EtherLink XL "Corkscrew" 3c515 Fast Ethernet
network card, say Y here.
diff --git a/drivers/net/ethernet/8390/Kconfig b/drivers/net/ethernet/8390/Kconfig
index 9f4b302fd2ce..a4130e643342 100644
--- a/drivers/net/ethernet/8390/Kconfig
+++ b/drivers/net/ethernet/8390/Kconfig
@@ -102,6 +102,7 @@ config MCF8390
config NE2000
tristate "NE2000/NE1000 support"
depends on (ISA || (Q40 && m) || MACH_TX49XX || ATARI_ETHERNEC)
+ select NETDEV_LEGACY_INIT if ISA
select CRC32
help
If you have a network (Ethernet) card of this type, say Y here.
@@ -169,6 +170,7 @@ config STNIC
config ULTRA
tristate "SMC Ultra support"
depends on ISA
+ select NETDEV_LEGACY_INIT
select CRC32
help
If you have a network (Ethernet) card of this type, say Y here.
@@ -186,6 +188,7 @@ config ULTRA
config WD80x3
tristate "WD80*3 support"
depends on ISA
+ select NETDEV_LEGACY_INIT
select CRC32
help
If you have a network (Ethernet) card of this type, say Y here.
diff --git a/drivers/net/ethernet/8390/apne.c b/drivers/net/ethernet/8390/apne.c
index fe6c834c422e..da1ae37a9d73 100644
--- a/drivers/net/ethernet/8390/apne.c
+++ b/drivers/net/ethernet/8390/apne.c
@@ -75,7 +75,6 @@
#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */
-struct net_device * __init apne_probe(int unit);
static int apne_probe1(struct net_device *dev, int ioaddr);
static void apne_reset_8390(struct net_device *dev);
@@ -120,7 +119,7 @@ static u32 apne_msg_enable;
module_param_named(msg_enable, apne_msg_enable, uint, 0444);
MODULE_PARM_DESC(msg_enable, "Debug message level (see linux/netdevice.h for bitmap)");
-struct net_device * __init apne_probe(int unit)
+static struct net_device * __init apne_probe(void)
{
struct net_device *dev;
struct ei_device *ei_local;
@@ -150,10 +149,6 @@ struct net_device * __init apne_probe(int unit)
dev = alloc_ei_netdev();
if (!dev)
return ERR_PTR(-ENOMEM);
- if (unit >= 0) {
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
- }
ei_local = netdev_priv(dev);
ei_local->msg_enable = apne_msg_enable;
@@ -554,12 +549,11 @@ static irqreturn_t apne_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-#ifdef MODULE
static struct net_device *apne_dev;
static int __init apne_module_init(void)
{
- apne_dev = apne_probe(-1);
+ apne_dev = apne_probe();
return PTR_ERR_OR_ZERO(apne_dev);
}
@@ -579,7 +573,6 @@ static void __exit apne_module_exit(void)
}
module_init(apne_module_init);
module_exit(apne_module_exit);
-#endif
static int init_pcmcia(void)
{
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index 172947fc051a..6c6bdd5913ec 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -101,6 +101,13 @@ static inline struct ax_device *to_ax_dev(struct net_device *dev)
return (struct ax_device *)(ei_local + 1);
}
+void ax_NS8390_reinit(struct net_device *dev)
+{
+ ax_NS8390_init(dev, 1);
+}
+EXPORT_SYMBOL_GPL(ax_NS8390_reinit);
+
/*
* ax_initial_check
*
@@ -635,7 +642,7 @@ static void ax_eeprom_register_write(struct eeprom_93cx6 *eeprom)
static const struct net_device_ops ax_netdev_ops = {
.ndo_open = ax_open,
.ndo_stop = ax_close,
- .ndo_do_ioctl = ax_ioctl,
+ .ndo_eth_ioctl = ax_ioctl,
.ndo_start_xmit = ax_ei_start_xmit,
.ndo_tx_timeout = ax_ei_tx_timeout,
diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c
index 8c321dfc7b3b..3c370e686ec3 100644
--- a/drivers/net/ethernet/8390/axnet_cs.c
+++ b/drivers/net/ethernet/8390/axnet_cs.c
@@ -128,7 +128,7 @@ static inline struct axnet_dev *PRIV(struct net_device *dev)
static const struct net_device_ops axnet_netdev_ops = {
.ndo_open = axnet_open,
.ndo_stop = axnet_close,
- .ndo_do_ioctl = axnet_ioctl,
+ .ndo_eth_ioctl = axnet_ioctl,
.ndo_start_xmit = axnet_start_xmit,
.ndo_tx_timeout = axnet_tx_timeout,
.ndo_get_stats = get_stats,
diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c
index e9756d0ea5b8..53660bc8d6ff 100644
--- a/drivers/net/ethernet/8390/ne.c
+++ b/drivers/net/ethernet/8390/ne.c
@@ -923,7 +923,7 @@ static void __init ne_add_devices(void)
}
#ifdef MODULE
-int __init init_module(void)
+static int __init ne_init(void)
{
int retval;
ne_add_devices();
@@ -940,6 +940,7 @@ int __init init_module(void)
ne_loop_rm_unreg(0);
return retval;
}
+module_init(ne_init);
#else /* MODULE */
static int __init ne_init(void)
{
@@ -951,6 +952,7 @@ static int __init ne_init(void)
}
module_init(ne_init);
+#ifdef CONFIG_NETDEV_LEGACY_INIT
struct net_device * __init ne_probe(int unit)
{
int this_dev;
@@ -991,6 +993,7 @@ struct net_device * __init ne_probe(int unit)
return ERR_PTR(-ENODEV);
}
+#endif
#endif /* MODULE */
static void __exit ne_exit(void)
diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c
index cac036706382..96ad72abd373 100644
--- a/drivers/net/ethernet/8390/pcnet_cs.c
+++ b/drivers/net/ethernet/8390/pcnet_cs.c
@@ -223,7 +223,7 @@ static const struct net_device_ops pcnet_netdev_ops = {
.ndo_set_config = set_config,
.ndo_start_xmit = ei_start_xmit,
.ndo_get_stats = ei_get_stats,
- .ndo_do_ioctl = ei_ioctl,
+ .ndo_eth_ioctl = ei_ioctl,
.ndo_set_rx_mode = ei_set_multicast_list,
.ndo_tx_timeout = ei_tx_timeout,
.ndo_set_mac_address = eth_mac_addr,
diff --git a/drivers/net/ethernet/8390/smc-ultra.c b/drivers/net/ethernet/8390/smc-ultra.c
index 1d8ed7357b7f..0890fa493f70 100644
--- a/drivers/net/ethernet/8390/smc-ultra.c
+++ b/drivers/net/ethernet/8390/smc-ultra.c
@@ -522,7 +522,6 @@ static void ultra_pio_input(struct net_device *dev, int count,
/* We know skbuffs are padded to at least word alignment. */
insw(ioaddr + IOPD, buf, (count+1)>>1);
}
-
static void ultra_pio_output(struct net_device *dev, int count,
const unsigned char *buf, const int start_page)
{
@@ -572,8 +571,7 @@ MODULE_LICENSE("GPL");
/* This is set up so that only a single autoprobe takes place per call.
ISA device autoprobes on a running machine are not recommended. */
-int __init
-init_module(void)
+static int __init ultra_init_module(void)
{
struct net_device *dev;
int this_dev, found = 0;
@@ -600,6 +598,7 @@ init_module(void)
return 0;
return -ENXIO;
}
+module_init(ultra_init_module);
static void cleanup_card(struct net_device *dev)
{
@@ -613,8 +612,7 @@ static void cleanup_card(struct net_device *dev)
iounmap(ei_status.mem);
}
-void __exit
-cleanup_module(void)
+static void __exit ultra_cleanup_module(void)
{
int this_dev;
@@ -627,4 +625,5 @@ cleanup_module(void)
}
}
}
+module_exit(ultra_cleanup_module);
#endif /* MODULE */
diff --git a/drivers/net/ethernet/8390/wd.c b/drivers/net/ethernet/8390/wd.c
index c834123560f1..263a942d81fa 100644
--- a/drivers/net/ethernet/8390/wd.c
+++ b/drivers/net/ethernet/8390/wd.c
@@ -519,7 +519,7 @@ MODULE_LICENSE("GPL");
/* This is set up so that only a single autoprobe takes place per call.
ISA device autoprobes on a running machine are not recommended. */
-int __init init_module(void)
+static int __init wd_init_module(void)
{
struct net_device *dev;
int this_dev, found = 0;
@@ -548,6 +548,7 @@ int __init init_module(void)
return 0;
return -ENXIO;
}
+module_init(wd_init_module);
static void cleanup_card(struct net_device *dev)
{
@@ -556,8 +557,7 @@ static void cleanup_card(struct net_device *dev)
iounmap(ei_status.mem);
}
-void __exit
-cleanup_module(void)
+static void __exit wd_cleanup_module(void)
{
int this_dev;
@@ -570,4 +570,5 @@ cleanup_module(void)
}
}
}
+module_exit(wd_cleanup_module);
#endif /* MODULE */
diff --git a/drivers/net/ethernet/8390/xsurf100.c b/drivers/net/ethernet/8390/xsurf100.c
index e2c963821ffe..fe7a74707aa4 100644
--- a/drivers/net/ethernet/8390/xsurf100.c
+++ b/drivers/net/ethernet/8390/xsurf100.c
@@ -22,8 +22,6 @@
#define XS100_8390_DATA_WRITE32_BASE 0x0C80
#define XS100_8390_DATA_AREA_SIZE 0x80
-#define __NS8390_init ax_NS8390_init
-
/* force unsigned long back to 'void __iomem *' */
#define ax_convert_addr(_a) ((void __force __iomem *)(_a))
@@ -42,10 +40,7 @@
/* Ensure we have our RCR base value */
#define AX88796_PLATFORM
-static unsigned char version[] =
- "ax88796.c: Copyright 2005,2007 Simtec Electronics\n";
-
-#include "lib8390.c"
+#include "8390.h"
/* from ne.c */
#define NE_CMD EI_SHIFT(0x00)
@@ -232,7 +227,7 @@ static void xs100_block_output(struct net_device *dev, int count,
if (jiffies - dma_start > 2 * HZ / 100) { /* 20ms */
netdev_warn(dev, "timeout waiting for Tx RDC.\n");
ei_local->reset_8390(dev);
- ax_NS8390_init(dev, 1);
+ ax_NS8390_reinit(dev);
break;
}
}
diff --git a/drivers/net/ethernet/actions/owl-emac.c b/drivers/net/ethernet/actions/owl-emac.c
index b8e771c2bc40..c4ecf4fcadf8 100644
--- a/drivers/net/ethernet/actions/owl-emac.c
+++ b/drivers/net/ethernet/actions/owl-emac.c
@@ -1179,8 +1179,8 @@ static int owl_emac_ndo_set_mac_addr(struct net_device *netdev, void *addr)
return owl_emac_setup_frame_xmit(netdev_priv(netdev));
}
-static int owl_emac_ndo_do_ioctl(struct net_device *netdev,
- struct ifreq *req, int cmd)
+static int owl_emac_ndo_eth_ioctl(struct net_device *netdev,
+ struct ifreq *req, int cmd)
{
if (!netif_running(netdev))
return -EINVAL;
@@ -1224,7 +1224,7 @@ static const struct net_device_ops owl_emac_netdev_ops = {
.ndo_set_rx_mode = owl_emac_ndo_set_rx_mode,
.ndo_set_mac_address = owl_emac_ndo_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = owl_emac_ndo_do_ioctl,
+ .ndo_eth_ioctl = owl_emac_ndo_eth_ioctl,
.ndo_tx_timeout = owl_emac_ndo_tx_timeout,
.ndo_get_stats = owl_emac_ndo_get_stats,
};
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index 7965e5e3c985..e0f6cc910bd2 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -625,7 +625,7 @@ static const struct net_device_ops netdev_ops = {
.ndo_tx_timeout = tx_timeout,
.ndo_get_stats = get_stats,
.ndo_set_rx_mode = set_rx_mode,
- .ndo_do_ioctl = netdev_ioctl,
+ .ndo_eth_ioctl = netdev_ioctl,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
#ifdef VLAN_SUPPORT
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index 41f8821f792d..920633161174 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -3882,7 +3882,7 @@ static const struct net_device_ops et131x_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_get_stats = et131x_stats,
- .ndo_do_ioctl = phy_do_ioctl,
+ .ndo_eth_ioctl = phy_do_ioctl,
};
static int et131x_pci_setup(struct pci_dev *pdev,
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index f99ae317c188..037baea1c738 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -774,7 +774,7 @@ static const struct net_device_ops emac_netdev_ops = {
.ndo_start_xmit = emac_start_xmit,
.ndo_tx_timeout = emac_timeout,
.ndo_set_rx_mode = emac_set_rx_mode,
- .ndo_do_ioctl = phy_do_ioctl_running,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = emac_set_mac_address,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index d0b0609bbe23..4786f0504691 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -46,6 +46,7 @@ config AMD8111_ETH
config LANCE
tristate "AMD LANCE and PCnet (AT1500 and NE2100) support"
depends on ISA && ISA_DMA_API && !ARM && !PPC32
+ select NETDEV_LEGACY_INIT
help
If you have a network (Ethernet) card of this type, say Y here.
Some LinkSys cards are of this type.
@@ -132,6 +133,7 @@ config PCMCIA_NMCLAN
config NI65
tristate "NI6510 support"
depends on ISA && ISA_DMA_API && !ARM && !PPC32
+ select NETDEV_LEGACY_INIT
help
If you have a network (Ethernet) card of this type, say Y here.
@@ -168,11 +170,11 @@ config AMD_XGBE
tristate "AMD 10GbE Ethernet driver"
depends on ((OF_NET && OF_ADDRESS) || ACPI || PCI) && HAS_IOMEM
depends on X86 || ARM64 || COMPILE_TEST
+ depends on PTP_1588_CLOCK_OPTIONAL
select BITREVERSE
select CRC32
select PHYLIB
select AMD_XGBE_HAVE_ECC if X86
- imply PTP_1588_CLOCK
help
This driver supports the AMD 10GbE Ethernet device found on an
AMD SoC.
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index 9cac5aa75a73..92e4246dc359 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -1729,7 +1729,7 @@ static const struct net_device_ops amd8111e_netdev_ops = {
.ndo_set_rx_mode = amd8111e_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = amd8111e_set_mac_address,
- .ndo_do_ioctl = amd8111e_ioctl,
+ .ndo_eth_ioctl = amd8111e_ioctl,
.ndo_change_mtu = amd8111e_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = amd8111e_poll,
diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c
index 36f54d13a2eb..9d2f49fd945e 100644
--- a/drivers/net/ethernet/amd/atarilance.c
+++ b/drivers/net/ethernet/amd/atarilance.c
@@ -367,7 +367,7 @@ static void *slow_memcpy( void *dst, const void *src, size_t len )
}
-struct net_device * __init atarilance_probe(int unit)
+struct net_device * __init atarilance_probe(void)
{
int i;
static int found;
@@ -382,10 +382,6 @@ struct net_device * __init atarilance_probe(int unit)
dev = alloc_etherdev(sizeof(struct lance_private));
if (!dev)
return ERR_PTR(-ENOMEM);
- if (unit >= 0) {
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
- }
for( i = 0; i < N_LANCE_ADDR; ++i ) {
if (lance_probe1( dev, &lance_addr_list[i] )) {
@@ -1137,13 +1133,11 @@ static int lance_set_mac_address( struct net_device *dev, void *addr )
return 0;
}
-
-#ifdef MODULE
static struct net_device *atarilance_dev;
static int __init atarilance_module_init(void)
{
- atarilance_dev = atarilance_probe(-1);
+ atarilance_dev = atarilance_probe();
return PTR_ERR_OR_ZERO(atarilance_dev);
}
@@ -1155,4 +1149,3 @@ static void __exit atarilance_module_exit(void)
}
module_init(atarilance_module_init);
module_exit(atarilance_module_exit);
-#endif /* MODULE */
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index 19e195420e24..9c1636222b99 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -1051,7 +1051,7 @@ static const struct net_device_ops au1000_netdev_ops = {
.ndo_stop = au1000_close,
.ndo_start_xmit = au1000_tx,
.ndo_set_rx_mode = au1000_multicast_list,
- .ndo_do_ioctl = phy_do_ioctl_running,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_tx_timeout = au1000_tx_timeout,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/ethernet/amd/lance.c b/drivers/net/ethernet/amd/lance.c
index 2178e6b89dbd..945bf1d87507 100644
--- a/drivers/net/ethernet/amd/lance.c
+++ b/drivers/net/ethernet/amd/lance.c
@@ -327,7 +327,7 @@ MODULE_PARM_DESC(dma, "LANCE/PCnet ISA DMA channel (ignored for some devices)");
MODULE_PARM_DESC(irq, "LANCE/PCnet IRQ number (ignored for some devices)");
MODULE_PARM_DESC(lance_debug, "LANCE/PCnet debug level (0-7)");
-int __init init_module(void)
+static int __init lance_init_module(void)
{
struct net_device *dev;
int this_dev, found = 0;
@@ -356,6 +356,7 @@ int __init init_module(void)
return 0;
return -ENXIO;
}
+module_init(lance_init_module);
static void cleanup_card(struct net_device *dev)
{
@@ -368,7 +369,7 @@ static void cleanup_card(struct net_device *dev)
kfree(lp);
}
-void __exit cleanup_module(void)
+static void __exit lance_cleanup_module(void)
{
int this_dev;
@@ -381,6 +382,7 @@ void __exit cleanup_module(void)
}
}
}
+module_exit(lance_cleanup_module);
#endif /* MODULE */
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/amd/mvme147.c b/drivers/net/ethernet/amd/mvme147.c
index 3f2e4cdd0b83..da97fccea9ea 100644
--- a/drivers/net/ethernet/amd/mvme147.c
+++ b/drivers/net/ethernet/amd/mvme147.c
@@ -68,7 +68,7 @@ static const struct net_device_ops lance_netdev_ops = {
};
/* Initialise the one and only on-board 7990 */
-struct net_device * __init mvme147lance_probe(int unit)
+static struct net_device * __init mvme147lance_probe(void)
{
struct net_device *dev;
static int called;
@@ -86,9 +86,6 @@ struct net_device * __init mvme147lance_probe(int unit)
if (!dev)
return ERR_PTR(-ENOMEM);
- if (unit >= 0)
- sprintf(dev->name, "eth%d", unit);
-
/* Fill the dev fields */
dev->base_addr = (unsigned long)MVME147_LANCE_BASE;
dev->netdev_ops = &lance_netdev_ops;
@@ -179,22 +176,21 @@ static int m147lance_close(struct net_device *dev)
return 0;
}
-#ifdef MODULE
MODULE_LICENSE("GPL");
static struct net_device *dev_mvme147_lance;
-int __init init_module(void)
+static int __init m147lance_init(void)
{
- dev_mvme147_lance = mvme147lance_probe(-1);
+ dev_mvme147_lance = mvme147lance_probe();
return PTR_ERR_OR_ZERO(dev_mvme147_lance);
}
+module_init(m147lance_init);
-void __exit cleanup_module(void)
+static void __exit m147lance_exit(void)
{
struct m147lance_private *lp = netdev_priv(dev_mvme147_lance);
unregister_netdev(dev_mvme147_lance);
free_pages(lp->ram, 3);
free_netdev(dev_mvme147_lance);
}
-
-#endif /* MODULE */
+module_exit(m147lance_exit);
diff --git a/drivers/net/ethernet/amd/ni65.c b/drivers/net/ethernet/amd/ni65.c
index 5c1cfb0c4a42..b5df7ad5a83f 100644
--- a/drivers/net/ethernet/amd/ni65.c
+++ b/drivers/net/ethernet/amd/ni65.c
@@ -1230,18 +1230,20 @@ MODULE_PARM_DESC(irq, "ni6510 IRQ number (ignored for some cards)");
MODULE_PARM_DESC(io, "ni6510 I/O base address");
MODULE_PARM_DESC(dma, "ni6510 ISA DMA channel (ignored for some cards)");
-int __init init_module(void)
+static int __init ni65_init_module(void)
{
dev_ni65 = ni65_probe(-1);
return PTR_ERR_OR_ZERO(dev_ni65);
}
+module_init(ni65_init_module);
-void __exit cleanup_module(void)
+static void __exit ni65_cleanup_module(void)
{
unregister_netdev(dev_ni65);
cleanup_card(dev_ni65);
free_netdev(dev_ni65);
}
+module_exit(ni65_cleanup_module);
#endif /* MODULE */
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index 4100ab07e6b7..70d76fdb9f56 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -1572,7 +1572,7 @@ static const struct net_device_ops pcnet32_netdev_ops = {
.ndo_tx_timeout = pcnet32_tx_timeout,
.ndo_get_stats = pcnet32_get_stats,
.ndo_set_rx_mode = pcnet32_set_multicast_list,
- .ndo_do_ioctl = pcnet32_ioctl,
+ .ndo_eth_ioctl = pcnet32_ioctl,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/amd/sun3lance.c b/drivers/net/ethernet/amd/sun3lance.c
index f8d7a9387a56..4a845bc071b2 100644
--- a/drivers/net/ethernet/amd/sun3lance.c
+++ b/drivers/net/ethernet/amd/sun3lance.c
@@ -245,7 +245,7 @@ static void set_multicast_list( struct net_device *dev );
/************************* End of Prototypes **************************/
-struct net_device * __init sun3lance_probe(int unit)
+static struct net_device * __init sun3lance_probe(void)
{
struct net_device *dev;
static int found;
@@ -272,10 +272,6 @@ struct net_device * __init sun3lance_probe(int unit)
dev = alloc_etherdev(sizeof(struct lance_private));
if (!dev)
return ERR_PTR(-ENOMEM);
- if (unit >= 0) {
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
- }
if (!lance_probe(dev))
goto out;
@@ -924,17 +920,16 @@ static void set_multicast_list( struct net_device *dev )
}
-#ifdef MODULE
-
static struct net_device *sun3lance_dev;
-int __init init_module(void)
+static int __init sun3lance_init(void)
{
- sun3lance_dev = sun3lance_probe(-1);
+ sun3lance_dev = sun3lance_probe();
return PTR_ERR_OR_ZERO(sun3lance_dev);
}
+module_init(sun3lance_init);
-void __exit cleanup_module(void)
+static void __exit sun3lance_cleanup(void)
{
unregister_netdev(sun3lance_dev);
#ifdef CONFIG_SUN3
@@ -942,6 +937,4 @@ void __exit cleanup_module(void)
#endif
free_netdev(sun3lance_dev);
}
-
-#endif /* MODULE */
-
+module_exit(sun3lance_cleanup);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 4f714f874c4f..17a585adfb49 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -2284,7 +2284,7 @@ static const struct net_device_ops xgbe_netdev_ops = {
.ndo_set_rx_mode = xgbe_set_rx_mode,
.ndo_set_mac_address = xgbe_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = xgbe_ioctl,
+ .ndo_eth_ioctl = xgbe_ioctl,
.ndo_change_mtu = xgbe_change_mtu,
.ndo_tx_timeout = xgbe_tx_timeout,
.ndo_get_stats64 = xgbe_get_stats64,
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index 4af0cd9530de..e22935ce9573 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -421,7 +421,7 @@ static const struct net_device_ops aq_ndev_ops = {
.ndo_change_mtu = aq_ndev_change_mtu,
.ndo_set_mac_address = aq_ndev_set_mac_address,
.ndo_set_features = aq_ndev_set_features,
- .ndo_do_ioctl = aq_ndev_ioctl,
+ .ndo_eth_ioctl = aq_ndev_ioctl,
.ndo_vlan_rx_add_vid = aq_ndo_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = aq_ndo_vlan_rx_kill_vid,
.ndo_setup_tc = aq_ndo_setup_tc,
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index 67b8113a2b53..38c288ec9059 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -844,7 +844,7 @@ static const struct net_device_ops arc_emac_netdev_ops = {
.ndo_set_mac_address = arc_emac_set_address,
.ndo_get_stats = arc_emac_stats,
.ndo_set_rx_mode = arc_emac_set_rx_mode,
- .ndo_do_ioctl = phy_do_ioctl_running,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = arc_emac_poll_controller,
#endif
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
index 1ba81b1eb6fd..02ae98aabf91 100644
--- a/drivers/net/ethernet/atheros/ag71xx.c
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -1851,7 +1851,7 @@ static const struct net_device_ops ag71xx_netdev_ops = {
.ndo_open = ag71xx_open,
.ndo_stop = ag71xx_stop,
.ndo_start_xmit = ag71xx_hard_start_xmit,
- .ndo_do_ioctl = phy_do_ioctl,
+ .ndo_eth_ioctl = phy_do_ioctl,
.ndo_tx_timeout = ag71xx_tx_timeout,
.ndo_change_mtu = ag71xx_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 11ef1fbe7aee..4ea157efca86 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1701,7 +1701,7 @@ static const struct net_device_ops alx_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = alx_set_mac_address,
.ndo_change_mtu = alx_change_mtu,
- .ndo_do_ioctl = alx_ioctl,
+ .ndo_eth_ioctl = alx_ioctl,
.ndo_tx_timeout = alx_tx_timeout,
.ndo_fix_features = alx_fix_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 1c6246a5dc22..3b51b172b317 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -2609,7 +2609,7 @@ static const struct net_device_ops atl1c_netdev_ops = {
.ndo_change_mtu = atl1c_change_mtu,
.ndo_fix_features = atl1c_fix_features,
.ndo_set_features = atl1c_set_features,
- .ndo_do_ioctl = atl1c_ioctl,
+ .ndo_eth_ioctl = atl1c_ioctl,
.ndo_tx_timeout = atl1c_tx_timeout,
.ndo_get_stats = atl1c_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 2eb0a2ab69f6..753973ac922e 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -2247,7 +2247,7 @@ static const struct net_device_ops atl1e_netdev_ops = {
.ndo_fix_features = atl1e_fix_features,
.ndo_set_features = atl1e_set_features,
.ndo_change_mtu = atl1e_change_mtu,
- .ndo_do_ioctl = atl1e_ioctl,
+ .ndo_eth_ioctl = atl1e_ioctl,
.ndo_tx_timeout = atl1e_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = atl1e_netpoll,
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index c67201a13cf5..68f6c0bbd945 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -2885,7 +2885,7 @@ static const struct net_device_ops atl1_netdev_ops = {
.ndo_change_mtu = atl1_change_mtu,
.ndo_fix_features = atlx_fix_features,
.ndo_set_features = atlx_set_features,
- .ndo_do_ioctl = atlx_ioctl,
+ .ndo_eth_ioctl = atlx_ioctl,
.ndo_tx_timeout = atlx_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = atl1_poll_controller,
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index 0cc0db04c27d..b69298ddb647 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -1293,7 +1293,7 @@ static const struct net_device_ops atl2_netdev_ops = {
.ndo_change_mtu = atl2_change_mtu,
.ndo_fix_features = atl2_fix_features,
.ndo_set_features = atl2_set_features,
- .ndo_do_ioctl = atl2_ioctl,
+ .ndo_eth_ioctl = atl2_ioctl,
.ndo_tx_timeout = atl2_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = atl2_poll_controller,
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 1a02ca600b71..56e0fb07aec7 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -122,8 +122,8 @@ config SB1250_MAC
config TIGON3
tristate "Broadcom Tigon3 support"
depends on PCI
+ depends on PTP_1588_CLOCK_OPTIONAL
select PHYLIB
- imply PTP_1588_CLOCK
help
This driver supports Broadcom Tigon3 based gigabit Ethernet cards.
@@ -140,7 +140,7 @@ config TIGON3_HWMON
config BNX2X
tristate "Broadcom NetXtremeII 10Gb support"
depends on PCI
- imply PTP_1588_CLOCK
+ depends on PTP_1588_CLOCK_OPTIONAL
select FW_LOADER
select ZLIB_INFLATE
select LIBCRC32C
@@ -206,7 +206,7 @@ config SYSTEMPORT
config BNXT
tristate "Broadcom NetXtreme-C/E support"
depends on PCI
- imply PTP_1588_CLOCK
+ depends on PTP_1588_CLOCK_OPTIONAL
select FW_LOADER
select LIBCRC32C
select NET_DEVLINK
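The switch from "imply PTP_1588_CLOCK" to "depends on PTP_1588_CLOCK_OPTIONAL" rules out the broken combination of a built-in driver calling into a modular PTP core: the option is satisfied when the PTP core is reachable from the driver or disabled entirely. On the C side a driver can then degrade gracefully with IS_REACHABLE(). A sketch under that assumption (the helper name is hypothetical; the header already stubs out ptp_clock_register() when the core is unreachable, so the explicit check is illustrative):

	#include <linux/kconfig.h>
	#include <linux/ptp_clock_kernel.h>

	static int example_ptp_register(struct device *dev,
					struct ptp_clock_info *info,
					struct ptp_clock **clk)
	{
		/* Reachable means =y, or =m with this driver also modular. */
		if (!IS_REACHABLE(CONFIG_PTP_1588_CLOCK))
			return 0;	/* run without PHC support */

		*clk = ptp_clock_register(info, dev);
		return PTR_ERR_OR_ZERO(*clk);
	}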
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index ad2655efe423..fa784953c601 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -2198,7 +2198,7 @@ static const struct net_device_ops b44_netdev_ops = {
.ndo_set_rx_mode = b44_set_rx_mode,
.ndo_set_mac_address = b44_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = b44_ioctl,
+ .ndo_eth_ioctl = b44_ioctl,
.ndo_tx_timeout = b44_tx_timeout,
.ndo_change_mtu = b44_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 977f097fc7bf..d56886300ecf 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -1699,7 +1699,7 @@ static const struct net_device_ops bcm_enet_ops = {
.ndo_start_xmit = bcm_enet_start_xmit,
.ndo_set_mac_address = bcm_enet_set_mac_address,
.ndo_set_rx_mode = bcm_enet_set_multicast_list,
- .ndo_do_ioctl = bcm_enet_ioctl,
+ .ndo_eth_ioctl = bcm_enet_ioctl,
.ndo_change_mtu = bcm_enet_change_mtu,
};
@@ -2446,7 +2446,7 @@ static const struct net_device_ops bcm_enetsw_ops = {
.ndo_stop = bcm_enetsw_stop,
.ndo_start_xmit = bcm_enet_start_xmit,
.ndo_change_mtu = bcm_enet_change_mtu,
- .ndo_do_ioctl = bcm_enetsw_ioctl,
+ .ndo_eth_ioctl = bcm_enetsw_ioctl,
};
@@ -2649,7 +2649,6 @@ static int bcm_enetsw_probe(struct platform_device *pdev)
if (!res_mem || irq_rx < 0)
return -ENODEV;
- ret = 0;
dev = alloc_etherdev(sizeof(*priv));
if (!dev)
return -ENOMEM;
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 075f6e146b29..fe4d99abd548 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -1263,7 +1263,7 @@ static const struct net_device_ops bgmac_netdev_ops = {
.ndo_set_rx_mode = bgmac_set_rx_mode,
.ndo_set_mac_address = bgmac_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = phy_do_ioctl_running,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_change_mtu = bgmac_change_mtu,
};
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index bee6cfad9fc6..89ee1c0e9c79 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -8546,7 +8546,7 @@ static const struct net_device_ops bnx2_netdev_ops = {
.ndo_stop = bnx2_close,
.ndo_get_stats64 = bnx2_get_stats64,
.ndo_set_rx_mode = bnx2_set_rx_mode,
- .ndo_do_ioctl = bnx2_ioctl,
+ .ndo_eth_ioctl = bnx2_ioctl,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = bnx2_change_mac_addr,
.ndo_change_mtu = bnx2_change_mtu,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 2acbc73dcd18..6d98134913cd 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -13048,7 +13048,7 @@ static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_set_rx_mode = bnx2x_set_rx_mode,
.ndo_set_mac_address = bnx2x_change_mac_addr,
.ndo_validate_addr = bnx2x_validate_addr,
- .ndo_do_ioctl = bnx2x_ioctl,
+ .ndo_eth_ioctl = bnx2x_ioctl,
.ndo_change_mtu = bnx2x_change_mtu,
.ndo_fix_features = bnx2x_fix_features,
.ndo_set_features = bnx2x_set_features,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 27943b0446c2..f255fd0b16db 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -1858,7 +1858,6 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
{
int i;
int first_queue_query_index, num_queues_req;
- dma_addr_t cur_data_offset;
struct stats_query_entry *cur_query_entry;
u8 stats_count = 0;
bool is_fcoe = false;
@@ -1879,10 +1878,6 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index,
first_queue_query_index + num_queues_req);
- cur_data_offset = bp->fw_stats_data_mapping +
- offsetof(struct bnx2x_fw_stats_data, queue_stats) +
- num_queues_req * sizeof(struct per_queue_stats);
-
cur_query_entry = &bp->fw_stats_req->
query[first_queue_query_index + num_queues_req];
@@ -1933,7 +1928,6 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
cur_query_entry->funcID,
j, cur_query_entry->index);
cur_query_entry++;
- cur_data_offset += sizeof(struct per_queue_stats);
stats_count++;
/* all stats are coalesced to the leading queue */
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 8a97640cdfe7..893bdaf03043 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -278,6 +278,8 @@ static const u16 bnxt_async_events_arr[] = {
ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION,
ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST,
+ ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP,
+ ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT,
};
static struct workqueue_struct *bnxt_pf_wq;
@@ -2074,6 +2076,19 @@ static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id)
return INVALID_HW_RING_ID;
}
+static void bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2)
+{
+ switch (BNXT_EVENT_ERROR_REPORT_TYPE(data1)) {
+ case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL:
+ netdev_err(bp->dev, "1PPS: Received invalid signal on pin%lu from the external source. Please fix the signal and reconfigure the pin\n",
+ BNXT_EVENT_INVALID_SIGNAL_DATA(data2));
+ break;
+ default:
+ netdev_err(bp->dev, "FW reported unknown error type\n");
+ break;
+ }
+}
+
#define BNXT_GET_EVENT_PORT(data) \
((data) & \
ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
@@ -2234,6 +2249,14 @@ static int bnxt_async_event_process(struct bnxt *bp,
}
goto async_event_process_exit;
}
+ case ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP: {
+ bnxt_ptp_pps_event(bp, data1, data2);
+ goto async_event_process_exit;
+ }
+ case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT: {
+ bnxt_event_error_report(bp, data1, data2);
+ goto async_event_process_exit;
+ }
default:
goto async_event_process_exit;
}
@@ -3176,6 +3199,58 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp)
return 0;
}
+static void bnxt_free_cp_arrays(struct bnxt_cp_ring_info *cpr)
+{
+ kfree(cpr->cp_desc_ring);
+ cpr->cp_desc_ring = NULL;
+ kfree(cpr->cp_desc_mapping);
+ cpr->cp_desc_mapping = NULL;
+}
+
+static int bnxt_alloc_cp_arrays(struct bnxt_cp_ring_info *cpr, int n)
+{
+ cpr->cp_desc_ring = kcalloc(n, sizeof(*cpr->cp_desc_ring), GFP_KERNEL);
+ if (!cpr->cp_desc_ring)
+ return -ENOMEM;
+ cpr->cp_desc_mapping = kcalloc(n, sizeof(*cpr->cp_desc_mapping),
+ GFP_KERNEL);
+ if (!cpr->cp_desc_mapping)
+ return -ENOMEM;
+ return 0;
+}
+
+static void bnxt_free_all_cp_arrays(struct bnxt *bp)
+{
+ int i;
+
+ if (!bp->bnapi)
+ return;
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+
+ if (!bnapi)
+ continue;
+ bnxt_free_cp_arrays(&bnapi->cp_ring);
+ }
+}
+
+static int bnxt_alloc_all_cp_arrays(struct bnxt *bp)
+{
+ int i, n = bp->cp_nr_pages;
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ int rc;
+
+ if (!bnapi)
+ continue;
+ rc = bnxt_alloc_cp_arrays(&bnapi->cp_ring, n);
+ if (rc)
+ return rc;
+ }
+ return 0;
+}
+
static void bnxt_free_cp_rings(struct bnxt *bp)
{
int i;
@@ -3203,6 +3278,7 @@ static void bnxt_free_cp_rings(struct bnxt *bp)
if (cpr2) {
ring = &cpr2->cp_ring_struct;
bnxt_free_ring(bp, &ring->ring_mem);
+ bnxt_free_cp_arrays(cpr2);
kfree(cpr2);
cpr->cp_ring_arr[j] = NULL;
}
@@ -3221,6 +3297,12 @@ static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp)
if (!cpr)
return NULL;
+ rc = bnxt_alloc_cp_arrays(cpr, bp->cp_nr_pages);
+ if (rc) {
+ bnxt_free_cp_arrays(cpr);
+ kfree(cpr);
+ return NULL;
+ }
ring = &cpr->cp_ring_struct;
rmem = &ring->ring_mem;
rmem->nr_pages = bp->cp_nr_pages;
@@ -3231,6 +3313,7 @@ static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp)
rc = bnxt_alloc_ring(bp, rmem);
if (rc) {
bnxt_free_ring(bp, rmem);
+ bnxt_free_cp_arrays(cpr);
kfree(cpr);
cpr = NULL;
}
@@ -3663,9 +3746,15 @@ void bnxt_set_ring_params(struct bnxt *bp)
if (jumbo_factor > agg_factor)
agg_factor = jumbo_factor;
}
- agg_ring_size = ring_size * agg_factor;
+ if (agg_factor) {
+ if (ring_size > BNXT_MAX_RX_DESC_CNT_JUM_ENA) {
+ ring_size = BNXT_MAX_RX_DESC_CNT_JUM_ENA;
+ netdev_warn(bp->dev, "RX ring size reduced from %d to %d because the jumbo ring is now enabled\n",
+ bp->rx_ring_size, ring_size);
+ bp->rx_ring_size = ring_size;
+ }
+ agg_ring_size = ring_size * agg_factor;
- if (agg_ring_size) {
bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
RX_DESC_CNT);
if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
@@ -4266,6 +4355,7 @@ static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
bnxt_free_tx_rings(bp);
bnxt_free_rx_rings(bp);
bnxt_free_cp_rings(bp);
+ bnxt_free_all_cp_arrays(bp);
bnxt_free_ntp_fltrs(bp, irq_re_init);
if (irq_re_init) {
bnxt_free_ring_stats(bp);
@@ -4386,6 +4476,10 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
goto alloc_mem_err;
}
+ rc = bnxt_alloc_all_cp_arrays(bp);
+ if (rc)
+ goto alloc_mem_err;
+
bnxt_init_ring_struct(bp);
rc = bnxt_alloc_rx_rings(bp);
@@ -7531,9 +7625,14 @@ static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
rc = -ENODEV;
goto no_ptp;
}
- return 0;
+ rc = bnxt_ptp_init(bp);
+ if (!rc)
+ return 0;
+
+ netdev_warn(bp->dev, "PTP initialization failed.\n");
no_ptp:
+ bnxt_ptp_clear(bp);
kfree(ptp);
bp->ptp_cfg = NULL;
return rc;
@@ -7576,6 +7675,8 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
flags_ext = le32_to_cpu(resp->flags_ext);
if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED)
bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;
+ if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED))
+ bp->fw_cap |= BNXT_FW_CAP_PTP_PPS;
bp->tx_push_thresh = 0;
if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
@@ -7613,6 +7714,7 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED) {
__bnxt_hwrm_ptp_qcfg(bp);
} else {
+ bnxt_ptp_clear(bp);
kfree(bp->ptp_cfg);
bp->ptp_cfg = NULL;
}
@@ -10316,15 +10418,9 @@ static int bnxt_open(struct net_device *dev)
if (rc)
return rc;
- if (bnxt_ptp_init(bp)) {
- netdev_warn(dev, "PTP initialization failed.\n");
- kfree(bp->ptp_cfg);
- bp->ptp_cfg = NULL;
- }
rc = __bnxt_open_nic(bp, true, true);
if (rc) {
bnxt_hwrm_if_change(bp, false);
- bnxt_ptp_clear(bp);
} else {
if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
@@ -10415,7 +10511,6 @@ static int bnxt_close(struct net_device *dev)
{
struct bnxt *bp = netdev_priv(dev);
- bnxt_ptp_clear(bp);
bnxt_hwmon_close(bp);
bnxt_close_nic(bp, true, true);
bnxt_hwrm_shutdown_link(bp);
@@ -11405,7 +11500,6 @@ static void bnxt_fw_reset_close(struct bnxt *bp)
bnxt_clear_int_mode(bp);
pci_disable_device(bp->pdev);
}
- bnxt_ptp_clear(bp);
__bnxt_close_nic(bp, true, false);
bnxt_vf_reps_free(bp);
bnxt_clear_int_mode(bp);
@@ -11441,13 +11535,20 @@ static bool is_bnxt_fw_ok(struct bnxt *bp)
static void bnxt_force_fw_reset(struct bnxt *bp)
{
struct bnxt_fw_health *fw_health = bp->fw_health;
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
u32 wait_dsecs;
if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
return;
- set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
+ if (ptp) {
+ spin_lock_bh(&ptp->ptp_lock);
+ set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
+ spin_unlock_bh(&ptp->ptp_lock);
+ } else {
+ set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
+ }
bnxt_fw_reset_close(bp);
wait_dsecs = fw_health->master_func_wait_dsecs;
if (fw_health->master) {
@@ -11503,9 +11604,16 @@ void bnxt_fw_reset(struct bnxt *bp)
bnxt_rtnl_lock_sp(bp);
if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
int n = 0, tmo;
- set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
+ if (ptp) {
+ spin_lock_bh(&ptp->ptp_lock);
+ set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
+ spin_unlock_bh(&ptp->ptp_lock);
+ } else {
+ set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
+ }
if (bp->pf.active_vfs &&
!test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
n = bnxt_get_registered_vfs(bp);
@@ -12177,6 +12285,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
bnxt_reenable_sriov(bp);
bnxt_vf_reps_alloc(bp);
bnxt_vf_reps_open(bp);
+ bnxt_ptp_reapply_pps(bp);
bnxt_dl_health_recovery_done(bp);
bnxt_dl_health_status_update(bp, true);
rtnl_unlock();
@@ -12708,7 +12817,7 @@ static const struct net_device_ops bnxt_netdev_ops = {
.ndo_stop = bnxt_close,
.ndo_get_stats64 = bnxt_get_stats64,
.ndo_set_rx_mode = bnxt_set_rx_mode,
- .ndo_do_ioctl = bnxt_ioctl,
+ .ndo_eth_ioctl = bnxt_ioctl,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = bnxt_change_mac_addr,
.ndo_change_mtu = bnxt_change_mtu,
@@ -12747,6 +12856,7 @@ static void bnxt_remove_one(struct pci_dev *pdev)
if (BNXT_PF(bp))
devlink_port_type_clear(&bp->dl_port);
+ bnxt_ptp_clear(bp);
pci_disable_pcie_error_reporting(pdev);
unregister_netdev(dev);
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
@@ -13359,6 +13469,7 @@ init_err_pci_clean:
bnxt_free_hwrm_short_cmd_req(bp);
bnxt_free_hwrm_resources(bp);
bnxt_ethtool_free(bp);
+ bnxt_ptp_clear(bp);
kfree(bp->ptp_cfg);
bp->ptp_cfg = NULL;
kfree(bp->fw_health);
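The bnxt.c change above converts the fixed MAX_CP_PAGES-sized descriptor arrays into kcalloc()'ed arrays sized per ring, so MAX_CP_PAGES can double in bnxt.h without growing every bnxt_cp_ring_info. The alloc/free pairing follows the usual kernel idiom: allocate each member in turn, and make the free path unconditional (kfree(NULL) is a no-op, and pointers are NULLed after freeing) so a partial allocation failure unwinds through the same function. A condensed sketch of that idiom with hypothetical struct names:

	#include <linux/slab.h>
	#include <linux/types.h>

	struct ring_arrays {
		void **desc;		/* page virtual addresses */
		dma_addr_t *map;	/* matching DMA addresses */
	};

	static void ring_arrays_free(struct ring_arrays *r)
	{
		kfree(r->desc);		/* kfree(NULL) is safe */
		r->desc = NULL;
		kfree(r->map);
		r->map = NULL;
	}

	static int ring_arrays_alloc(struct ring_arrays *r, int n)
	{
		r->desc = kcalloc(n, sizeof(*r->desc), GFP_KERNEL);
		if (!r->desc)
			return -ENOMEM;
		r->map = kcalloc(n, sizeof(*r->map), GFP_KERNEL);
		if (!r->map)
			return -ENOMEM;	/* caller unwinds via ring_arrays_free() */
		return 0;
	}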
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index ba4e0fc38520..7b989b6e4f6e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -496,6 +496,16 @@ struct rx_tpa_end_cmp_ext {
!!((data1) & \
ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_RECOVERY_ENABLED)
+#define BNXT_EVENT_ERROR_REPORT_TYPE(data1) \
+ (((data1) & \
+ ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MASK) >>\
+ ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_SFT)
+
+#define BNXT_EVENT_INVALID_SIGNAL_DATA(data2) \
+ (((data2) & \
+ ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA2_PIN_ID_MASK) >>\
+ ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA2_PIN_ID_SFT)
+
struct nqe_cn {
__le16 type;
#define NQ_CN_TYPE_MASK 0x3fUL
@@ -586,15 +596,17 @@ struct nqe_cn {
#define MAX_TPA_SEGS_P5 0x3f
#if (BNXT_PAGE_SHIFT == 16)
-#define MAX_RX_PAGES 1
+#define MAX_RX_PAGES_AGG_ENA 1
+#define MAX_RX_PAGES 4
#define MAX_RX_AGG_PAGES 4
#define MAX_TX_PAGES 1
-#define MAX_CP_PAGES 8
+#define MAX_CP_PAGES 16
#else
-#define MAX_RX_PAGES 8
+#define MAX_RX_PAGES_AGG_ENA 8
+#define MAX_RX_PAGES 32
#define MAX_RX_AGG_PAGES 32
#define MAX_TX_PAGES 8
-#define MAX_CP_PAGES 64
+#define MAX_CP_PAGES 128
#endif
#define RX_DESC_CNT (BNXT_PAGE_SIZE / sizeof(struct rx_bd))
@@ -612,6 +624,7 @@ struct nqe_cn {
#define HW_CMPD_RING_SIZE (sizeof(struct tx_cmp) * CP_DESC_CNT)
#define BNXT_MAX_RX_DESC_CNT (RX_DESC_CNT * MAX_RX_PAGES - 1)
+#define BNXT_MAX_RX_DESC_CNT_JUM_ENA (RX_DESC_CNT * MAX_RX_PAGES_AGG_ENA - 1)
#define BNXT_MAX_RX_JUM_DESC_CNT (RX_DESC_CNT * MAX_RX_AGG_PAGES - 1)
#define BNXT_MAX_TX_DESC_CNT (TX_DESC_CNT * MAX_TX_PAGES - 1)
@@ -963,11 +976,11 @@ struct bnxt_cp_ring_info {
struct dim dim;
union {
- struct tx_cmp *cp_desc_ring[MAX_CP_PAGES];
- struct nqe_cn *nq_desc_ring[MAX_CP_PAGES];
+ struct tx_cmp **cp_desc_ring;
+ struct nqe_cn **nq_desc_ring;
};
- dma_addr_t cp_desc_mapping[MAX_CP_PAGES];
+ dma_addr_t *cp_desc_mapping;
struct bnxt_stats_mem stats;
u32 hw_stats_ctx_id;
@@ -1888,6 +1901,7 @@ struct bnxt {
#define BNXT_FW_CAP_VLAN_RX_STRIP 0x01000000
#define BNXT_FW_CAP_VLAN_TX_INSERT 0x02000000
#define BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED 0x04000000
+ #define BNXT_FW_CAP_PTP_PPS 0x10000000
#define BNXT_FW_CAP_RING_MONITOR 0x40000000
#define BNXT_NEW_RM(bp) ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 64381be935a8..2cd8bb37e641 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -743,14 +743,17 @@ static void bnxt_dl_params_unregister(struct bnxt *bp)
int bnxt_dl_register(struct bnxt *bp)
{
+ const struct devlink_ops *devlink_ops;
struct devlink_port_attrs attrs = {};
struct devlink *dl;
int rc;
if (BNXT_PF(bp))
- dl = devlink_alloc(&bnxt_dl_ops, sizeof(struct bnxt_dl));
+ devlink_ops = &bnxt_dl_ops;
else
- dl = devlink_alloc(&bnxt_vf_dl_ops, sizeof(struct bnxt_dl));
+ devlink_ops = &bnxt_vf_dl_ops;
+
+ dl = devlink_alloc(devlink_ops, sizeof(struct bnxt_dl), &bp->pdev->dev);
if (!dl) {
netdev_warn(bp->dev, "devlink_alloc failed\n");
return -ENOMEM;
@@ -763,7 +766,7 @@ int bnxt_dl_register(struct bnxt *bp)
bp->hwrm_spec_code > 0x10803)
bp->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY;
- rc = devlink_register(dl, &bp->pdev->dev);
+ rc = devlink_register(dl);
if (rc) {
netdev_warn(bp->dev, "devlink_register failed. rc=%d\n", rc);
goto err_dl_free;
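The devlink hunk reflects an API change visible again in the liquidio diff further down: devlink_alloc() now binds the backing struct device at allocation time, and devlink_register() takes only the devlink instance. A minimal sketch of the new calling sequence, with the ops pointer and priv size standing in for a driver's own:

	#include <linux/pci.h>
	#include <net/devlink.h>

	static struct devlink *example_devlink_setup(struct pci_dev *pdev,
						     const struct devlink_ops *ops,
						     size_t priv_size)
	{
		struct devlink *dl;

		dl = devlink_alloc(ops, priv_size, &pdev->dev); /* device bound here */
		if (!dl)
			return NULL;

		if (devlink_register(dl)) {	/* no device argument anymore */
			devlink_free(dl);
			return NULL;
		}
		return dl;
	}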
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 786ca51e669b..485252d12245 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -768,8 +768,13 @@ static void bnxt_get_ringparam(struct net_device *dev,
{
struct bnxt *bp = netdev_priv(dev);
- ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT;
- ering->rx_jumbo_max_pending = BNXT_MAX_RX_JUM_DESC_CNT;
+ if (bp->flags & BNXT_FLAG_AGG_RINGS) {
+ ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT_JUM_ENA;
+ ering->rx_jumbo_max_pending = BNXT_MAX_RX_JUM_DESC_CNT;
+ } else {
+ ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT;
+ ering->rx_jumbo_max_pending = 0;
+ }
ering->tx_max_pending = BNXT_MAX_TX_DESC_CNT;
ering->rx_pending = bp->rx_ring_size;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
index 81f40ab748f1..2fe3c9081f8d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
@@ -56,16 +56,19 @@ static int bnxt_ptp_settime(struct ptp_clock_info *ptp_info,
}
/* Caller holds ptp_lock */
-static u64 bnxt_refclk_read(struct bnxt *bp, struct ptp_system_timestamp *sts)
+static int bnxt_refclk_read(struct bnxt *bp, struct ptp_system_timestamp *sts,
+ u64 *ns)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
- u64 ns;
+
+ if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
+ return -EIO;
ptp_read_system_prets(sts);
- ns = readl(bp->bar0 + ptp->refclk_mapped_regs[0]);
+ *ns = readl(bp->bar0 + ptp->refclk_mapped_regs[0]);
ptp_read_system_postts(sts);
- ns |= (u64)readl(bp->bar0 + ptp->refclk_mapped_regs[1]) << 32;
- return ns;
+ *ns |= (u64)readl(bp->bar0 + ptp->refclk_mapped_regs[1]) << 32;
+ return 0;
}
static void bnxt_ptp_get_current_time(struct bnxt *bp)
@@ -76,7 +79,7 @@ static void bnxt_ptp_get_current_time(struct bnxt *bp)
return;
spin_lock_bh(&ptp->ptp_lock);
WRITE_ONCE(ptp->old_time, ptp->current_time);
- ptp->current_time = bnxt_refclk_read(bp, NULL);
+ bnxt_refclk_read(bp, NULL, &ptp->current_time);
spin_unlock_bh(&ptp->ptp_lock);
}
@@ -110,9 +113,14 @@ static int bnxt_ptp_gettimex(struct ptp_clock_info *ptp_info,
struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg,
ptp_info);
u64 ns, cycles;
+ int rc;
spin_lock_bh(&ptp->ptp_lock);
- cycles = bnxt_refclk_read(ptp->bp, sts);
+ rc = bnxt_refclk_read(ptp->bp, sts, &cycles);
+ if (rc) {
+ spin_unlock_bh(&ptp->ptp_lock);
+ return rc;
+ }
ns = timecounter_cyc2time(&ptp->tc, cycles);
spin_unlock_bh(&ptp->ptp_lock);
*ts = ns_to_timespec64(ns);
@@ -149,10 +157,207 @@ static int bnxt_ptp_adjfreq(struct ptp_clock_info *ptp_info, s32 ppb)
return rc;
}
-static int bnxt_ptp_enable(struct ptp_clock_info *ptp,
+void bnxt_ptp_pps_event(struct bnxt *bp, u32 data1, u32 data2)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ struct ptp_clock_event event;
+ u64 ns, pps_ts;
+
+ pps_ts = EVENT_PPS_TS(data2, data1);
+ spin_lock_bh(&ptp->ptp_lock);
+ ns = timecounter_cyc2time(&ptp->tc, pps_ts);
+ spin_unlock_bh(&ptp->ptp_lock);
+
+ switch (EVENT_DATA2_PPS_EVENT_TYPE(data2)) {
+ case ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_INTERNAL:
+ event.pps_times.ts_real = ns_to_timespec64(ns);
+ event.type = PTP_CLOCK_PPSUSR;
+ event.index = EVENT_DATA2_PPS_PIN_NUM(data2);
+ break;
+ case ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_EXTERNAL:
+ event.timestamp = ns;
+ event.type = PTP_CLOCK_EXTTS;
+ event.index = EVENT_DATA2_PPS_PIN_NUM(data2);
+ break;
+ }
+
+ ptp_clock_event(bp->ptp_cfg->ptp_clock, &event);
+}
+
+static int bnxt_ptp_cfg_pin(struct bnxt *bp, u8 pin, u8 usage)
+{
+ struct hwrm_func_ptp_pin_cfg_input req = {0};
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ u8 state = usage != BNXT_PPS_PIN_NONE;
+ u8 *pin_state, *pin_usg;
+ u32 enables;
+ int rc;
+
+ if (!TSIO_PIN_VALID(pin)) {
+ netdev_err(ptp->bp->dev, "1PPS: Invalid pin. Check pin-function configuration\n");
+ return -EOPNOTSUPP;
+ }
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_PTP_PIN_CFG, -1, -1);
+ enables = (FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN0_STATE |
+ FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN0_USAGE) << (pin * 2);
+ req.enables = cpu_to_le32(enables);
+
+ pin_state = &req.pin0_state;
+ pin_usg = &req.pin0_usage;
+
+ *(pin_state + (pin * 2)) = state;
+ *(pin_usg + (pin * 2)) = usage;
+
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ return rc;
+
+ ptp->pps_info.pins[pin].usage = usage;
+ ptp->pps_info.pins[pin].state = state;
+
+ return 0;
+}
+
+static int bnxt_ptp_cfg_event(struct bnxt *bp, u8 event)
+{
+ struct hwrm_func_ptp_cfg_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_PTP_CFG, -1, -1);
+ req.enables = cpu_to_le16(FUNC_PTP_CFG_REQ_ENABLES_PTP_PPS_EVENT);
+ req.ptp_pps_event = event;
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+void bnxt_ptp_reapply_pps(struct bnxt *bp)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ struct bnxt_pps *pps;
+ u32 pin = 0;
+ int rc;
+
+ if (!ptp || !(bp->fw_cap & BNXT_FW_CAP_PTP_PPS) ||
+ !(ptp->ptp_info.pin_config))
+ return;
+ pps = &ptp->pps_info;
+ for (pin = 0; pin < BNXT_MAX_TSIO_PINS; pin++) {
+ if (pps->pins[pin].state) {
+ rc = bnxt_ptp_cfg_pin(bp, pin, pps->pins[pin].usage);
+ if (!rc && pps->pins[pin].event)
+ rc = bnxt_ptp_cfg_event(bp,
+ pps->pins[pin].event);
+ if (rc)
+ netdev_err(bp->dev, "1PPS: Failed to configure pin%d\n",
+ pin);
+ }
+ }
+}
+
+static int bnxt_get_target_cycles(struct bnxt_ptp_cfg *ptp, u64 target_ns,
+ u64 *cycles_delta)
+{
+ u64 cycles_now;
+ u64 nsec_now, nsec_delta;
+ int rc;
+
+ spin_lock_bh(&ptp->ptp_lock);
+ rc = bnxt_refclk_read(ptp->bp, NULL, &cycles_now);
+ if (rc) {
+ spin_unlock_bh(&ptp->ptp_lock);
+ return rc;
+ }
+ nsec_now = timecounter_cyc2time(&ptp->tc, cycles_now);
+ spin_unlock_bh(&ptp->ptp_lock);
+
+ nsec_delta = target_ns - nsec_now;
+ *cycles_delta = div64_u64(nsec_delta << ptp->cc.shift, ptp->cc.mult);
+ return 0;
+}
+
+static int bnxt_ptp_perout_cfg(struct bnxt_ptp_cfg *ptp,
+ struct ptp_clock_request *rq)
+{
+ struct hwrm_func_ptp_cfg_input req = {0};
+ struct bnxt *bp = ptp->bp;
+ struct timespec64 ts;
+ u64 target_ns, delta;
+ u16 enables;
+ int rc;
+
+ ts.tv_sec = rq->perout.start.sec;
+ ts.tv_nsec = rq->perout.start.nsec;
+ target_ns = timespec64_to_ns(&ts);
+
+ rc = bnxt_get_target_cycles(ptp, target_ns, &delta);
+ if (rc)
+ return rc;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_PTP_CFG, -1, -1);
+
+ enables = FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_PERIOD |
+ FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_UP |
+ FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_PHASE;
+ req.enables = cpu_to_le16(enables);
+ req.ptp_pps_event = 0;
+ req.ptp_freq_adj_dll_source = 0;
+ req.ptp_freq_adj_dll_phase = 0;
+ req.ptp_freq_adj_ext_period = cpu_to_le32(NSEC_PER_SEC);
+ req.ptp_freq_adj_ext_up = 0;
+ req.ptp_freq_adj_ext_phase_lower = cpu_to_le32(delta);
+
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+static int bnxt_ptp_enable(struct ptp_clock_info *ptp_info,
struct ptp_clock_request *rq, int on)
{
- return -EOPNOTSUPP;
+ struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg,
+ ptp_info);
+ struct bnxt *bp = ptp->bp;
+ u8 pin_id;
+ int rc;
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_EXTTS:
+ /* Configure an External PPS IN */
+ pin_id = ptp_find_pin(ptp->ptp_clock, PTP_PF_EXTTS,
+ rq->extts.index);
+ if (!on)
+ break;
+ rc = bnxt_ptp_cfg_pin(bp, pin_id, BNXT_PPS_PIN_PPS_IN);
+ if (rc)
+ return rc;
+ rc = bnxt_ptp_cfg_event(bp, BNXT_PPS_EVENT_EXTERNAL);
+ if (!rc)
+ ptp->pps_info.pins[pin_id].event = BNXT_PPS_EVENT_EXTERNAL;
+ return rc;
+ case PTP_CLK_REQ_PEROUT:
+ /* Configure a Periodic PPS OUT */
+ pin_id = ptp_find_pin(ptp->ptp_clock, PTP_PF_PEROUT,
+ rq->perout.index);
+ if (!on)
+ break;
+
+ rc = bnxt_ptp_cfg_pin(bp, pin_id, BNXT_PPS_PIN_PPS_OUT);
+ if (!rc)
+ rc = bnxt_ptp_perout_cfg(ptp, rq);
+
+ return rc;
+ case PTP_CLK_REQ_PPS:
+ /* Configure PHC PPS IN */
+ rc = bnxt_ptp_cfg_pin(bp, 0, BNXT_PPS_PIN_PPS_IN);
+ if (rc)
+ return rc;
+ rc = bnxt_ptp_cfg_event(bp, BNXT_PPS_EVENT_INTERNAL);
+ if (!rc)
+ ptp->pps_info.pins[0].event = BNXT_PPS_EVENT_INTERNAL;
+ return rc;
+ default:
+ netdev_err(ptp->bp->dev, "Unrecognized PIN function\n");
+ return -EOPNOTSUPP;
+ }
+
+ return bnxt_ptp_cfg_pin(bp, pin_id, BNXT_PPS_PIN_NONE);
}
static int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
@@ -311,8 +516,10 @@ static void bnxt_unmap_ptp_regs(struct bnxt *bp)
static u64 bnxt_cc_read(const struct cyclecounter *cc)
{
struct bnxt_ptp_cfg *ptp = container_of(cc, struct bnxt_ptp_cfg, cc);
+ u64 ns = 0;
- return bnxt_refclk_read(ptp->bp, NULL);
+ bnxt_refclk_read(ptp->bp, NULL, &ns);
+ return ns;
}
static void bnxt_stamp_tx_skb(struct bnxt *bp, struct sk_buff *skb)
@@ -410,6 +617,80 @@ static const struct ptp_clock_info bnxt_ptp_caps = {
.enable = bnxt_ptp_enable,
};
+static int bnxt_ptp_verify(struct ptp_clock_info *ptp_info, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg,
+ ptp_info);
+ /* Allow only PPS pin function configuration */
+ if (ptp->pps_info.pins[pin].usage <= BNXT_PPS_PIN_PPS_OUT &&
+ func != PTP_PF_PHYSYNC)
+ return 0;
+ else
+ return -EOPNOTSUPP;
+}
+
+/* bp->hwrm_cmd_lock held by the caller */
+static int bnxt_ptp_pps_init(struct bnxt *bp)
+{
+ struct hwrm_func_ptp_pin_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_func_ptp_pin_qcfg_input req = {0};
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ struct ptp_clock_info *ptp_info;
+ struct bnxt_pps *pps_info;
+ u8 *pin_usg;
+ u32 i, rc;
+
+ /* Query current/default PIN CFG */
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_PTP_PIN_QCFG, -1, -1);
+
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc || !resp->num_pins)
+ return -EOPNOTSUPP;
+
+ ptp_info = &ptp->ptp_info;
+ pps_info = &ptp->pps_info;
+ pps_info->num_pins = resp->num_pins;
+ ptp_info->n_pins = pps_info->num_pins;
+ ptp_info->pin_config = kcalloc(ptp_info->n_pins,
+ sizeof(*ptp_info->pin_config),
+ GFP_KERNEL);
+ if (!ptp_info->pin_config)
+ return -ENOMEM;
+
+ /* Report the TSIO capability to kernel */
+ pin_usg = &resp->pin0_usage;
+ for (i = 0; i < pps_info->num_pins; i++, pin_usg++) {
+ snprintf(ptp_info->pin_config[i].name,
+ sizeof(ptp_info->pin_config[i].name), "bnxt_pps%d", i);
+ ptp_info->pin_config[i].index = i;
+ ptp_info->pin_config[i].chan = i;
+ if (*pin_usg == BNXT_PPS_PIN_PPS_IN)
+ ptp_info->pin_config[i].func = PTP_PF_EXTTS;
+ else if (*pin_usg == BNXT_PPS_PIN_PPS_OUT)
+ ptp_info->pin_config[i].func = PTP_PF_PEROUT;
+ else
+ ptp_info->pin_config[i].func = PTP_PF_NONE;
+
+ pps_info->pins[i].usage = *pin_usg;
+ }
+
+ /* Only 1 each of ext_ts and per_out pins is available in HW */
+ ptp_info->n_ext_ts = 1;
+ ptp_info->n_per_out = 1;
+ ptp_info->pps = 1;
+ ptp_info->verify = bnxt_ptp_verify;
+
+ return 0;
+}
+
+static bool bnxt_pps_config_ok(struct bnxt *bp)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+
+ return !(bp->fw_cap & BNXT_FW_CAP_PTP_PPS) == !ptp->ptp_info.pin_config;
+}
+
int bnxt_ptp_init(struct bnxt *bp)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
@@ -422,6 +703,15 @@ int bnxt_ptp_init(struct bnxt *bp)
if (rc)
return rc;
+ if (ptp->ptp_clock && bnxt_pps_config_ok(bp))
+ return 0;
+
+ if (ptp->ptp_clock) {
+ ptp_clock_unregister(ptp->ptp_clock);
+ ptp->ptp_clock = NULL;
+ kfree(ptp->ptp_info.pin_config);
+ ptp->ptp_info.pin_config = NULL;
+ }
atomic_set(&ptp->tx_avail, BNXT_MAX_TX_TS);
spin_lock_init(&ptp->ptp_lock);
@@ -435,6 +725,10 @@ int bnxt_ptp_init(struct bnxt *bp)
timecounter_init(&ptp->tc, &ptp->cc, ktime_to_ns(ktime_get_real()));
ptp->ptp_info = bnxt_ptp_caps;
+ if ((bp->fw_cap & BNXT_FW_CAP_PTP_PPS)) {
+ if (bnxt_ptp_pps_init(bp))
+ netdev_err(bp->dev, "1pps not initialized, continuing without 1pps support\n");
+ }
ptp->ptp_clock = ptp_clock_register(&ptp->ptp_info, &bp->pdev->dev);
if (IS_ERR(ptp->ptp_clock)) {
int err = PTR_ERR(ptp->ptp_clock);
@@ -445,7 +739,7 @@ int bnxt_ptp_init(struct bnxt *bp)
}
if (bp->flags & BNXT_FLAG_CHIP_P5) {
spin_lock_bh(&ptp->ptp_lock);
- ptp->current_time = bnxt_refclk_read(bp, NULL);
+ bnxt_refclk_read(bp, NULL, &ptp->current_time);
WRITE_ONCE(ptp->old_time, ptp->current_time);
spin_unlock_bh(&ptp->ptp_lock);
ptp_schedule_worker(ptp->ptp_clock, 0);
@@ -464,6 +758,9 @@ void bnxt_ptp_clear(struct bnxt *bp)
ptp_clock_unregister(ptp->ptp_clock);
ptp->ptp_clock = NULL;
+ kfree(ptp->ptp_info.pin_config);
+ ptp->ptp_info.pin_config = NULL;
+
if (ptp->tx_skb) {
dev_kfree_skb_any(ptp->tx_skb);
ptp->tx_skb = NULL;
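Changing bnxt_refclk_read() to return the cycle count through an out-parameter frees the return value for an error code, so callers can bail out cleanly when BNXT_STATE_IN_FW_RESET makes the register window unreadable instead of consuming a bogus timestamp. A caller-side sketch of that convention, with every name hypothetical except the timecounter and spinlock APIs:

	#include <linux/spinlock.h>
	#include <linux/timecounter.h>
	#include <linux/types.h>

	struct example_ptp {
		spinlock_t lock;		/* serializes timecounter access */
		struct timecounter tc;
		int (*read_cycles)(struct example_ptp *p, u64 *cycles);
	};

	static int example_read_clock(struct example_ptp *p, u64 *ns)
	{
		u64 cycles;
		int rc;

		spin_lock_bh(&p->lock);
		rc = p->read_cycles(p, &cycles);	/* may fail mid FW reset */
		if (rc) {
			spin_unlock_bh(&p->lock);
			return rc;
		}
		*ns = timecounter_cyc2time(&p->tc, cycles);
		spin_unlock_bh(&p->lock);
		return 0;
	}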
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
index 524f1c272054..fa5f05708e6d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
@@ -22,11 +22,62 @@
PORT_TS_QUERY_REQ_ENABLES_TS_REQ_TIMEOUT | \
PORT_TS_QUERY_REQ_ENABLES_PTP_HDR_OFFSET)
+struct pps_pin {
+ u8 event;
+ u8 usage;
+ u8 state;
+};
+
+#define TSIO_PIN_VALID(pin) ((pin) < (BNXT_MAX_TSIO_PINS))
+
+#define EVENT_DATA2_PPS_EVENT_TYPE(data2) \
+ ((data2) & ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE)
+
+#define EVENT_DATA2_PPS_PIN_NUM(data2) \
+ (((data2) & \
+ ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PIN_NUMBER_MASK) >>\
+ ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PIN_NUMBER_SFT)
+
+#define BNXT_DATA2_UPPER_MSK \
+ ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PPS_TIMESTAMP_UPPER_MASK
+
+#define BNXT_DATA2_UPPER_SFT \
+ (32 - \
+ ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PPS_TIMESTAMP_UPPER_SFT)
+
+#define BNXT_DATA1_LOWER_MSK \
+ ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA1_PPS_TIMESTAMP_LOWER_MASK
+
+#define BNXT_DATA1_LOWER_SFT \
+ ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA1_PPS_TIMESTAMP_LOWER_SFT
+
+#define EVENT_PPS_TS(data2, data1) \
+ (((u64)((data2) & BNXT_DATA2_UPPER_MSK) << BNXT_DATA2_UPPER_SFT) |\
+ (((data1) & BNXT_DATA1_LOWER_MSK) >> BNXT_DATA1_LOWER_SFT))
+
+#define BNXT_PPS_PIN_DISABLE 0
+#define BNXT_PPS_PIN_ENABLE 1
+#define BNXT_PPS_PIN_NONE 0
+#define BNXT_PPS_PIN_PPS_IN 1
+#define BNXT_PPS_PIN_PPS_OUT 2
+#define BNXT_PPS_PIN_SYNC_IN 3
+#define BNXT_PPS_PIN_SYNC_OUT 4
+
+#define BNXT_PPS_EVENT_INTERNAL 1
+#define BNXT_PPS_EVENT_EXTERNAL 2
+
+struct bnxt_pps {
+ u8 num_pins;
+#define BNXT_MAX_TSIO_PINS 4
+ struct pps_pin pins[BNXT_MAX_TSIO_PINS];
+};
+
struct bnxt_ptp_cfg {
struct ptp_clock_info ptp_info;
struct ptp_clock *ptp_clock;
struct cyclecounter cc;
struct timecounter tc;
+ struct bnxt_pps pps_info;
/* serialize timecounter access */
spinlock_t ptp_lock;
struct sk_buff *tx_skb;
@@ -77,6 +128,8 @@ do { \
#endif
int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id, u16 *hdr_off);
+void bnxt_ptp_pps_event(struct bnxt *bp, u32 data1, u32 data2);
+void bnxt_ptp_reapply_pps(struct bnxt *bp);
int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr);
int bnxt_hwtstamp_get(struct net_device *dev, struct ifreq *ifr);
int bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb);
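The EVENT_PPS_TS() macro above reassembles one PPS timestamp whose lower bits arrive in the event's data1 word and whose upper bits arrive in data2, using the generated ASYNC_EVENT_CMPL_PPS_TIMESTAMP_* masks and shifts. A sketch of the same assembly with illustrative constants only (the assumed split, 32 low bits in data1 and 16 high bits in data2, is not taken from the firmware interface):

	#include <linux/types.h>

	static u64 example_pps_ts(u32 data2, u32 data1)
	{
		/* Place the upper field above bit 32, OR in the lower word. */
		return ((u64)(data2 & 0xffff) << 32) | data1;
	}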
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index db74241935ab..8507198992df 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -3659,7 +3659,7 @@ static const struct net_device_ops bcmgenet_netdev_ops = {
.ndo_tx_timeout = bcmgenet_timeout,
.ndo_set_rx_mode = bcmgenet_set_rx_mode,
.ndo_set_mac_address = bcmgenet_set_mac_addr,
- .ndo_do_ioctl = phy_do_ioctl_running,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_set_features = bcmgenet_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = bcmgenet_poll_controller,
@@ -3972,8 +3972,6 @@ static int bcmgenet_probe(struct platform_device *pdev)
*/
dev->needed_headroom += 64;
- netdev_boot_setup_check(dev);
-
priv->dev = dev;
priv->pdev = pdev;
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index 5b4568c2ad1c..f38f40eb966e 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -2136,7 +2136,7 @@ static const struct net_device_ops sbmac_netdev_ops = {
.ndo_start_xmit = sbmac_start_tx,
.ndo_set_rx_mode = sbmac_set_rx_mode,
.ndo_tx_timeout = sbmac_tx_timeout,
- .ndo_do_ioctl = sbmac_mii_ioctl,
+ .ndo_eth_ioctl = sbmac_mii_ioctl,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index b0e49643f483..6f82eeaa4b9f 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -14290,7 +14290,7 @@ static const struct net_device_ops tg3_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = tg3_set_rx_mode,
.ndo_set_mac_address = tg3_set_mac_addr,
- .ndo_do_ioctl = tg3_ioctl,
+ .ndo_eth_ioctl = tg3_ioctl,
.ndo_tx_timeout = tg3_tx_timeout,
.ndo_change_mtu = tg3_change_mtu,
.ndo_fix_features = tg3_fix_features,
diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig
index e432a68ac520..5b2a461dfd28 100644
--- a/drivers/net/ethernet/cadence/Kconfig
+++ b/drivers/net/ethernet/cadence/Kconfig
@@ -22,6 +22,7 @@ if NET_VENDOR_CADENCE
config MACB
tristate "Cadence MACB/GEM support"
depends on HAS_DMA && COMMON_CLK
+ depends on PTP_1588_CLOCK_OPTIONAL
select PHYLINK
select CRC32
help
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 7d2fe13a52f8..d13fb1d31821 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -3664,7 +3664,7 @@ static const struct net_device_ops macb_netdev_ops = {
.ndo_start_xmit = macb_start_xmit,
.ndo_set_rx_mode = macb_set_rx_mode,
.ndo_get_stats = macb_get_stats,
- .ndo_do_ioctl = macb_ioctl,
+ .ndo_eth_ioctl = macb_ioctl,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = macb_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
@@ -4323,7 +4323,7 @@ static const struct net_device_ops at91ether_netdev_ops = {
.ndo_get_stats = macb_get_stats,
.ndo_set_rx_mode = macb_set_rx_mode,
.ndo_set_mac_address = eth_mac_addr,
- .ndo_do_ioctl = macb_ioctl,
+ .ndo_eth_ioctl = macb_ioctl,
.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = at91ether_poll_controller,
@@ -4533,6 +4533,14 @@ static const struct macb_config sama5d2_config = {
.usrio = &macb_default_usrio,
};
+static const struct macb_config sama5d29_config = {
+ .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_GEM_HAS_PTP,
+ .dma_burst_length = 16,
+ .clk_init = macb_clk_init,
+ .init = macb_init,
+ .usrio = &macb_default_usrio,
+};
+
static const struct macb_config sama5d3_config = {
.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE
| MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_JUMBO,
@@ -4610,6 +4618,7 @@ static const struct of_device_id macb_dt_ids[] = {
{ .compatible = "cdns,gem", .data = &pc302gem_config },
{ .compatible = "cdns,sam9x60-macb", .data = &at91sam9260_config },
{ .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config },
+ { .compatible = "atmel,sama5d29-gem", .data = &sama5d29_config },
{ .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
{ .compatible = "atmel,sama5d3-macb", .data = &sama5d3macb_config },
{ .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig
index 4875cdae622e..1c76c95b0b27 100644
--- a/drivers/net/ethernet/cavium/Kconfig
+++ b/drivers/net/ethernet/cavium/Kconfig
@@ -66,7 +66,7 @@ config LIQUIDIO
tristate "Cavium LiquidIO support"
depends on 64BIT && PCI
depends on PCI
- imply PTP_1588_CLOCK
+ depends on PTP_1588_CLOCK_OPTIONAL
select FW_LOADER
select LIBCRC32C
select NET_DEVLINK
@@ -91,7 +91,7 @@ config OCTEON_MGMT_ETHERNET
config LIQUIDIO_VF
tristate "Cavium LiquidIO VF support"
depends on 64BIT && PCI_MSI
- imply PTP_1588_CLOCK
+ depends on PTP_1588_CLOCK_OPTIONAL
help
This driver supports Cavium LiquidIO Intelligent Server Adapter
based on CN23XX chips.
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 591229b96257..2907e13b9df6 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -1457,7 +1457,7 @@ static void free_netsgbuf(void *buf)
while (frags--) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
- pci_unmap_page((lio->oct_dev)->pci_dev,
+ dma_unmap_page(&lio->oct_dev->pci_dev->dev,
g->sg[(i >> 2)].ptr[(i & 3)],
skb_frag_size(frag), DMA_TO_DEVICE);
i++;
@@ -1500,7 +1500,7 @@ static void free_netsgbuf_with_resp(void *buf)
while (frags--) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
- pci_unmap_page((lio->oct_dev)->pci_dev,
+ dma_unmap_page(&lio->oct_dev->pci_dev->dev,
g->sg[(i >> 2)].ptr[(i & 3)],
skb_frag_size(frag), DMA_TO_DEVICE);
i++;
@@ -3223,7 +3223,7 @@ static const struct net_device_ops lionetdevops = {
.ndo_vlan_rx_add_vid = liquidio_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = liquidio_vlan_rx_kill_vid,
.ndo_change_mtu = liquidio_change_mtu,
- .ndo_do_ioctl = liquidio_ioctl,
+ .ndo_eth_ioctl = liquidio_ioctl,
.ndo_fix_features = liquidio_fix_features,
.ndo_set_features = liquidio_set_features,
.ndo_set_vf_mac = liquidio_set_vf_mac,
@@ -3750,7 +3750,8 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
}
devlink = devlink_alloc(&liquidio_devlink_ops,
- sizeof(struct lio_devlink_priv));
+ sizeof(struct lio_devlink_priv),
+ &octeon_dev->pci_dev->dev);
if (!devlink) {
dev_err(&octeon_dev->pci_dev->dev, "devlink alloc failed\n");
goto setup_nic_dev_free;
@@ -3759,7 +3760,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
lio_devlink = devlink_priv(devlink);
lio_devlink->oct = octeon_dev;
- if (devlink_register(devlink, &octeon_dev->pci_dev->dev)) {
+ if (devlink_register(devlink)) {
devlink_free(devlink);
dev_err(&octeon_dev->pci_dev->dev,
"devlink registration failed\n");
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index ffddb3126a32..c6fe0f2a4d0e 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -843,7 +843,7 @@ static void free_netsgbuf(void *buf)
while (frags--) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
- pci_unmap_page((lio->oct_dev)->pci_dev,
+ dma_unmap_page(&lio->oct_dev->pci_dev->dev,
g->sg[(i >> 2)].ptr[(i & 3)],
skb_frag_size(frag), DMA_TO_DEVICE);
i++;
@@ -887,7 +887,7 @@ static void free_netsgbuf_with_resp(void *buf)
while (frags--) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
- pci_unmap_page((lio->oct_dev)->pci_dev,
+ dma_unmap_page(&lio->oct_dev->pci_dev->dev,
g->sg[(i >> 2)].ptr[(i & 3)],
skb_frag_size(frag), DMA_TO_DEVICE);
i++;
@@ -1889,7 +1889,7 @@ static const struct net_device_ops lionetdevops = {
.ndo_vlan_rx_add_vid = liquidio_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = liquidio_vlan_rx_kill_vid,
.ndo_change_mtu = liquidio_change_mtu,
- .ndo_do_ioctl = liquidio_ioctl,
+ .ndo_eth_ioctl = liquidio_ioctl,
.ndo_fix_features = liquidio_fix_features,
.ndo_set_features = liquidio_set_features,
};
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index 48ff6fb0eed9..30463a6d1f8c 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -1373,7 +1373,7 @@ static const struct net_device_ops octeon_mgmt_ops = {
.ndo_start_xmit = octeon_mgmt_xmit,
.ndo_set_rx_mode = octeon_mgmt_set_rx_filtering,
.ndo_set_mac_address = octeon_mgmt_set_mac_address,
- .ndo_do_ioctl = octeon_mgmt_ioctl,
+ .ndo_eth_ioctl = octeon_mgmt_ioctl,
.ndo_change_mtu = octeon_mgmt_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = octeon_mgmt_poll_controller,
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index 9361f964bb9b..691e1475d55e 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -1322,18 +1322,12 @@ static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_disable_device;
}
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
if (err) {
dev_err(dev, "Unable to get usable DMA configuration\n");
goto err_release_regions;
}
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
- if (err) {
- dev_err(dev, "Unable to get 48-bit DMA for consistent allocations\n");
- goto err_release_regions;
- }
-
/* MAP PF's configuration registers */
nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
if (!nic->reg_base) {
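dma_set_mask_and_coherent() collapses the two legacy calls seen in this hunk and the nicvf one below: it sets both the streaming and the coherent DMA mask and fails only when the mask cannot be satisfied at all, so a single error path suffices. A minimal sketch:

	#include <linux/dma-mapping.h>
	#include <linux/pci.h>

	static int example_set_dma_mask(struct pci_dev *pdev)
	{
		/* One call now covers both streaming and coherent masks. */
		int err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));

		if (err)
			dev_err(&pdev->dev, "Unable to get usable DMA configuration\n");
		return err;
	}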
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index e2b290135fd9..d1667b759522 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -2096,7 +2096,7 @@ static const struct net_device_ops nicvf_netdev_ops = {
.ndo_fix_features = nicvf_fix_features,
.ndo_set_features = nicvf_set_features,
.ndo_bpf = nicvf_xdp,
- .ndo_do_ioctl = nicvf_ioctl,
+ .ndo_eth_ioctl = nicvf_ioctl,
.ndo_set_rx_mode = nicvf_set_rx_mode,
};
@@ -2130,18 +2130,12 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_disable_device;
}
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
if (err) {
dev_err(dev, "Unable to get usable DMA configuration\n");
goto err_release_regions;
}
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
- if (err) {
- dev_err(dev, "unable to get 48-bit DMA for consistent allocations\n");
- goto err_release_regions;
- }
-
qcount = netif_get_num_default_rss_queues();
/* Restrict multiqset support only for host bound VFs */
diff --git a/drivers/net/ethernet/chelsio/Kconfig b/drivers/net/ethernet/chelsio/Kconfig
index 8ba0e08e5e64..c931ec8cac40 100644
--- a/drivers/net/ethernet/chelsio/Kconfig
+++ b/drivers/net/ethernet/chelsio/Kconfig
@@ -69,6 +69,7 @@ config CHELSIO_T3
config CHELSIO_T4
tristate "Chelsio Communications T4/T5/T6 Ethernet support"
depends on PCI && (IPV6 || IPV6=n) && (TLS || TLS=n)
+ depends on PTP_1588_CLOCK_OPTIONAL
select FW_LOADER
select MDIO
select ZLIB_DEFLATE
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index 512da98019c6..e7575d41f4f5 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -924,7 +924,7 @@ static const struct net_device_ops cxgb_netdev_ops = {
.ndo_get_stats = t1_get_stats,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = t1_set_rxmode,
- .ndo_do_ioctl = t1_ioctl,
+ .ndo_eth_ioctl = t1_ioctl,
.ndo_change_mtu = t1_change_mtu,
.ndo_set_mac_address = t1_set_mac_addr,
.ndo_fix_features = t1_fix_features,
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 57f210c53afc..72af9d2a00ae 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -2135,13 +2135,18 @@ static int in_range(int val, int lo, int hi)
return val < 0 || (val <= hi && val >= lo);
}
-static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
+static int cxgb_siocdevprivate(struct net_device *dev,
+ struct ifreq *ifreq,
+ void __user *useraddr,
+ int cmd)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
- u32 cmd;
int ret;
+ if (cmd != SIOCCHIOCTL)
+ return -EOPNOTSUPP;
+
if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
return -EFAULT;
@@ -2546,8 +2551,6 @@ static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
fallthrough;
case SIOCGMIIPHY:
return mdio_mii_ioctl(&pi->phy.mdio, data, cmd);
- case SIOCCHIOCTL:
- return cxgb_extension_ioctl(dev, req->ifr_data);
default:
return -EOPNOTSUPP;
}
@@ -3181,7 +3184,8 @@ static const struct net_device_ops cxgb_netdev_ops = {
.ndo_get_stats = cxgb_get_stats,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = cxgb_set_rxmode,
- .ndo_do_ioctl = cxgb_ioctl,
+ .ndo_eth_ioctl = cxgb_ioctl,
+ .ndo_siocdevprivate = cxgb_siocdevprivate,
.ndo_change_mtu = cxgb_change_mtu,
.ndo_set_mac_address = cxgb_set_mac_addr,
.ndo_fix_features = cxgb_fix_features,
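The cxgb3 hunk shows the companion to the ndo_eth_ioctl rename: driver-private ioctls in the SIOCDEVPRIVATE range move into the dedicated ndo_siocdevprivate() hook, which is handed the raw user pointer and the cmd number and must reject every cmd it does not own. A minimal sketch under those assumptions, with the private cmd value and dispatch body hypothetical:

	#include <linux/netdevice.h>
	#include <linux/sockios.h>
	#include <linux/uaccess.h>

	#define EXAMPLE_PRIV_CMD (SIOCDEVPRIVATE + 0)	/* hypothetical cmd */

	static int example_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
					  void __user *data, int cmd)
	{
		u32 subcmd;

		if (cmd != EXAMPLE_PRIV_CMD)
			return -EOPNOTSUPP;
		if (copy_from_user(&subcmd, data, sizeof(subcmd)))
			return -EFAULT;
		/* ... dispatch on subcmd ... */
		return 0;
	}

	static const struct net_device_ops example_netdev_ops = {
		.ndo_siocdevprivate	= example_siocdevprivate,
	};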
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
index 6260b3bebd2b..786ceae34488 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
@@ -1441,7 +1441,7 @@ static int cxgb4_set_hash_filter(struct net_device *dev,
} else if (iconf & USE_ENC_IDX_F) {
if (f->fs.val.encap_vld) {
struct port_info *pi = netdev_priv(f->dev);
- u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
+ static const u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
/* allocate MPS TCAM entry */
ret = t4_alloc_encap_mac_filt(adapter, pi->viid,
@@ -1688,7 +1688,7 @@ int __cxgb4_set_filter(struct net_device *dev, int ftid,
} else if (iconf & USE_ENC_IDX_F) {
if (f->fs.val.encap_vld) {
struct port_info *pi = netdev_priv(f->dev);
- u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
+ static const u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
/* allocate MPS TCAM entry */
ret = t4_alloc_encap_mac_filt(adapter, pi->viid,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index dbf9a0e6601d..aa8573202c37 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -3872,7 +3872,7 @@ static const struct net_device_ops cxgb4_netdev_ops = {
.ndo_set_mac_address = cxgb_set_mac_addr,
.ndo_set_features = cxgb_set_features,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = cxgb_ioctl,
+ .ndo_eth_ioctl = cxgb_ioctl,
.ndo_change_mtu = cxgb_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = cxgb_netpoll,
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 2820a0bb971b..2842628ad2c5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -2837,7 +2837,7 @@ static const struct net_device_ops cxgb4vf_netdev_ops = {
.ndo_set_rx_mode = cxgb4vf_set_rxmode,
.ndo_set_mac_address = cxgb4vf_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = cxgb4vf_do_ioctl,
+ .ndo_eth_ioctl = cxgb4vf_do_ioctl,
.ndo_change_mtu = cxgb4vf_change_mtu,
.ndo_fix_features = cxgb4vf_fix_features,
.ndo_set_features = cxgb4vf_set_features,
diff --git a/drivers/net/ethernet/cirrus/Kconfig b/drivers/net/ethernet/cirrus/Kconfig
index d8af9e64dd1e..dac1764ba740 100644
--- a/drivers/net/ethernet/cirrus/Kconfig
+++ b/drivers/net/ethernet/cirrus/Kconfig
@@ -6,7 +6,7 @@
config NET_VENDOR_CIRRUS
bool "Cirrus devices"
default y
- depends on ISA || EISA || ARM || MAC
+ depends on ISA || EISA || ARM || MAC || COMPILE_TEST
help
If you have a network (Ethernet) card belonging to this class, say Y.
@@ -18,9 +18,16 @@ config NET_VENDOR_CIRRUS
if NET_VENDOR_CIRRUS
config CS89x0
- tristate "CS89x0 support"
- depends on ISA || EISA || ARM
+ tristate
+
+config CS89x0_ISA
+ tristate "CS89x0 ISA driver support"
+ depends on HAS_IOPORT_MAP
+ depends on ISA
depends on !PPC32
+ depends on CS89x0_PLATFORM=n
+ select NETDEV_LEGACY_INIT
+ select CS89x0
help
Support for CS89x0 chipset based Ethernet cards. If you have a
network (Ethernet) card of this type, say Y and read the file
@@ -30,15 +37,15 @@ config CS89x0
will be called cs89x0.
config CS89x0_PLATFORM
- bool "CS89x0 platform driver support" if HAS_IOPORT_MAP
- default !HAS_IOPORT_MAP
- depends on CS89x0
+ tristate "CS89x0 platform driver support"
+ depends on ARM || COMPILE_TEST
+ select CS89x0
help
- Say Y to compile the cs89x0 driver as a platform driver. This
- makes this driver suitable for use on certain evaluation boards
- such as the iMX21ADS.
+ Say Y to compile the cs89x0 platform driver. This makes this driver
+ suitable for use on certain evaluation boards such as the iMX21ADS.
- If you are unsure, say N.
+ To compile this driver as a module, choose M here. The module
+ will be called cs89x0.
config EP93XX_ETH
tristate "EP93xx Ethernet support"
diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c
index 33ace3307059..d0c4c8b7a15a 100644
--- a/drivers/net/ethernet/cirrus/cs89x0.c
+++ b/drivers/net/ethernet/cirrus/cs89x0.c
@@ -104,7 +104,7 @@ static char version[] __initdata =
* them to system IRQ numbers. This mapping is card specific and is set to
* the configuration of the Cirrus Eval board for this chip.
*/
-#ifndef CONFIG_CS89x0_PLATFORM
+#if IS_ENABLED(CONFIG_CS89x0_ISA)
static unsigned int netcard_portlist[] __used __initdata = {
0x300, 0x320, 0x340, 0x360, 0x200, 0x220, 0x240,
0x260, 0x280, 0x2a0, 0x2c0, 0x2e0, 0
@@ -292,7 +292,7 @@ write_irq(struct net_device *dev, int chip_type, int irq)
int i;
if (chip_type == CS8900) {
-#ifndef CONFIG_CS89x0_PLATFORM
+#if IS_ENABLED(CONFIG_CS89x0_ISA)
/* Search the mapping table for the corresponding IRQ pin. */
for (i = 0; i != ARRAY_SIZE(cs8900_irq_map); i++)
if (cs8900_irq_map[i] == irq)
@@ -859,7 +859,7 @@ net_open(struct net_device *dev)
goto bad_out;
}
} else {
-#if !defined(CONFIG_CS89x0_PLATFORM)
+#if IS_ENABLED(CONFIG_CS89x0_ISA)
if (((1 << dev->irq) & lp->irq_map) == 0) {
pr_err("%s: IRQ %d is not in our map of allowable IRQs, which is %x\n",
dev->name, dev->irq, lp->irq_map);
@@ -1523,7 +1523,7 @@ cs89x0_probe1(struct net_device *dev, void __iomem *ioaddr, int modular)
dev->irq = i;
} else {
i = lp->isa_config & INT_NO_MASK;
-#ifndef CONFIG_CS89x0_PLATFORM
+#if IS_ENABLED(CONFIG_CS89x0_ISA)
if (lp->chip_type == CS8900) {
/* Translate the IRQ using the IRQ mapping table. */
if (i >= ARRAY_SIZE(cs8900_irq_map))
@@ -1576,7 +1576,7 @@ out1:
return retval;
}
-#ifndef CONFIG_CS89x0_PLATFORM
+#if IS_ENABLED(CONFIG_CS89x0_ISA)
/*
* This function converts the I/O port address used by the cs89x0_probe() and
* init_module() functions to the I/O memory address used by the
@@ -1682,11 +1682,7 @@ out:
pr_warn("no cs8900 or cs8920 detected. Be sure to disable PnP with SETUP\n");
return ERR_PTR(err);
}
-#endif
-#endif
-
-#if defined(MODULE) && !defined(CONFIG_CS89x0_PLATFORM)
-
+#else
static struct net_device *dev_cs89x0;
/* Support the 'debug' module parm even if we're compiled for non-debug to
@@ -1757,9 +1753,9 @@ MODULE_LICENSE("GPL");
* (hw or software util)
*/
-int __init init_module(void)
+static int __init cs89x0_isa_init_module(void)
{
- struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
+ struct net_device *dev;
struct net_local *lp;
int ret = 0;
@@ -1768,6 +1764,7 @@ int __init init_module(void)
#else
debug = 0;
#endif
+ dev = alloc_etherdev(sizeof(struct net_local));
if (!dev)
return -ENOMEM;
@@ -1826,9 +1823,9 @@ out:
free_netdev(dev);
return ret;
}
+module_init(cs89x0_isa_init_module);
-void __exit
-cleanup_module(void)
+static void __exit cs89x0_isa_cleanup_module(void)
{
struct net_local *lp = netdev_priv(dev_cs89x0);
@@ -1838,9 +1835,11 @@ cleanup_module(void)
release_region(dev_cs89x0->base_addr, NETCARD_IO_EXTENT);
free_netdev(dev_cs89x0);
}
-#endif /* MODULE && !CONFIG_CS89x0_PLATFORM */
+module_exit(cs89x0_isa_cleanup_module);
+#endif /* MODULE */
+#endif /* CONFIG_CS89x0_ISA */
-#ifdef CONFIG_CS89x0_PLATFORM
+#if IS_ENABLED(CONFIG_CS89x0_PLATFORM)
static int __init cs89x0_platform_probe(struct platform_device *pdev)
{
struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
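
The #ifndef CONFIG_CS89x0_PLATFORM to IS_ENABLED(CONFIG_CS89x0_ISA) conversions above are needed because the ISA variant is now a tristate symbol: IS_ENABLED() evaluates true for both =y and =m, while a bare #ifdef would miss the modular case. A minimal sketch of the semantics:

	/* Sketch only: for a tristate symbol FOO,
	 *   FOO=y defines CONFIG_FOO,
	 *   FOO=m defines CONFIG_FOO_MODULE,
	 * and IS_ENABLED(CONFIG_FOO) is 1 in either case.
	 */
	#if IS_ENABLED(CONFIG_CS89x0_ISA)
	/* ISA-only port lists, IRQ maps and probe helpers live here */
	#endif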
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index 9f5e5ec69991..072fac5f5d24 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -733,7 +733,7 @@ static const struct net_device_ops ep93xx_netdev_ops = {
.ndo_open = ep93xx_open,
.ndo_stop = ep93xx_close,
.ndo_start_xmit = ep93xx_xmit,
- .ndo_do_ioctl = ep93xx_ioctl,
+ .ndo_eth_ioctl = ep93xx_ioctl,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
};
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 2a8bf53c2f75..e842de6f6635 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -1372,7 +1372,7 @@ static const struct net_device_ops dm9000_netdev_ops = {
.ndo_start_xmit = dm9000_start_xmit,
.ndo_tx_timeout = dm9000_timeout,
.ndo_set_rx_mode = dm9000_hash_table,
- .ndo_do_ioctl = dm9000_ioctl,
+ .ndo_eth_ioctl = dm9000_ioctl,
.ndo_set_features = dm9000_set_features,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index b125d7faefdf..36ab4cbf2ad0 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -443,6 +443,7 @@
=========================================================================
*/
+#include <linux/compat.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
@@ -902,7 +903,8 @@ static int de4x5_close(struct net_device *dev);
static struct net_device_stats *de4x5_get_stats(struct net_device *dev);
static void de4x5_local_stats(struct net_device *dev, char *buf, int pkt_len);
static void set_multicast_list(struct net_device *dev);
-static int de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int de4x5_siocdevprivate(struct net_device *dev, struct ifreq *rq,
+ void __user *data, int cmd);
/*
** Private functions
@@ -1084,7 +1086,7 @@ static const struct net_device_ops de4x5_netdev_ops = {
.ndo_start_xmit = de4x5_queue_pkt,
.ndo_get_stats = de4x5_get_stats,
.ndo_set_rx_mode = set_multicast_list,
- .ndo_do_ioctl = de4x5_ioctl,
+ .ndo_siocdevprivate = de4x5_siocdevprivate,
.ndo_set_mac_address= eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
@@ -5357,7 +5359,7 @@ de4x5_dbg_rx(struct sk_buff *skb, int len)
** this function is only used for my testing.
*/
static int
-de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+de4x5_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user *data, int cmd)
{
struct de4x5_private *lp = netdev_priv(dev);
struct de4x5_ioctl *ioc = (struct de4x5_ioctl *) &rq->ifr_ifru;
@@ -5371,6 +5373,9 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
} tmp;
u_long flags = 0;
+ if (cmd != SIOCDEVPRIVATE || in_compat_syscall())
+ return -EOPNOTSUPP;
+
switch(ioc->cmd) {
case DE4X5_GET_HWADDR: /* Get the hardware address */
ioc->len = ETH_ALEN;
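
de4x5 keeps its driver-private ioctl but moves it behind the new .ndo_siocdevprivate hook; the in_compat_syscall() bail-out refuses 32-bit callers, whose struct ifreq layout differs from the native one. A stripped-down sketch of the callback shape (hypothetical foo names, not from this patch):

	/* Sketch: the user pointer now arrives as a separate argument
	 * instead of each driver digging it out of ifr->ifr_data.
	 */
	static int foo_siocdevprivate(struct net_device *dev, struct ifreq *rq,
				      void __user *data, int cmd)
	{
		if (cmd != SIOCDEVPRIVATE || in_compat_syscall())
			return -EOPNOTSUPP;	/* layout is not compat-safe */

		/* interpret the request embedded in rq->ifr_ifru here */
		return 0;
	}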
diff --git a/drivers/net/ethernet/dec/tulip/media.c b/drivers/net/ethernet/dec/tulip/media.c
index 011604787b8e..55d6fc99f40b 100644
--- a/drivers/net/ethernet/dec/tulip/media.c
+++ b/drivers/net/ethernet/dec/tulip/media.c
@@ -362,7 +362,7 @@ void tulip_select_media(struct net_device *dev, int startup)
iowrite32(0x33, ioaddr + CSR12);
new_csr6 = 0x01860000;
/* Trigger autonegotiation. */
- iowrite32(startup ? 0x0201F868 : 0x0001F868, ioaddr + 0xB8);
+ iowrite32(0x0001F868, ioaddr + 0xB8);
} else {
iowrite32(0x32, ioaddr + CSR12);
new_csr6 = 0x00420000;
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index c1dcd6ca1457..fcedd733bacb 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -1271,7 +1271,7 @@ static const struct net_device_ops tulip_netdev_ops = {
.ndo_tx_timeout = tulip_tx_timeout,
.ndo_stop = tulip_close,
.ndo_get_stats = tulip_get_stats,
- .ndo_do_ioctl = private_ioctl,
+ .ndo_eth_ioctl = private_ioctl,
.ndo_set_rx_mode = set_rx_mode,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index 1876f15dd827..85b99099c6b9 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -341,7 +341,7 @@ static const struct net_device_ops netdev_ops = {
.ndo_start_xmit = start_tx,
.ndo_get_stats = get_stats,
.ndo_set_rx_mode = set_rx_mode,
- .ndo_do_ioctl = netdev_ioctl,
+ .ndo_eth_ioctl = netdev_ioctl,
.ndo_tx_timeout = tx_timeout,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index 734acb834c98..202ecb132053 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -95,7 +95,7 @@ static const struct net_device_ops netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_set_rx_mode = set_multicast,
- .ndo_do_ioctl = rio_ioctl,
+ .ndo_eth_ioctl = rio_ioctl,
.ndo_tx_timeout = rio_tx_timeout,
};
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index ee0ca712dd1c..c36d186dffed 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -479,7 +479,7 @@ static const struct net_device_ops netdev_ops = {
.ndo_start_xmit = start_tx,
.ndo_get_stats = get_stats,
.ndo_set_rx_mode = set_rx_mode,
- .ndo_do_ioctl = netdev_ioctl,
+ .ndo_eth_ioctl = netdev_ioctl,
.ndo_tx_timeout = tx_timeout,
.ndo_change_mtu = change_mtu,
.ndo_set_mac_address = sundance_set_mac_addr,
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index 48c6eb142dcc..6c51cf991dad 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -742,7 +742,7 @@ static const struct net_device_ops dnet_netdev_ops = {
.ndo_stop = dnet_close,
.ndo_get_stats = dnet_get_stats,
.ndo_start_xmit = dnet_start_xmit,
- .ndo_do_ioctl = phy_do_ioctl_running,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index e1b43b07755b..ed1ed48e7483 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -1009,7 +1009,7 @@ static const struct ethtool_ops ethoc_ethtool_ops = {
static const struct net_device_ops ethoc_netdev_ops = {
.ndo_open = ethoc_open,
.ndo_stop = ethoc_stop,
- .ndo_do_ioctl = ethoc_ioctl,
+ .ndo_eth_ioctl = ethoc_ioctl,
.ndo_set_mac_address = ethoc_set_mac_address,
.ndo_set_rx_mode = ethoc_set_multicast_list,
.ndo_change_mtu = ethoc_change_mtu,
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 11dbbfd38770..ff76e401a014 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -1616,7 +1616,7 @@ static const struct net_device_ops ftgmac100_netdev_ops = {
.ndo_start_xmit = ftgmac100_hard_start_xmit,
.ndo_set_mac_address = ftgmac100_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = phy_do_ioctl,
+ .ndo_eth_ioctl = phy_do_ioctl,
.ndo_tx_timeout = ftgmac100_tx_timeout,
.ndo_set_rx_mode = ftgmac100_set_rx_mode,
.ndo_set_features = ftgmac100_set_features,
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index 5a1a8f2ea63c..8a341e2d5833 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -1043,7 +1043,7 @@ static const struct net_device_ops ftmac100_netdev_ops = {
.ndo_start_xmit = ftmac100_hard_start_xmit,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = ftmac100_do_ioctl,
+ .ndo_eth_ioctl = ftmac100_do_ioctl,
};
/******************************************************************************
diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
index 0f141c14d72d..25c91b3c5fd3 100644
--- a/drivers/net/ethernet/fealnx.c
+++ b/drivers/net/ethernet/fealnx.c
@@ -463,7 +463,7 @@ static const struct net_device_ops netdev_ops = {
.ndo_start_xmit = start_tx,
.ndo_get_stats = get_stats,
.ndo_set_rx_mode = set_rx_mode,
- .ndo_do_ioctl = mii_ioctl,
+ .ndo_eth_ioctl = mii_ioctl,
.ndo_tx_timeout = fealnx_tx_timeout,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index 2d1abdd58fab..e04e1c5cb013 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -25,10 +25,10 @@ config FEC
depends on (M523x || M527x || M5272 || M528x || M520x || M532x || \
ARCH_MXC || SOC_IMX28 || COMPILE_TEST)
default ARCH_MXC || SOC_IMX28 if ARM
+ depends on PTP_1588_CLOCK_OPTIONAL
select CRC32
select PHYLIB
imply NET_SELFTESTS
- imply PTP_1588_CLOCK
help
Say Y here if you want to use the built-in 10/100 Fast ethernet
controller on some Motorola ColdFire and Freescale i.MX processors.
diff --git a/drivers/net/ethernet/freescale/dpaa/Kconfig b/drivers/net/ethernet/freescale/dpaa/Kconfig
index 626ec58a0afc..0e1439fd00bd 100644
--- a/drivers/net/ethernet/freescale/dpaa/Kconfig
+++ b/drivers/net/ethernet/freescale/dpaa/Kconfig
@@ -4,7 +4,6 @@ menuconfig FSL_DPAA_ETH
depends on FSL_DPAA && FSL_FMAN
select PHYLIB
select FIXED_PHY
- select FSL_FMAN_MAC
help
Data Path Acceleration Architecture Ethernet driver,
supporting the Freescale QorIQ chips.
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index e6826561cf11..685d2d8a3b36 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -3157,7 +3157,7 @@ static const struct net_device_ops dpaa_ops = {
.ndo_set_mac_address = dpaa_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = dpaa_set_rx_mode,
- .ndo_do_ioctl = dpaa_ioctl,
+ .ndo_eth_ioctl = dpaa_ioctl,
.ndo_setup_tc = dpaa_setup_tc,
.ndo_change_mtu = dpaa_change_mtu,
.ndo_bpf = dpaa_xdp,
diff --git a/drivers/net/ethernet/freescale/dpaa2/Makefile b/drivers/net/ethernet/freescale/dpaa2/Makefile
index c2ef74052ef8..3d9842af7f10 100644
--- a/drivers/net/ethernet/freescale/dpaa2/Makefile
+++ b/drivers/net/ethernet/freescale/dpaa2/Makefile
@@ -11,7 +11,7 @@ fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o dpaa2-mac.o dpmac.o dpa
fsl-dpaa2-eth-${CONFIG_FSL_DPAA2_ETH_DCB} += dpaa2-eth-dcb.o
fsl-dpaa2-eth-${CONFIG_DEBUG_FS} += dpaa2-eth-debugfs.o
fsl-dpaa2-ptp-objs := dpaa2-ptp.o dprtc.o
-fsl-dpaa2-switch-objs := dpaa2-switch.o dpaa2-switch-ethtool.o dpsw.o dpaa2-switch-flower.o
+fsl-dpaa2-switch-objs := dpaa2-switch.o dpaa2-switch-ethtool.o dpsw.o dpaa2-switch-flower.o dpaa2-mac.o dpmac.o
# Needed by the tracing framework
CFLAGS_dpaa2-eth.o := -I$(src)
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c
index 833696245565..605a39f892b9 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c
@@ -68,7 +68,7 @@ dpaa2_eth_dl_trap_item_lookup(struct dpaa2_eth_priv *priv, u16 trap_id)
struct dpaa2_eth_trap_item *dpaa2_eth_dl_get_trap(struct dpaa2_eth_priv *priv,
struct dpaa2_fapr *fapr)
{
- struct dpaa2_faf_error_bit {
+ static const struct dpaa2_faf_error_bit {
int position;
enum devlink_trap_generic_id trap_id;
} faf_bits[] = {
@@ -196,7 +196,8 @@ int dpaa2_eth_dl_register(struct dpaa2_eth_priv *priv)
struct dpaa2_eth_devlink_priv *dl_priv;
int err;
- priv->devlink = devlink_alloc(&dpaa2_eth_devlink_ops, sizeof(*dl_priv));
+ priv->devlink =
+ devlink_alloc(&dpaa2_eth_devlink_ops, sizeof(*dl_priv), dev);
if (!priv->devlink) {
dev_err(dev, "devlink_alloc failed\n");
return -ENOMEM;
@@ -204,7 +205,7 @@ int dpaa2_eth_dl_register(struct dpaa2_eth_priv *priv)
dl_priv = devlink_priv(priv->devlink);
dl_priv->dpaa2_priv = priv;
- err = devlink_register(priv->devlink, dev);
+ err = devlink_register(priv->devlink);
if (err) {
dev_err(dev, "devlink_register() = %d\n", err);
goto devlink_free;
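
This tracks the devlink core change in which the parent struct device is supplied at allocation time rather than at registration; under that assumption the calling pattern reduces to:

	/* Sketch of the updated calling convention used above. */
	devlink = devlink_alloc(&foo_devlink_ops, sizeof(struct foo_priv), dev);
	if (!devlink)
		return -ENOMEM;

	err = devlink_register(devlink);	/* no struct device argument */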
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 973352393bd4..7065c71ed7b8 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -2594,7 +2594,7 @@ static const struct net_device_ops dpaa2_eth_ops = {
.ndo_get_stats64 = dpaa2_eth_get_stats,
.ndo_set_rx_mode = dpaa2_eth_set_rx_mode,
.ndo_set_features = dpaa2_eth_set_features,
- .ndo_do_ioctl = dpaa2_eth_ioctl,
+ .ndo_eth_ioctl = dpaa2_eth_ioctl,
.ndo_change_mtu = dpaa2_eth_change_mtu,
.ndo_bpf = dpaa2_eth_xdp,
.ndo_xdp_xmit = dpaa2_eth_xdp_xmit,
@@ -4138,7 +4138,7 @@ static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv)
int err;
dpni_dev = to_fsl_mc_device(priv->net_dev->dev.parent);
- dpmac_dev = fsl_mc_get_endpoint(dpni_dev);
+ dpmac_dev = fsl_mc_get_endpoint(dpni_dev, 0);
if (PTR_ERR(dpmac_dev) == -EPROBE_DEFER)
return PTR_ERR(dpmac_dev);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c
index 70e04321c420..720c9230cab5 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c
@@ -15,18 +15,18 @@ static struct {
enum dpsw_counter id;
char name[ETH_GSTRING_LEN];
} dpaa2_switch_ethtool_counters[] = {
- {DPSW_CNT_ING_FRAME, "rx frames"},
- {DPSW_CNT_ING_BYTE, "rx bytes"},
- {DPSW_CNT_ING_FLTR_FRAME, "rx filtered frames"},
- {DPSW_CNT_ING_FRAME_DISCARD, "rx discarded frames"},
- {DPSW_CNT_ING_BCAST_FRAME, "rx b-cast frames"},
- {DPSW_CNT_ING_BCAST_BYTES, "rx b-cast bytes"},
- {DPSW_CNT_ING_MCAST_FRAME, "rx m-cast frames"},
- {DPSW_CNT_ING_MCAST_BYTE, "rx m-cast bytes"},
- {DPSW_CNT_EGR_FRAME, "tx frames"},
- {DPSW_CNT_EGR_BYTE, "tx bytes"},
- {DPSW_CNT_EGR_FRAME_DISCARD, "tx discarded frames"},
- {DPSW_CNT_ING_NO_BUFF_DISCARD, "rx discarded no buffer frames"},
+ {DPSW_CNT_ING_FRAME, "[hw] rx frames"},
+ {DPSW_CNT_ING_BYTE, "[hw] rx bytes"},
+ {DPSW_CNT_ING_FLTR_FRAME, "[hw] rx filtered frames"},
+ {DPSW_CNT_ING_FRAME_DISCARD, "[hw] rx discarded frames"},
+ {DPSW_CNT_ING_BCAST_FRAME, "[hw] rx bcast frames"},
+ {DPSW_CNT_ING_BCAST_BYTES, "[hw] rx bcast bytes"},
+ {DPSW_CNT_ING_MCAST_FRAME, "[hw] rx mcast frames"},
+ {DPSW_CNT_ING_MCAST_BYTE, "[hw] rx mcast bytes"},
+ {DPSW_CNT_EGR_FRAME, "[hw] tx frames"},
+ {DPSW_CNT_EGR_BYTE, "[hw] tx bytes"},
+ {DPSW_CNT_EGR_FRAME_DISCARD, "[hw] tx discarded frames"},
+ {DPSW_CNT_ING_NO_BUFF_DISCARD, "[hw] rx nobuffer discards"},
};
#define DPAA2_SWITCH_NUM_COUNTERS ARRAY_SIZE(dpaa2_switch_ethtool_counters)
@@ -62,6 +62,10 @@ dpaa2_switch_get_link_ksettings(struct net_device *netdev,
struct dpsw_link_state state = {0};
int err = 0;
+ if (dpaa2_switch_port_is_type_phy(port_priv))
+ return phylink_ethtool_ksettings_get(port_priv->mac->phylink,
+ link_ksettings);
+
err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0,
port_priv->ethsw_data->dpsw_handle,
port_priv->idx,
@@ -95,6 +99,10 @@ dpaa2_switch_set_link_ksettings(struct net_device *netdev,
bool if_running;
int err = 0, ret;
+ if (dpaa2_switch_port_is_type_phy(port_priv))
+ return phylink_ethtool_ksettings_set(port_priv->mac->phylink,
+ link_ksettings);
+
/* Interface needs to be down to change link settings */
if_running = netif_running(netdev);
if (if_running) {
@@ -134,11 +142,17 @@ dpaa2_switch_set_link_ksettings(struct net_device *netdev,
return err;
}
-static int dpaa2_switch_ethtool_get_sset_count(struct net_device *dev, int sset)
+static int
+dpaa2_switch_ethtool_get_sset_count(struct net_device *netdev, int sset)
{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ int num_ss_stats = DPAA2_SWITCH_NUM_COUNTERS;
+
switch (sset) {
case ETH_SS_STATS:
- return DPAA2_SWITCH_NUM_COUNTERS;
+ if (port_priv->mac)
+ num_ss_stats += dpaa2_mac_get_sset_count();
+ return num_ss_stats;
default:
return -EOPNOTSUPP;
}
@@ -147,14 +161,19 @@ static int dpaa2_switch_ethtool_get_sset_count(struct net_device *dev, int sset)
static void dpaa2_switch_ethtool_get_strings(struct net_device *netdev,
u32 stringset, u8 *data)
{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ u8 *p = data;
int i;
switch (stringset) {
case ETH_SS_STATS:
- for (i = 0; i < DPAA2_SWITCH_NUM_COUNTERS; i++)
- memcpy(data + i * ETH_GSTRING_LEN,
- dpaa2_switch_ethtool_counters[i].name,
+ for (i = 0; i < DPAA2_SWITCH_NUM_COUNTERS; i++) {
+ memcpy(p, dpaa2_switch_ethtool_counters[i].name,
ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ if (port_priv->mac)
+ dpaa2_mac_get_strings(p);
break;
}
}
@@ -176,6 +195,9 @@ static void dpaa2_switch_ethtool_get_stats(struct net_device *netdev,
netdev_err(netdev, "dpsw_if_get_counter[%s] err %d\n",
dpaa2_switch_ethtool_counters[i].name, err);
}
+
+ if (port_priv->mac)
+ dpaa2_mac_get_ethtool_stats(port_priv->mac, data + i);
}
const struct ethtool_ops dpaa2_switch_port_ethtool_ops = {
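
The switch's ethtool callbacks now append the MAC counters behind the switch counters, which leans on the standard ethtool contract: get_sset_count(), get_strings() and get_ethtool_stats() must agree on entry count and order. A reduced sketch, assuming a hypothetical FOO_NUM_STATS and foo_stat_names[] table:

	/* Sketch: the three callbacks must stay in lockstep; a count
	 * returned here obliges get_strings()/get_ethtool_stats() to
	 * produce exactly that many entries, in the same order.
	 */
	static int foo_get_sset_count(struct net_device *dev, int sset)
	{
		return sset == ETH_SS_STATS ? FOO_NUM_STATS : -EOPNOTSUPP;
	}

	static void foo_get_strings(struct net_device *dev, u32 sset, u8 *data)
	{
		int i;

		for (i = 0; i < FOO_NUM_STATS; i++)
			memcpy(data + i * ETH_GSTRING_LEN, foo_stat_names[i],
			       ETH_GSTRING_LEN);
	}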
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
index f9451ec5f2cb..d6eefbbf163f 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
@@ -111,11 +111,11 @@ static int dpaa2_switch_flower_parse_key(struct flow_cls_offload *cls,
return 0;
}
-int dpaa2_switch_acl_entry_add(struct dpaa2_switch_acl_tbl *acl_tbl,
+int dpaa2_switch_acl_entry_add(struct dpaa2_switch_filter_block *filter_block,
struct dpaa2_switch_acl_entry *entry)
{
struct dpsw_acl_entry_cfg *acl_entry_cfg = &entry->cfg;
- struct ethsw_core *ethsw = acl_tbl->ethsw;
+ struct ethsw_core *ethsw = filter_block->ethsw;
struct dpsw_acl_key *acl_key = &entry->key;
struct device *dev = ethsw->dev;
u8 *cmd_buff;
@@ -136,7 +136,7 @@ int dpaa2_switch_acl_entry_add(struct dpaa2_switch_acl_tbl *acl_tbl,
}
err = dpsw_acl_add_entry(ethsw->mc_io, 0, ethsw->dpsw_handle,
- acl_tbl->id, acl_entry_cfg);
+ filter_block->acl_id, acl_entry_cfg);
dma_unmap_single(dev, acl_entry_cfg->key_iova, sizeof(cmd_buff),
DMA_TO_DEVICE);
@@ -150,12 +150,13 @@ int dpaa2_switch_acl_entry_add(struct dpaa2_switch_acl_tbl *acl_tbl,
return 0;
}
-static int dpaa2_switch_acl_entry_remove(struct dpaa2_switch_acl_tbl *acl_tbl,
- struct dpaa2_switch_acl_entry *entry)
+static int
+dpaa2_switch_acl_entry_remove(struct dpaa2_switch_filter_block *block,
+ struct dpaa2_switch_acl_entry *entry)
{
struct dpsw_acl_entry_cfg *acl_entry_cfg = &entry->cfg;
struct dpsw_acl_key *acl_key = &entry->key;
- struct ethsw_core *ethsw = acl_tbl->ethsw;
+ struct ethsw_core *ethsw = block->ethsw;
struct device *dev = ethsw->dev;
u8 *cmd_buff;
int err;
@@ -175,7 +176,7 @@ static int dpaa2_switch_acl_entry_remove(struct dpaa2_switch_acl_tbl *acl_tbl,
}
err = dpsw_acl_remove_entry(ethsw->mc_io, 0, ethsw->dpsw_handle,
- acl_tbl->id, acl_entry_cfg);
+ block->acl_id, acl_entry_cfg);
dma_unmap_single(dev, acl_entry_cfg->key_iova, sizeof(cmd_buff),
DMA_TO_DEVICE);
@@ -190,19 +191,19 @@ static int dpaa2_switch_acl_entry_remove(struct dpaa2_switch_acl_tbl *acl_tbl,
}
static int
-dpaa2_switch_acl_entry_add_to_list(struct dpaa2_switch_acl_tbl *acl_tbl,
+dpaa2_switch_acl_entry_add_to_list(struct dpaa2_switch_filter_block *block,
struct dpaa2_switch_acl_entry *entry)
{
struct dpaa2_switch_acl_entry *tmp;
struct list_head *pos, *n;
int index = 0;
- if (list_empty(&acl_tbl->entries)) {
- list_add(&entry->list, &acl_tbl->entries);
+ if (list_empty(&block->acl_entries)) {
+ list_add(&entry->list, &block->acl_entries);
return index;
}
- list_for_each_safe(pos, n, &acl_tbl->entries) {
+ list_for_each_safe(pos, n, &block->acl_entries) {
tmp = list_entry(pos, struct dpaa2_switch_acl_entry, list);
if (entry->prio < tmp->prio)
break;
@@ -213,13 +214,13 @@ dpaa2_switch_acl_entry_add_to_list(struct dpaa2_switch_acl_tbl *acl_tbl,
}
static struct dpaa2_switch_acl_entry*
-dpaa2_switch_acl_entry_get_by_index(struct dpaa2_switch_acl_tbl *acl_tbl,
+dpaa2_switch_acl_entry_get_by_index(struct dpaa2_switch_filter_block *block,
int index)
{
struct dpaa2_switch_acl_entry *tmp;
int i = 0;
- list_for_each_entry(tmp, &acl_tbl->entries, list) {
+ list_for_each_entry(tmp, &block->acl_entries, list) {
if (i == index)
return tmp;
++i;
@@ -229,37 +230,38 @@ dpaa2_switch_acl_entry_get_by_index(struct dpaa2_switch_acl_tbl *acl_tbl,
}
static int
-dpaa2_switch_acl_entry_set_precedence(struct dpaa2_switch_acl_tbl *acl_tbl,
+dpaa2_switch_acl_entry_set_precedence(struct dpaa2_switch_filter_block *block,
struct dpaa2_switch_acl_entry *entry,
int precedence)
{
int err;
- err = dpaa2_switch_acl_entry_remove(acl_tbl, entry);
+ err = dpaa2_switch_acl_entry_remove(block, entry);
if (err)
return err;
entry->cfg.precedence = precedence;
- return dpaa2_switch_acl_entry_add(acl_tbl, entry);
+ return dpaa2_switch_acl_entry_add(block, entry);
}
-static int dpaa2_switch_acl_tbl_add_entry(struct dpaa2_switch_acl_tbl *acl_tbl,
- struct dpaa2_switch_acl_entry *entry)
+static int
+dpaa2_switch_acl_tbl_add_entry(struct dpaa2_switch_filter_block *block,
+ struct dpaa2_switch_acl_entry *entry)
{
struct dpaa2_switch_acl_entry *tmp;
int index, i, precedence, err;
/* Add the new ACL entry to the linked list and get its index */
- index = dpaa2_switch_acl_entry_add_to_list(acl_tbl, entry);
+ index = dpaa2_switch_acl_entry_add_to_list(block, entry);
/* Move up in priority the ACL entries to make space
* for the new filter.
*/
- precedence = DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES - acl_tbl->num_rules - 1;
+ precedence = DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES - block->num_acl_rules - 1;
for (i = 0; i < index; i++) {
- tmp = dpaa2_switch_acl_entry_get_by_index(acl_tbl, i);
+ tmp = dpaa2_switch_acl_entry_get_by_index(block, i);
- err = dpaa2_switch_acl_entry_set_precedence(acl_tbl, tmp,
+ err = dpaa2_switch_acl_entry_set_precedence(block, tmp,
precedence);
if (err)
return err;
@@ -269,19 +271,19 @@ static int dpaa2_switch_acl_tbl_add_entry(struct dpaa2_switch_acl_tbl *acl_tbl,
/* Add the new entry to hardware */
entry->cfg.precedence = precedence;
- err = dpaa2_switch_acl_entry_add(acl_tbl, entry);
- acl_tbl->num_rules++;
+ err = dpaa2_switch_acl_entry_add(block, entry);
+ block->num_acl_rules++;
return err;
}
static struct dpaa2_switch_acl_entry *
-dpaa2_switch_acl_tbl_find_entry_by_cookie(struct dpaa2_switch_acl_tbl *acl_tbl,
+dpaa2_switch_acl_tbl_find_entry_by_cookie(struct dpaa2_switch_filter_block *block,
unsigned long cookie)
{
struct dpaa2_switch_acl_entry *tmp, *n;
- list_for_each_entry_safe(tmp, n, &acl_tbl->entries, list) {
+ list_for_each_entry_safe(tmp, n, &block->acl_entries, list) {
if (tmp->cookie == cookie)
return tmp;
}
@@ -289,13 +291,13 @@ dpaa2_switch_acl_tbl_find_entry_by_cookie(struct dpaa2_switch_acl_tbl *acl_tbl,
}
static int
-dpaa2_switch_acl_entry_get_index(struct dpaa2_switch_acl_tbl *acl_tbl,
+dpaa2_switch_acl_entry_get_index(struct dpaa2_switch_filter_block *block,
struct dpaa2_switch_acl_entry *entry)
{
struct dpaa2_switch_acl_entry *tmp, *n;
int index = 0;
- list_for_each_entry_safe(tmp, n, &acl_tbl->entries, list) {
+ list_for_each_entry_safe(tmp, n, &block->acl_entries, list) {
if (tmp->cookie == entry->cookie)
return index;
index++;
@@ -303,21 +305,34 @@ dpaa2_switch_acl_entry_get_index(struct dpaa2_switch_acl_tbl *acl_tbl,
return -ENOENT;
}
+static struct dpaa2_switch_mirror_entry *
+dpaa2_switch_mirror_find_entry_by_cookie(struct dpaa2_switch_filter_block *block,
+ unsigned long cookie)
+{
+ struct dpaa2_switch_mirror_entry *tmp, *n;
+
+ list_for_each_entry_safe(tmp, n, &block->mirror_entries, list) {
+ if (tmp->cookie == cookie)
+ return tmp;
+ }
+ return NULL;
+}
+
static int
-dpaa2_switch_acl_tbl_remove_entry(struct dpaa2_switch_acl_tbl *acl_tbl,
+dpaa2_switch_acl_tbl_remove_entry(struct dpaa2_switch_filter_block *block,
struct dpaa2_switch_acl_entry *entry)
{
struct dpaa2_switch_acl_entry *tmp;
int index, i, precedence, err;
- index = dpaa2_switch_acl_entry_get_index(acl_tbl, entry);
+ index = dpaa2_switch_acl_entry_get_index(block, entry);
/* Remove from hardware the ACL entry */
- err = dpaa2_switch_acl_entry_remove(acl_tbl, entry);
+ err = dpaa2_switch_acl_entry_remove(block, entry);
if (err)
return err;
- acl_tbl->num_rules--;
+ block->num_acl_rules--;
/* Remove it from the list also */
list_del(&entry->list);
@@ -325,8 +340,8 @@ dpaa2_switch_acl_tbl_remove_entry(struct dpaa2_switch_acl_tbl *acl_tbl,
/* Move down in priority the entries over the deleted one */
precedence = entry->cfg.precedence;
for (i = index - 1; i >= 0; i--) {
- tmp = dpaa2_switch_acl_entry_get_by_index(acl_tbl, i);
- err = dpaa2_switch_acl_entry_set_precedence(acl_tbl, tmp,
+ tmp = dpaa2_switch_acl_entry_get_by_index(block, i);
+ err = dpaa2_switch_acl_entry_set_precedence(block, tmp,
precedence);
if (err)
return err;
@@ -339,10 +354,10 @@ dpaa2_switch_acl_tbl_remove_entry(struct dpaa2_switch_acl_tbl *acl_tbl,
return 0;
}
-static int dpaa2_switch_tc_parse_action(struct ethsw_core *ethsw,
- struct flow_action_entry *cls_act,
- struct dpsw_acl_result *dpsw_act,
- struct netlink_ext_ack *extack)
+static int dpaa2_switch_tc_parse_action_acl(struct ethsw_core *ethsw,
+ struct flow_action_entry *cls_act,
+ struct dpsw_acl_result *dpsw_act,
+ struct netlink_ext_ack *extack)
{
int err = 0;
@@ -374,22 +389,110 @@ out:
return err;
}
-int dpaa2_switch_cls_flower_replace(struct dpaa2_switch_acl_tbl *acl_tbl,
+static int
+dpaa2_switch_block_add_mirror(struct dpaa2_switch_filter_block *block,
+ struct dpaa2_switch_mirror_entry *entry,
+ u16 to, struct netlink_ext_ack *extack)
+{
+ unsigned long block_ports = block->ports;
+ struct ethsw_core *ethsw = block->ethsw;
+ struct ethsw_port_priv *port_priv;
+ unsigned long ports_added = 0;
+ u16 vlan = entry->cfg.vlan_id;
+ bool mirror_port_enabled;
+ int err, port;
+
+ /* Setup the mirroring port */
+ mirror_port_enabled = (ethsw->mirror_port != ethsw->sw_attr.num_ifs);
+ if (!mirror_port_enabled) {
+ err = dpsw_set_reflection_if(ethsw->mc_io, 0,
+ ethsw->dpsw_handle, to);
+ if (err)
+ return err;
+ ethsw->mirror_port = to;
+ }
+
+ /* Setup the same egress mirroring configuration on all the switch
+ * ports that share the same filter block.
+ */
+ for_each_set_bit(port, &block_ports, ethsw->sw_attr.num_ifs) {
+ port_priv = ethsw->ports[port];
+
+ /* We cannot add a per-VLAN mirroring rule if the VLAN in
+ * question is not installed on the switch port.
+ */
+ if (entry->cfg.filter == DPSW_REFLECTION_FILTER_INGRESS_VLAN &&
+ !(port_priv->vlans[vlan] & ETHSW_VLAN_MEMBER)) {
+ NL_SET_ERR_MSG(extack,
+ "VLAN must be installed on the switch port");
+ err = -EINVAL;
+ goto err_remove_filters;
+ }
+
+ err = dpsw_if_add_reflection(ethsw->mc_io, 0,
+ ethsw->dpsw_handle,
+ port, &entry->cfg);
+ if (err)
+ goto err_remove_filters;
+
+ ports_added |= BIT(port);
+ }
+
+ list_add(&entry->list, &block->mirror_entries);
+
+ return 0;
+
+err_remove_filters:
+ for_each_set_bit(port, &ports_added, ethsw->sw_attr.num_ifs) {
+ dpsw_if_remove_reflection(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ port, &entry->cfg);
+ }
+
+ if (!mirror_port_enabled)
+ ethsw->mirror_port = ethsw->sw_attr.num_ifs;
+
+ return err;
+}
+
+static int
+dpaa2_switch_block_remove_mirror(struct dpaa2_switch_filter_block *block,
+ struct dpaa2_switch_mirror_entry *entry)
+{
+ struct dpsw_reflection_cfg *cfg = &entry->cfg;
+ unsigned long block_ports = block->ports;
+ struct ethsw_core *ethsw = block->ethsw;
+ int port;
+
+ /* Remove this mirroring configuration from all the ports belonging to
+ * the filter block.
+ */
+ for_each_set_bit(port, &block_ports, ethsw->sw_attr.num_ifs)
+ dpsw_if_remove_reflection(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ port, cfg);
+
+ /* Also remove it from the list of mirror filters */
+ list_del(&entry->list);
+ kfree(entry);
+
+ /* If this was the last mirror filter, then unset the mirror port */
+ if (list_empty(&block->mirror_entries))
+ ethsw->mirror_port = ethsw->sw_attr.num_ifs;
+
+ return 0;
+}
+
+static int
+dpaa2_switch_cls_flower_replace_acl(struct dpaa2_switch_filter_block *block,
struct flow_cls_offload *cls)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
struct netlink_ext_ack *extack = cls->common.extack;
- struct ethsw_core *ethsw = acl_tbl->ethsw;
struct dpaa2_switch_acl_entry *acl_entry;
+ struct ethsw_core *ethsw = block->ethsw;
struct flow_action_entry *act;
int err;
- if (!flow_offload_has_one_action(&rule->action)) {
- NL_SET_ERR_MSG(extack, "Only singular actions are supported");
- return -EOPNOTSUPP;
- }
-
- if (dpaa2_switch_acl_tbl_is_full(acl_tbl)) {
+ if (dpaa2_switch_acl_tbl_is_full(block)) {
NL_SET_ERR_MSG(extack, "Maximum filter capacity reached");
return -ENOMEM;
}
@@ -403,15 +506,15 @@ int dpaa2_switch_cls_flower_replace(struct dpaa2_switch_acl_tbl *acl_tbl,
goto free_acl_entry;
act = &rule->action.entries[0];
- err = dpaa2_switch_tc_parse_action(ethsw, act,
- &acl_entry->cfg.result, extack);
+ err = dpaa2_switch_tc_parse_action_acl(ethsw, act,
+ &acl_entry->cfg.result, extack);
if (err)
goto free_acl_entry;
acl_entry->prio = cls->common.prio;
acl_entry->cookie = cls->cookie;
- err = dpaa2_switch_acl_tbl_add_entry(acl_tbl, acl_entry);
+ err = dpaa2_switch_acl_tbl_add_entry(block, acl_entry);
if (err)
goto free_acl_entry;
@@ -423,33 +526,171 @@ free_acl_entry:
return err;
}
-int dpaa2_switch_cls_flower_destroy(struct dpaa2_switch_acl_tbl *acl_tbl,
- struct flow_cls_offload *cls)
+static int dpaa2_switch_flower_parse_mirror_key(struct flow_cls_offload *cls,
+ u16 *vlan)
{
- struct dpaa2_switch_acl_entry *entry;
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+ struct flow_dissector *dissector = rule->match.dissector;
+ struct netlink_ext_ack *extack = cls->common.extack;
+
+ if (dissector->used_keys &
+ ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_VLAN))) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Mirroring is supported only per VLAN");
+ return -EOPNOTSUPP;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
+
+ flow_rule_match_vlan(rule, &match);
- entry = dpaa2_switch_acl_tbl_find_entry_by_cookie(acl_tbl, cls->cookie);
- if (!entry)
- return 0;
+ if (match.mask->vlan_priority != 0 ||
+ match.mask->vlan_dei != 0) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only matching on VLAN ID supported");
+ return -EOPNOTSUPP;
+ }
- return dpaa2_switch_acl_tbl_remove_entry(acl_tbl, entry);
+ if (match.mask->vlan_id != 0xFFF) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Masked matching not supported");
+ return -EOPNOTSUPP;
+ }
+
+ *vlan = (u16)match.key->vlan_id;
+ }
+
+ return 0;
}
-int dpaa2_switch_cls_matchall_replace(struct dpaa2_switch_acl_tbl *acl_tbl,
- struct tc_cls_matchall_offload *cls)
+static int
+dpaa2_switch_cls_flower_replace_mirror(struct dpaa2_switch_filter_block *block,
+ struct flow_cls_offload *cls)
{
struct netlink_ext_ack *extack = cls->common.extack;
- struct ethsw_core *ethsw = acl_tbl->ethsw;
- struct dpaa2_switch_acl_entry *acl_entry;
- struct flow_action_entry *act;
+ struct dpaa2_switch_mirror_entry *mirror_entry;
+ struct ethsw_core *ethsw = block->ethsw;
+ struct dpaa2_switch_mirror_entry *tmp;
+ struct flow_action_entry *cls_act;
+ struct list_head *pos, *n;
+ bool mirror_port_enabled;
+ u16 if_id, vlan;
int err;
- if (!flow_offload_has_one_action(&cls->rule->action)) {
+ mirror_port_enabled = (ethsw->mirror_port != ethsw->sw_attr.num_ifs);
+ cls_act = &cls->rule->action.entries[0];
+
+ /* Offload rules only when the destination is a DPAA2 switch port */
+ if (!dpaa2_switch_port_dev_check(cls_act->dev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Destination not a DPAA2 switch port");
+ return -EOPNOTSUPP;
+ }
+ if_id = dpaa2_switch_get_index(ethsw, cls_act->dev);
+
+ /* We have a single mirror port but can configure egress mirroring on
+ * all the other switch ports. We need to allow mirroring rules only
+ * when the destination port is the same.
+ */
+ if (mirror_port_enabled && ethsw->mirror_port != if_id) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Multiple mirror ports not supported");
+ return -EBUSY;
+ }
+
+ /* Parse the key */
+ err = dpaa2_switch_flower_parse_mirror_key(cls, &vlan);
+ if (err)
+ return err;
+
+ /* Make sure that we don't already have a mirror rule with the same
+ * configuration.
+ */
+ list_for_each_safe(pos, n, &block->mirror_entries) {
+ tmp = list_entry(pos, struct dpaa2_switch_mirror_entry, list);
+
+ if (tmp->cfg.filter == DPSW_REFLECTION_FILTER_INGRESS_VLAN &&
+ tmp->cfg.vlan_id == vlan) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "VLAN mirror filter already installed");
+ return -EBUSY;
+ }
+ }
+
+ mirror_entry = kzalloc(sizeof(*mirror_entry), GFP_KERNEL);
+ if (!mirror_entry)
+ return -ENOMEM;
+
+ mirror_entry->cfg.filter = DPSW_REFLECTION_FILTER_INGRESS_VLAN;
+ mirror_entry->cfg.vlan_id = vlan;
+ mirror_entry->cookie = cls->cookie;
+
+ return dpaa2_switch_block_add_mirror(block, mirror_entry, if_id,
+ extack);
+}
+
+int dpaa2_switch_cls_flower_replace(struct dpaa2_switch_filter_block *block,
+ struct flow_cls_offload *cls)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+ struct netlink_ext_ack *extack = cls->common.extack;
+ struct flow_action_entry *act;
+
+ if (!flow_offload_has_one_action(&rule->action)) {
NL_SET_ERR_MSG(extack, "Only singular actions are supported");
return -EOPNOTSUPP;
}
- if (dpaa2_switch_acl_tbl_is_full(acl_tbl)) {
+ act = &rule->action.entries[0];
+ switch (act->id) {
+ case FLOW_ACTION_REDIRECT:
+ case FLOW_ACTION_TRAP:
+ case FLOW_ACTION_DROP:
+ return dpaa2_switch_cls_flower_replace_acl(block, cls);
+ case FLOW_ACTION_MIRRED:
+ return dpaa2_switch_cls_flower_replace_mirror(block, cls);
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Action not supported");
+ return -EOPNOTSUPP;
+ }
+}
+
+int dpaa2_switch_cls_flower_destroy(struct dpaa2_switch_filter_block *block,
+ struct flow_cls_offload *cls)
+{
+ struct dpaa2_switch_mirror_entry *mirror_entry;
+ struct dpaa2_switch_acl_entry *acl_entry;
+
+ /* If this filter is an ACL one, remove it */
+ acl_entry = dpaa2_switch_acl_tbl_find_entry_by_cookie(block,
+ cls->cookie);
+ if (acl_entry)
+ return dpaa2_switch_acl_tbl_remove_entry(block, acl_entry);
+
+ /* If not, then it has to be a mirror */
+ mirror_entry = dpaa2_switch_mirror_find_entry_by_cookie(block,
+ cls->cookie);
+ if (mirror_entry)
+ return dpaa2_switch_block_remove_mirror(block,
+ mirror_entry);
+
+ return 0;
+}
+
+static int
+dpaa2_switch_cls_matchall_replace_acl(struct dpaa2_switch_filter_block *block,
+ struct tc_cls_matchall_offload *cls)
+{
+ struct netlink_ext_ack *extack = cls->common.extack;
+ struct ethsw_core *ethsw = block->ethsw;
+ struct dpaa2_switch_acl_entry *acl_entry;
+ struct flow_action_entry *act;
+ int err;
+
+ if (dpaa2_switch_acl_tbl_is_full(block)) {
NL_SET_ERR_MSG(extack, "Maximum filter capacity reached");
return -ENOMEM;
}
@@ -459,15 +700,15 @@ int dpaa2_switch_cls_matchall_replace(struct dpaa2_switch_acl_tbl *acl_tbl,
return -ENOMEM;
act = &cls->rule->action.entries[0];
- err = dpaa2_switch_tc_parse_action(ethsw, act,
- &acl_entry->cfg.result, extack);
+ err = dpaa2_switch_tc_parse_action_acl(ethsw, act,
+ &acl_entry->cfg.result, extack);
if (err)
goto free_acl_entry;
acl_entry->prio = cls->common.prio;
acl_entry->cookie = cls->cookie;
- err = dpaa2_switch_acl_tbl_add_entry(acl_tbl, acl_entry);
+ err = dpaa2_switch_acl_tbl_add_entry(block, acl_entry);
if (err)
goto free_acl_entry;
@@ -479,14 +720,159 @@ free_acl_entry:
return err;
}
-int dpaa2_switch_cls_matchall_destroy(struct dpaa2_switch_acl_tbl *acl_tbl,
+static int
+dpaa2_switch_cls_matchall_replace_mirror(struct dpaa2_switch_filter_block *block,
+ struct tc_cls_matchall_offload *cls)
+{
+ struct netlink_ext_ack *extack = cls->common.extack;
+ struct dpaa2_switch_mirror_entry *mirror_entry;
+ struct ethsw_core *ethsw = block->ethsw;
+ struct dpaa2_switch_mirror_entry *tmp;
+ struct flow_action_entry *cls_act;
+ struct list_head *pos, *n;
+ bool mirror_port_enabled;
+ u16 if_id;
+
+ mirror_port_enabled = (ethsw->mirror_port != ethsw->sw_attr.num_ifs);
+ cls_act = &cls->rule->action.entries[0];
+
+ /* Offload rules only when the destination is a DPAA2 switch port */
+ if (!dpaa2_switch_port_dev_check(cls_act->dev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Destination not a DPAA2 switch port");
+ return -EOPNOTSUPP;
+ }
+ if_id = dpaa2_switch_get_index(ethsw, cls_act->dev);
+
+ /* We have a single mirror port but can configure egress mirroring on
+ * all the other switch ports. We need to allow mirroring rules only
+ * when the destination port is the same.
+ */
+ if (mirror_port_enabled && ethsw->mirror_port != if_id) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Multiple mirror ports not supported");
+ return -EBUSY;
+ }
+
+ /* Make sure that we don't already have a mirror rule with the same
+ * configuration. One matchall rule per block is the maximum.
+ */
+ list_for_each_safe(pos, n, &block->mirror_entries) {
+ tmp = list_entry(pos, struct dpaa2_switch_mirror_entry, list);
+
+ if (tmp->cfg.filter == DPSW_REFLECTION_FILTER_INGRESS_ALL) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Matchall mirror filter already installed");
+ return -EBUSY;
+ }
+ }
+
+ mirror_entry = kzalloc(sizeof(*mirror_entry), GFP_KERNEL);
+ if (!mirror_entry)
+ return -ENOMEM;
+
+ mirror_entry->cfg.filter = DPSW_REFLECTION_FILTER_INGRESS_ALL;
+ mirror_entry->cookie = cls->cookie;
+
+ return dpaa2_switch_block_add_mirror(block, mirror_entry, if_id,
+ extack);
+}
+
+int dpaa2_switch_cls_matchall_replace(struct dpaa2_switch_filter_block *block,
+ struct tc_cls_matchall_offload *cls)
+{
+ struct netlink_ext_ack *extack = cls->common.extack;
+ struct flow_action_entry *act;
+
+ if (!flow_offload_has_one_action(&cls->rule->action)) {
+ NL_SET_ERR_MSG(extack, "Only singular actions are supported");
+ return -EOPNOTSUPP;
+ }
+
+ act = &cls->rule->action.entries[0];
+ switch (act->id) {
+ case FLOW_ACTION_REDIRECT:
+ case FLOW_ACTION_TRAP:
+ case FLOW_ACTION_DROP:
+ return dpaa2_switch_cls_matchall_replace_acl(block, cls);
+ case FLOW_ACTION_MIRRED:
+ return dpaa2_switch_cls_matchall_replace_mirror(block, cls);
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Action not supported");
+ return -EOPNOTSUPP;
+ }
+}
+
+int dpaa2_switch_block_offload_mirror(struct dpaa2_switch_filter_block *block,
+ struct ethsw_port_priv *port_priv)
+{
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ struct dpaa2_switch_mirror_entry *tmp;
+ int err;
+
+ list_for_each_entry(tmp, &block->mirror_entries, list) {
+ err = dpsw_if_add_reflection(ethsw->mc_io, 0,
+ ethsw->dpsw_handle,
+ port_priv->idx, &tmp->cfg);
+ if (err)
+ goto unwind_add;
+ }
+
+ return 0;
+
+unwind_add:
+ list_for_each_entry(tmp, &block->mirror_entries, list)
+ dpsw_if_remove_reflection(ethsw->mc_io, 0,
+ ethsw->dpsw_handle,
+ port_priv->idx, &tmp->cfg);
+
+ return err;
+}
+
+int dpaa2_switch_block_unoffload_mirror(struct dpaa2_switch_filter_block *block,
+ struct ethsw_port_priv *port_priv)
+{
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ struct dpaa2_switch_mirror_entry *tmp;
+ int err;
+
+ list_for_each_entry(tmp, &block->mirror_entries, list) {
+ err = dpsw_if_remove_reflection(ethsw->mc_io, 0,
+ ethsw->dpsw_handle,
+ port_priv->idx, &tmp->cfg);
+ if (err)
+ goto unwind_remove;
+ }
+
+ return 0;
+
+unwind_remove:
+ list_for_each_entry(tmp, &block->mirror_entries, list)
+ dpsw_if_add_reflection(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ port_priv->idx, &tmp->cfg);
+
+ return err;
+}
+
+int dpaa2_switch_cls_matchall_destroy(struct dpaa2_switch_filter_block *block,
struct tc_cls_matchall_offload *cls)
{
- struct dpaa2_switch_acl_entry *entry;
+ struct dpaa2_switch_mirror_entry *mirror_entry;
+ struct dpaa2_switch_acl_entry *acl_entry;
+
+ /* If this filter is an ACL one, remove it */
+ acl_entry = dpaa2_switch_acl_tbl_find_entry_by_cookie(block,
+ cls->cookie);
+ if (acl_entry)
+ return dpaa2_switch_acl_tbl_remove_entry(block,
+ acl_entry);
- entry = dpaa2_switch_acl_tbl_find_entry_by_cookie(acl_tbl, cls->cookie);
- if (!entry)
- return 0;
+ /* If not, then it has to be a mirror */
+ mirror_entry = dpaa2_switch_mirror_find_entry_by_cookie(block,
+ cls->cookie);
+ if (mirror_entry)
+ return dpaa2_switch_block_remove_mirror(block,
+ mirror_entry);
- return dpaa2_switch_acl_tbl_remove_entry(acl_tbl, entry);
+ return 0;
}
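
Both the flower and matchall paths above rely on the same single-mirror-destination invariant: ethsw->mirror_port holds a valid interface index while at least one mirror rule exists, and is parked at sw_attr.num_ifs (one past the last valid index) otherwise. A one-line sketch of the sentinel test the code repeats:

	/* Sketch of the sentinel convention used by the mirror paths. */
	static bool foo_mirror_port_enabled(struct ethsw_core *ethsw)
	{
		return ethsw->mirror_port != ethsw->sw_attr.num_ifs;
	}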
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
index 98cc0133c343..1419c8dccea2 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
@@ -41,14 +41,14 @@ static struct dpaa2_switch_fdb *dpaa2_switch_fdb_get_unused(struct ethsw_core *e
return NULL;
}
-static struct dpaa2_switch_acl_tbl *
-dpaa2_switch_acl_tbl_get_unused(struct ethsw_core *ethsw)
+static struct dpaa2_switch_filter_block *
+dpaa2_switch_filter_block_get_unused(struct ethsw_core *ethsw)
{
int i;
for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
- if (!ethsw->acls[i].in_use)
- return &ethsw->acls[i];
+ if (!ethsw->filter_blocks[i].in_use)
+ return &ethsw->filter_blocks[i];
return NULL;
}
@@ -594,12 +594,18 @@ static int dpaa2_switch_port_change_mtu(struct net_device *netdev, int mtu)
return 0;
}
-static int dpaa2_switch_port_carrier_state_sync(struct net_device *netdev)
+static int dpaa2_switch_port_link_state_update(struct net_device *netdev)
{
struct ethsw_port_priv *port_priv = netdev_priv(netdev);
struct dpsw_link_state state;
int err;
+ /* When we manage the MAC/PHY using phylink there is no need
+ * to manually update the netif_carrier.
+ */
+ if (dpaa2_switch_port_is_type_phy(port_priv))
+ return 0;
+
/* Interrupts are received even though no one issued an 'ifconfig up'
* on the switch interface. Ignore these link state update interrupts
*/
@@ -677,12 +683,14 @@ static int dpaa2_switch_port_open(struct net_device *netdev)
struct ethsw_core *ethsw = port_priv->ethsw_data;
int err;
- /* Explicitly set carrier off, otherwise
- * netif_carrier_ok() will return true and cause 'ip link show'
- * to report the LOWER_UP flag, even though the link
- * notification wasn't even received.
- */
- netif_carrier_off(netdev);
+ if (!dpaa2_switch_port_is_type_phy(port_priv)) {
+ /* Explicitly set carrier off, otherwise
+ * netif_carrier_ok() will return true and cause 'ip link show'
+ * to report the LOWER_UP flag, even though the link
+ * notification wasn't even received.
+ */
+ netif_carrier_off(netdev);
+ }
err = dpsw_if_enable(port_priv->ethsw_data->mc_io, 0,
port_priv->ethsw_data->dpsw_handle,
@@ -692,23 +700,12 @@ static int dpaa2_switch_port_open(struct net_device *netdev)
return err;
}
- /* sync carrier state */
- err = dpaa2_switch_port_carrier_state_sync(netdev);
- if (err) {
- netdev_err(netdev,
- "dpaa2_switch_port_carrier_state_sync err %d\n", err);
- goto err_carrier_sync;
- }
-
dpaa2_switch_enable_ctrl_if_napi(ethsw);
- return 0;
+ if (dpaa2_switch_port_is_type_phy(port_priv))
+ phylink_start(port_priv->mac->phylink);
-err_carrier_sync:
- dpsw_if_disable(port_priv->ethsw_data->mc_io, 0,
- port_priv->ethsw_data->dpsw_handle,
- port_priv->idx);
- return err;
+ return 0;
}
static int dpaa2_switch_port_stop(struct net_device *netdev)
@@ -717,6 +714,13 @@ static int dpaa2_switch_port_stop(struct net_device *netdev)
struct ethsw_core *ethsw = port_priv->ethsw_data;
int err;
+ if (dpaa2_switch_port_is_type_phy(port_priv)) {
+ phylink_stop(port_priv->mac->phylink);
+ } else {
+ netif_tx_stop_all_queues(netdev);
+ netif_carrier_off(netdev);
+ }
+
err = dpsw_if_disable(port_priv->ethsw_data->mc_io, 0,
port_priv->ethsw_data->dpsw_handle,
port_priv->idx);
@@ -1127,28 +1131,28 @@ err_exit:
}
static int
-dpaa2_switch_setup_tc_cls_flower(struct dpaa2_switch_acl_tbl *acl_tbl,
+dpaa2_switch_setup_tc_cls_flower(struct dpaa2_switch_filter_block *filter_block,
struct flow_cls_offload *f)
{
switch (f->command) {
case FLOW_CLS_REPLACE:
- return dpaa2_switch_cls_flower_replace(acl_tbl, f);
+ return dpaa2_switch_cls_flower_replace(filter_block, f);
case FLOW_CLS_DESTROY:
- return dpaa2_switch_cls_flower_destroy(acl_tbl, f);
+ return dpaa2_switch_cls_flower_destroy(filter_block, f);
default:
return -EOPNOTSUPP;
}
}
static int
-dpaa2_switch_setup_tc_cls_matchall(struct dpaa2_switch_acl_tbl *acl_tbl,
+dpaa2_switch_setup_tc_cls_matchall(struct dpaa2_switch_filter_block *block,
struct tc_cls_matchall_offload *f)
{
switch (f->command) {
case TC_CLSMATCHALL_REPLACE:
- return dpaa2_switch_cls_matchall_replace(acl_tbl, f);
+ return dpaa2_switch_cls_matchall_replace(block, f);
case TC_CLSMATCHALL_DESTROY:
- return dpaa2_switch_cls_matchall_destroy(acl_tbl, f);
+ return dpaa2_switch_cls_matchall_destroy(block, f);
default:
return -EOPNOTSUPP;
}
@@ -1170,106 +1174,122 @@ static int dpaa2_switch_port_setup_tc_block_cb_ig(enum tc_setup_type type,
static LIST_HEAD(dpaa2_switch_block_cb_list);
-static int dpaa2_switch_port_acl_tbl_bind(struct ethsw_port_priv *port_priv,
- struct dpaa2_switch_acl_tbl *acl_tbl)
+static int
+dpaa2_switch_port_acl_tbl_bind(struct ethsw_port_priv *port_priv,
+ struct dpaa2_switch_filter_block *block)
{
struct ethsw_core *ethsw = port_priv->ethsw_data;
struct net_device *netdev = port_priv->netdev;
struct dpsw_acl_if_cfg acl_if_cfg;
int err;
- if (port_priv->acl_tbl)
+ if (port_priv->filter_block)
return -EINVAL;
acl_if_cfg.if_id[0] = port_priv->idx;
acl_if_cfg.num_ifs = 1;
err = dpsw_acl_add_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
- acl_tbl->id, &acl_if_cfg);
+ block->acl_id, &acl_if_cfg);
if (err) {
netdev_err(netdev, "dpsw_acl_add_if err %d\n", err);
return err;
}
- acl_tbl->ports |= BIT(port_priv->idx);
- port_priv->acl_tbl = acl_tbl;
+ block->ports |= BIT(port_priv->idx);
+ port_priv->filter_block = block;
return 0;
}
static int
dpaa2_switch_port_acl_tbl_unbind(struct ethsw_port_priv *port_priv,
- struct dpaa2_switch_acl_tbl *acl_tbl)
+ struct dpaa2_switch_filter_block *block)
{
struct ethsw_core *ethsw = port_priv->ethsw_data;
struct net_device *netdev = port_priv->netdev;
struct dpsw_acl_if_cfg acl_if_cfg;
int err;
- if (port_priv->acl_tbl != acl_tbl)
+ if (port_priv->filter_block != block)
return -EINVAL;
acl_if_cfg.if_id[0] = port_priv->idx;
acl_if_cfg.num_ifs = 1;
err = dpsw_acl_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
- acl_tbl->id, &acl_if_cfg);
+ block->acl_id, &acl_if_cfg);
if (err) {
netdev_err(netdev, "dpsw_acl_add_if err %d\n", err);
return err;
}
- acl_tbl->ports &= ~BIT(port_priv->idx);
- port_priv->acl_tbl = NULL;
+ block->ports &= ~BIT(port_priv->idx);
+ port_priv->filter_block = NULL;
return 0;
}
static int dpaa2_switch_port_block_bind(struct ethsw_port_priv *port_priv,
- struct dpaa2_switch_acl_tbl *acl_tbl)
+ struct dpaa2_switch_filter_block *block)
{
- struct dpaa2_switch_acl_tbl *old_acl_tbl = port_priv->acl_tbl;
+ struct dpaa2_switch_filter_block *old_block = port_priv->filter_block;
int err;
+ /* Offload all the mirror entries found in the block onto this new
+ * port as it joins.
+ */
+ err = dpaa2_switch_block_offload_mirror(block, port_priv);
+ if (err)
+ return err;
+
/* If the port is already bound to this ACL table then do nothing. This
* can happen when this port is the first one to join a tc block
*/
- if (port_priv->acl_tbl == acl_tbl)
+ if (port_priv->filter_block == block)
return 0;
- err = dpaa2_switch_port_acl_tbl_unbind(port_priv, old_acl_tbl);
+ err = dpaa2_switch_port_acl_tbl_unbind(port_priv, old_block);
if (err)
return err;
/* Mark the previous ACL table as being unused if this was the last
* port that was using it.
*/
- if (old_acl_tbl->ports == 0)
- old_acl_tbl->in_use = false;
+ if (old_block->ports == 0)
+ old_block->in_use = false;
- return dpaa2_switch_port_acl_tbl_bind(port_priv, acl_tbl);
+ return dpaa2_switch_port_acl_tbl_bind(port_priv, block);
}
-static int dpaa2_switch_port_block_unbind(struct ethsw_port_priv *port_priv,
- struct dpaa2_switch_acl_tbl *acl_tbl)
+static int
+dpaa2_switch_port_block_unbind(struct ethsw_port_priv *port_priv,
+ struct dpaa2_switch_filter_block *block)
{
struct ethsw_core *ethsw = port_priv->ethsw_data;
- struct dpaa2_switch_acl_tbl *new_acl_tbl;
+ struct dpaa2_switch_filter_block *new_block;
int err;
+ /* Unoffload all the mirror entries found in the block from the
+ * port leaving it.
+ */
+ err = dpaa2_switch_block_unoffload_mirror(block, port_priv);
+ if (err)
+ return err;
+
/* We are the last port that leaves a block (an ACL table).
* We'll continue to use this table.
*/
- if (acl_tbl->ports == BIT(port_priv->idx))
+ if (block->ports == BIT(port_priv->idx))
return 0;
- err = dpaa2_switch_port_acl_tbl_unbind(port_priv, acl_tbl);
+ err = dpaa2_switch_port_acl_tbl_unbind(port_priv, block);
if (err)
return err;
- if (acl_tbl->ports == 0)
- acl_tbl->in_use = false;
+ if (block->ports == 0)
+ block->in_use = false;
- new_acl_tbl = dpaa2_switch_acl_tbl_get_unused(ethsw);
- new_acl_tbl->in_use = true;
- return dpaa2_switch_port_acl_tbl_bind(port_priv, new_acl_tbl);
+ new_block = dpaa2_switch_filter_block_get_unused(ethsw);
+ new_block->in_use = true;
+ return dpaa2_switch_port_acl_tbl_bind(port_priv, new_block);
}
static int dpaa2_switch_setup_tc_block_bind(struct net_device *netdev,
@@ -1277,7 +1297,7 @@ static int dpaa2_switch_setup_tc_block_bind(struct net_device *netdev,
{
struct ethsw_port_priv *port_priv = netdev_priv(netdev);
struct ethsw_core *ethsw = port_priv->ethsw_data;
- struct dpaa2_switch_acl_tbl *acl_tbl;
+ struct dpaa2_switch_filter_block *filter_block;
struct flow_block_cb *block_cb;
bool register_block = false;
int err;
@@ -1287,24 +1307,24 @@ static int dpaa2_switch_setup_tc_block_bind(struct net_device *netdev,
ethsw);
if (!block_cb) {
- /* If the ACL table is not already known, then this port must
- * be the first to join it. In this case, we can just continue
- * to use our private table
+ /* If the filter block is not already known, then this port
+ * must be the first to join it. In this case, we can just
+ * continue to use our private table
*/
- acl_tbl = port_priv->acl_tbl;
+ filter_block = port_priv->filter_block;
block_cb = flow_block_cb_alloc(dpaa2_switch_port_setup_tc_block_cb_ig,
- ethsw, acl_tbl, NULL);
+ ethsw, filter_block, NULL);
if (IS_ERR(block_cb))
return PTR_ERR(block_cb);
register_block = true;
} else {
- acl_tbl = flow_block_cb_priv(block_cb);
+ filter_block = flow_block_cb_priv(block_cb);
}
flow_block_cb_incref(block_cb);
- err = dpaa2_switch_port_block_bind(port_priv, acl_tbl);
+ err = dpaa2_switch_port_block_bind(port_priv, filter_block);
if (err)
goto err_block_bind;
@@ -1327,7 +1347,7 @@ static void dpaa2_switch_setup_tc_block_unbind(struct net_device *netdev,
{
struct ethsw_port_priv *port_priv = netdev_priv(netdev);
struct ethsw_core *ethsw = port_priv->ethsw_data;
- struct dpaa2_switch_acl_tbl *acl_tbl;
+ struct dpaa2_switch_filter_block *filter_block;
struct flow_block_cb *block_cb;
int err;
@@ -1337,8 +1357,8 @@ static void dpaa2_switch_setup_tc_block_unbind(struct net_device *netdev,
if (!block_cb)
return;
- acl_tbl = flow_block_cb_priv(block_cb);
- err = dpaa2_switch_port_block_unbind(port_priv, acl_tbl);
+ filter_block = flow_block_cb_priv(block_cb);
+ err = dpaa2_switch_port_block_unbind(port_priv, filter_block);
if (!err && !flow_block_cb_decref(block_cb)) {
flow_block_cb_remove(block_cb, f);
list_del(&block_cb->driver_list);
@@ -1403,41 +1423,103 @@ bool dpaa2_switch_port_dev_check(const struct net_device *netdev)
return netdev->netdev_ops == &dpaa2_switch_port_ops;
}
-static void dpaa2_switch_links_state_update(struct ethsw_core *ethsw)
+static int dpaa2_switch_port_connect_mac(struct ethsw_port_priv *port_priv)
{
- int i;
+ struct fsl_mc_device *dpsw_port_dev, *dpmac_dev;
+ struct dpaa2_mac *mac;
+ int err;
- for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
- dpaa2_switch_port_carrier_state_sync(ethsw->ports[i]->netdev);
- dpaa2_switch_port_set_mac_addr(ethsw->ports[i]);
+ dpsw_port_dev = to_fsl_mc_device(port_priv->netdev->dev.parent);
+ dpmac_dev = fsl_mc_get_endpoint(dpsw_port_dev, port_priv->idx);
+
+ if (PTR_ERR(dpmac_dev) == -EPROBE_DEFER)
+ return PTR_ERR(dpmac_dev);
+
+ if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
+ return 0;
+
+ mac = kzalloc(sizeof(*mac), GFP_KERNEL);
+ if (!mac)
+ return -ENOMEM;
+
+ mac->mc_dev = dpmac_dev;
+ mac->mc_io = port_priv->ethsw_data->mc_io;
+ mac->net_dev = port_priv->netdev;
+
+ err = dpaa2_mac_open(mac);
+ if (err)
+ goto err_free_mac;
+ port_priv->mac = mac;
+
+ if (dpaa2_switch_port_is_type_phy(port_priv)) {
+ err = dpaa2_mac_connect(mac);
+ if (err) {
+ netdev_err(port_priv->netdev,
+ "Error connecting to the MAC endpoint %pe\n",
+ ERR_PTR(err));
+ goto err_close_mac;
+ }
}
+
+ return 0;
+
+err_close_mac:
+ dpaa2_mac_close(mac);
+ port_priv->mac = NULL;
+err_free_mac:
+ kfree(mac);
+ return err;
+}
+
+static void dpaa2_switch_port_disconnect_mac(struct ethsw_port_priv *port_priv)
+{
+ if (dpaa2_switch_port_is_type_phy(port_priv))
+ dpaa2_mac_disconnect(port_priv->mac);
+
+ if (!dpaa2_switch_port_has_mac(port_priv))
+ return;
+
+ dpaa2_mac_close(port_priv->mac);
+ kfree(port_priv->mac);
+ port_priv->mac = NULL;
}
static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg)
{
struct device *dev = (struct device *)arg;
struct ethsw_core *ethsw = dev_get_drvdata(dev);
-
- /* Mask the events and the if_id reserved bits to be cleared on read */
- u32 status = DPSW_IRQ_EVENT_LINK_CHANGED | 0xFFFF0000;
- int err;
+ struct ethsw_port_priv *port_priv;
+ u32 status = ~0;
+ int err, if_id;
err = dpsw_get_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
DPSW_IRQ_INDEX_IF, &status);
if (err) {
dev_err(dev, "Can't get irq status (err %d)\n", err);
-
- err = dpsw_clear_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
- DPSW_IRQ_INDEX_IF, 0xFFFFFFFF);
- if (err)
- dev_err(dev, "Can't clear irq status (err %d)\n", err);
goto out;
}
- if (status & DPSW_IRQ_EVENT_LINK_CHANGED)
- dpaa2_switch_links_state_update(ethsw);
+ if_id = (status & 0xFFFF0000) >> 16;
+ port_priv = ethsw->ports[if_id];
+
+ if (status & DPSW_IRQ_EVENT_LINK_CHANGED) {
+ dpaa2_switch_port_link_state_update(port_priv->netdev);
+ dpaa2_switch_port_set_mac_addr(port_priv);
+ }
+
+ if (status & DPSW_IRQ_EVENT_ENDPOINT_CHANGED) {
+ if (dpaa2_switch_port_has_mac(port_priv))
+ dpaa2_switch_port_disconnect_mac(port_priv);
+ else
+ dpaa2_switch_port_connect_mac(port_priv);
+ }
out:
+ err = dpsw_clear_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ DPSW_IRQ_INDEX_IF, status);
+ if (err)
+ dev_err(dev, "Can't clear irq status (err %d)\n", err);
+
return IRQ_HANDLED;
}
@@ -1889,8 +1971,12 @@ static int dpaa2_switch_port_attr_set_event(struct net_device *netdev,
return notifier_from_errno(err);
}
+static struct notifier_block dpaa2_switch_port_switchdev_nb;
+static struct notifier_block dpaa2_switch_port_switchdev_blocking_nb;
+
static int dpaa2_switch_port_bridge_join(struct net_device *netdev,
- struct net_device *upper_dev)
+ struct net_device *upper_dev,
+ struct netlink_ext_ack *extack)
{
struct ethsw_port_priv *port_priv = netdev_priv(netdev);
struct ethsw_core *ethsw = port_priv->ethsw_data;
@@ -1906,8 +1992,8 @@ static int dpaa2_switch_port_bridge_join(struct net_device *netdev,
other_port_priv = netdev_priv(other_dev);
if (other_port_priv->ethsw_data != port_priv->ethsw_data) {
- netdev_err(netdev,
- "Interface from a different DPSW is in the bridge already!\n");
+ NL_SET_ERR_MSG_MOD(extack,
+ "Interface from a different DPSW is in the bridge already");
return -EINVAL;
}
}
@@ -1929,8 +2015,16 @@ static int dpaa2_switch_port_bridge_join(struct net_device *netdev,
if (err)
goto err_egress_flood;
+ err = switchdev_bridge_port_offload(netdev, netdev, NULL,
+ &dpaa2_switch_port_switchdev_nb,
+ &dpaa2_switch_port_switchdev_blocking_nb,
+ false, extack);
+ if (err)
+ goto err_switchdev_offload;
+
return 0;
+err_switchdev_offload:
err_egress_flood:
dpaa2_switch_port_set_fdb(port_priv, NULL);
return err;
@@ -1956,6 +2050,13 @@ static int dpaa2_switch_port_restore_rxvlan(struct net_device *vdev, int vid, vo
return dpaa2_switch_port_vlan_add(arg, vlan_proto, vid);
}
+static void dpaa2_switch_port_pre_bridge_leave(struct net_device *netdev)
+{
+ switchdev_bridge_port_unoffload(netdev, NULL,
+ &dpaa2_switch_port_switchdev_nb,
+ &dpaa2_switch_port_switchdev_blocking_nb);
+}
+
static int dpaa2_switch_port_bridge_leave(struct net_device *netdev)
{
struct ethsw_port_priv *port_priv = netdev_priv(netdev);
@@ -2029,6 +2130,28 @@ static int dpaa2_switch_prevent_bridging_with_8021q_upper(struct net_device *net
return 0;
}
+static int
+dpaa2_switch_prechangeupper_sanity_checks(struct net_device *netdev,
+ struct net_device *upper_dev,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ if (!br_vlan_enabled(upper_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot join a VLAN-unaware bridge");
+ return -EOPNOTSUPP;
+ }
+
+ err = dpaa2_switch_prevent_bridging_with_8021q_upper(netdev);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot join a bridge while VLAN uppers are present");
+ return err;
+ }
+
+ return 0;
+}
+
static int dpaa2_switch_port_netdevice_event(struct notifier_block *nb,
unsigned long event, void *ptr)
{
@@ -2049,25 +2172,23 @@ static int dpaa2_switch_port_netdevice_event(struct notifier_block *nb,
if (!netif_is_bridge_master(upper_dev))
break;
- if (!br_vlan_enabled(upper_dev)) {
- NL_SET_ERR_MSG_MOD(extack, "Cannot join a VLAN-unaware bridge");
- err = -EOPNOTSUPP;
+ err = dpaa2_switch_prechangeupper_sanity_checks(netdev,
+ upper_dev,
+ extack);
+ if (err)
goto out;
- }
- err = dpaa2_switch_prevent_bridging_with_8021q_upper(netdev);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack,
- "Cannot join a bridge while VLAN uppers are present");
- goto out;
- }
+ if (!info->linking)
+ dpaa2_switch_port_pre_bridge_leave(netdev);
break;
case NETDEV_CHANGEUPPER:
upper_dev = info->upper_dev;
if (netif_is_bridge_master(upper_dev)) {
if (info->linking)
- err = dpaa2_switch_port_bridge_join(netdev, upper_dev);
+ err = dpaa2_switch_port_bridge_join(netdev,
+ upper_dev,
+ extack);
else
err = dpaa2_switch_port_bridge_leave(netdev);
}
@@ -2952,7 +3073,7 @@ static int dpaa2_switch_port_trap_mac_addr(struct ethsw_port_priv *port_priv,
acl_entry.cfg.precedence = 0;
acl_entry.cfg.result.action = DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF;
- return dpaa2_switch_acl_entry_add(port_priv->acl_tbl, &acl_entry);
+ return dpaa2_switch_acl_entry_add(port_priv->filter_block, &acl_entry);
}
static int dpaa2_switch_port_init(struct ethsw_port_priv *port_priv, u16 port)
@@ -2965,7 +3086,7 @@ static int dpaa2_switch_port_init(struct ethsw_port_priv *port_priv, u16 port)
};
struct net_device *netdev = port_priv->netdev;
struct ethsw_core *ethsw = port_priv->ethsw_data;
- struct dpaa2_switch_acl_tbl *acl_tbl;
+ struct dpaa2_switch_filter_block *filter_block;
struct dpsw_fdb_cfg fdb_cfg = {0};
struct dpsw_if_attr dpsw_if_attr;
struct dpaa2_switch_fdb *fdb;
@@ -3020,14 +3141,15 @@ static int dpaa2_switch_port_init(struct ethsw_port_priv *port_priv, u16 port)
return err;
}
- acl_tbl = dpaa2_switch_acl_tbl_get_unused(ethsw);
- acl_tbl->ethsw = ethsw;
- acl_tbl->id = acl_tbl_id;
- acl_tbl->in_use = true;
- acl_tbl->num_rules = 0;
- INIT_LIST_HEAD(&acl_tbl->entries);
+ filter_block = dpaa2_switch_filter_block_get_unused(ethsw);
+ filter_block->ethsw = ethsw;
+ filter_block->acl_id = acl_tbl_id;
+ filter_block->in_use = true;
+ filter_block->num_acl_rules = 0;
+ INIT_LIST_HEAD(&filter_block->acl_entries);
+ INIT_LIST_HEAD(&filter_block->mirror_entries);
- err = dpaa2_switch_port_acl_tbl_bind(port_priv, acl_tbl);
+ err = dpaa2_switch_port_acl_tbl_bind(port_priv, filter_block);
if (err)
return err;
@@ -3079,11 +3201,12 @@ static int dpaa2_switch_remove(struct fsl_mc_device *sw_dev)
for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
port_priv = ethsw->ports[i];
unregister_netdev(port_priv->netdev);
+ dpaa2_switch_port_disconnect_mac(port_priv);
free_netdev(port_priv->netdev);
}
kfree(ethsw->fdbs);
- kfree(ethsw->acls);
+ kfree(ethsw->filter_blocks);
kfree(ethsw->ports);
dpaa2_switch_teardown(sw_dev);
@@ -3156,6 +3279,10 @@ static int dpaa2_switch_probe_port(struct ethsw_core *ethsw,
goto err_port_probe;
port_priv->learn_ena = false;
+ err = dpaa2_switch_port_connect_mac(port_priv);
+ if (err)
+ goto err_port_probe;
+
return 0;
err_port_probe:
@@ -3209,9 +3336,10 @@ static int dpaa2_switch_probe(struct fsl_mc_device *sw_dev)
goto err_free_ports;
}
- ethsw->acls = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->acls),
- GFP_KERNEL);
- if (!ethsw->acls) {
+ ethsw->filter_blocks = kcalloc(ethsw->sw_attr.num_ifs,
+ sizeof(*ethsw->filter_blocks),
+ GFP_KERNEL);
+ if (!ethsw->filter_blocks) {
err = -ENOMEM;
goto err_free_fdbs;
}
@@ -3231,17 +3359,16 @@ static int dpaa2_switch_probe(struct fsl_mc_device *sw_dev)
&ethsw->fq[i].napi, dpaa2_switch_poll,
NAPI_POLL_WEIGHT);
- err = dpsw_enable(ethsw->mc_io, 0, ethsw->dpsw_handle);
- if (err) {
- dev_err(ethsw->dev, "dpsw_enable err %d\n", err);
- goto err_free_netdev;
- }
-
/* Setup IRQs */
err = dpaa2_switch_setup_irqs(sw_dev);
if (err)
goto err_stop;
+ /* By convention, if the mirror port is equal to the number of switch
+ * interfaces, then mirroring of any kind is disabled.
+ */
+ ethsw->mirror_port = ethsw->sw_attr.num_ifs;
+
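+ /* Illustration: with sw_attr.num_ifs == 4, mirror_port == 4 means
+ * "mirroring disabled", while values 0..3 would select the mirror
+ * destination. (Values are examples only.)
+ */
+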
/* Register the netdev only when the entire setup is done and the
* switch port interfaces are ready to receive traffic
*/
@@ -3264,7 +3391,7 @@ err_stop:
err_free_netdev:
for (i--; i >= 0; i--)
free_netdev(ethsw->ports[i]->netdev);
- kfree(ethsw->acls);
+ kfree(ethsw->filter_blocks);
err_free_fdbs:
kfree(ethsw->fdbs);
err_free_ports:
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h
index bdef71f234cb..0002dca4d417 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h
@@ -21,6 +21,7 @@
#include <net/pkt_cls.h>
#include <soc/fsl/dpaa2-io.h>
+#include "dpaa2-mac.h"
#include "dpsw.h"
/* Number of IRQs supported */
@@ -113,20 +114,29 @@ struct dpaa2_switch_acl_entry {
struct dpsw_acl_key key;
};
-struct dpaa2_switch_acl_tbl {
- struct list_head entries;
+struct dpaa2_switch_mirror_entry {
+ struct list_head list;
+ struct dpsw_reflection_cfg cfg;
+ unsigned long cookie;
+ u16 if_id;
+};
+
+struct dpaa2_switch_filter_block {
struct ethsw_core *ethsw;
u64 ports;
-
- u16 id;
- u8 num_rules;
bool in_use;
+
+ struct list_head acl_entries;
+ u16 acl_id;
+ u8 num_acl_rules;
+
+ struct list_head mirror_entries;
};
static inline bool
-dpaa2_switch_acl_tbl_is_full(struct dpaa2_switch_acl_tbl *acl_tbl)
+dpaa2_switch_acl_tbl_is_full(struct dpaa2_switch_filter_block *filter_block)
{
- if ((acl_tbl->num_rules + DPAA2_ETHSW_PORT_DEFAULT_TRAPS) >=
+ if ((filter_block->num_acl_rules + DPAA2_ETHSW_PORT_DEFAULT_TRAPS) >=
DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES)
return true;
return false;
@@ -149,7 +159,8 @@ struct ethsw_port_priv {
bool ucast_flood;
bool learn_ena;
- struct dpaa2_switch_acl_tbl *acl_tbl;
+ struct dpaa2_switch_filter_block *filter_block;
+ struct dpaa2_mac *mac;
};
/* Switch data */
@@ -175,7 +186,8 @@ struct ethsw_core {
int napi_users;
struct dpaa2_switch_fdb *fdbs;
- struct dpaa2_switch_acl_tbl *acls;
+ struct dpaa2_switch_filter_block *filter_blocks;
+ u16 mirror_port;
};
static inline int dpaa2_switch_get_index(struct ethsw_core *ethsw,
@@ -215,6 +227,22 @@ static inline bool dpaa2_switch_supports_cpu_traffic(struct ethsw_core *ethsw)
return true;
}
+static inline bool
+dpaa2_switch_port_is_type_phy(struct ethsw_port_priv *port_priv)
+{
+ if (port_priv->mac &&
+ (port_priv->mac->attr.link_type == DPMAC_LINK_TYPE_PHY ||
+ port_priv->mac->attr.link_type == DPMAC_LINK_TYPE_BACKPLANE))
+ return true;
+
+ return false;
+}
+
+static inline bool dpaa2_switch_port_has_mac(struct ethsw_port_priv *port_priv)
+{
+ return port_priv->mac ? true : false;
+}
+
bool dpaa2_switch_port_dev_check(const struct net_device *netdev);
int dpaa2_switch_port_vlans_add(struct net_device *netdev,
@@ -229,18 +257,24 @@ typedef int dpaa2_switch_fdb_cb_t(struct ethsw_port_priv *port_priv,
/* TC offload */
-int dpaa2_switch_cls_flower_replace(struct dpaa2_switch_acl_tbl *acl_tbl,
+int dpaa2_switch_cls_flower_replace(struct dpaa2_switch_filter_block *block,
struct flow_cls_offload *cls);
-int dpaa2_switch_cls_flower_destroy(struct dpaa2_switch_acl_tbl *acl_tbl,
+int dpaa2_switch_cls_flower_destroy(struct dpaa2_switch_filter_block *block,
struct flow_cls_offload *cls);
-int dpaa2_switch_cls_matchall_replace(struct dpaa2_switch_acl_tbl *acl_tbl,
+int dpaa2_switch_cls_matchall_replace(struct dpaa2_switch_filter_block *block,
struct tc_cls_matchall_offload *cls);
-int dpaa2_switch_cls_matchall_destroy(struct dpaa2_switch_acl_tbl *acl_tbl,
+int dpaa2_switch_cls_matchall_destroy(struct dpaa2_switch_filter_block *block,
struct tc_cls_matchall_offload *cls);
-int dpaa2_switch_acl_entry_add(struct dpaa2_switch_acl_tbl *acl_tbl,
+int dpaa2_switch_acl_entry_add(struct dpaa2_switch_filter_block *block,
struct dpaa2_switch_acl_entry *entry);
+
+int dpaa2_switch_block_offload_mirror(struct dpaa2_switch_filter_block *block,
+ struct ethsw_port_priv *port_priv);
+
+int dpaa2_switch_block_unoffload_mirror(struct dpaa2_switch_filter_block *block,
+ struct ethsw_port_priv *port_priv);
#endif /* __ETHSW_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpsw-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dpsw-cmd.h
index cb13e740f72b..397d55f2bd99 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpsw-cmd.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpsw-cmd.h
@@ -39,11 +39,16 @@
#define DPSW_CMDID_GET_IRQ_STATUS DPSW_CMD_ID(0x016)
#define DPSW_CMDID_CLEAR_IRQ_STATUS DPSW_CMD_ID(0x017)
+#define DPSW_CMDID_SET_REFLECTION_IF DPSW_CMD_ID(0x022)
+
#define DPSW_CMDID_IF_SET_TCI DPSW_CMD_ID(0x030)
#define DPSW_CMDID_IF_SET_STP DPSW_CMD_ID(0x031)
#define DPSW_CMDID_IF_GET_COUNTER DPSW_CMD_V2(0x034)
+#define DPSW_CMDID_IF_ADD_REFLECTION DPSW_CMD_ID(0x037)
+#define DPSW_CMDID_IF_REMOVE_REFLECTION DPSW_CMD_ID(0x038)
+
#define DPSW_CMDID_IF_ENABLE DPSW_CMD_ID(0x03D)
#define DPSW_CMDID_IF_DISABLE DPSW_CMD_ID(0x03E)
@@ -533,5 +538,19 @@ struct dpsw_cmd_acl_entry {
__le64 pad2[4];
__le64 key_iova;
};
+
+struct dpsw_cmd_set_reflection_if {
+ __le16 if_id;
+};
+
+#define DPSW_FILTER_SHIFT 0
+#define DPSW_FILTER_SIZE 2
+
+struct dpsw_cmd_if_reflection {
+ __le16 if_id;
+ __le16 vlan_id;
+ /* only 2 bits from the LSB */
+ u8 filter;
+};
#pragma pack(pop)
#endif /* __FSL_DPSW_CMD_H */
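The DPSW_FILTER_SHIFT/DPSW_FILTER_SIZE pair describes where the 2-bit filter value sits in the command byte above. A minimal sketch of the packing that the driver's dpsw_set_field() helper performs for this layout (dpsw_pack_filter() is a hypothetical name used for illustration only; the real macro may be defined differently):

static inline u8 dpsw_pack_filter(u8 byte, u8 filter)
{
	u8 mask = GENMASK(DPSW_FILTER_SHIFT + DPSW_FILTER_SIZE - 1,
			  DPSW_FILTER_SHIFT);

	/* Clear the 2-bit field, then place the new value at the LSB */
	return (byte & ~mask) | ((filter << DPSW_FILTER_SHIFT) & mask);
}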
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpsw.c b/drivers/net/ethernet/freescale/dpaa2/dpsw.c
index 6352d6d1ecba..ab921d75deb2 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpsw.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpsw.c
@@ -1579,3 +1579,83 @@ int dpsw_acl_remove_entry(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
/* send command to mc */
return mc_send_command(mc_io, &cmd);
}
+
+/**
+ * dpsw_set_reflection_if() - Set target interface for mirrored traffic
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @if_id: Interface Id
+ *
+ * Only one mirroring destination is allowed per switch
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_set_reflection_if(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 if_id)
+{
+ struct dpsw_cmd_set_reflection_if *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_REFLECTION_IF,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_set_reflection_if *)cmd.params;
+ cmd_params->if_id = cpu_to_le16(if_id);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_if_add_reflection() - Setup mirroring rule
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @if_id: Interface Identifier
+ * @cfg: Reflection configuration
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_if_add_reflection(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 if_id, const struct dpsw_reflection_cfg *cfg)
+{
+ struct dpsw_cmd_if_reflection *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_ADD_REFLECTION,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_if_reflection *)cmd.params;
+ cmd_params->if_id = cpu_to_le16(if_id);
+ cmd_params->vlan_id = cpu_to_le16(cfg->vlan_id);
+ dpsw_set_field(cmd_params->filter, FILTER, cfg->filter);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_if_remove_reflection() - Remove mirroring rule
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @if_id: Interface Identifier
+ * @cfg: Reflection configuration
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_if_remove_reflection(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 if_id, const struct dpsw_reflection_cfg *cfg)
+{
+ struct dpsw_cmd_if_reflection *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_REMOVE_REFLECTION,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_if_reflection *)cmd.params;
+ cmd_params->if_id = cpu_to_le16(if_id);
+ cmd_params->vlan_id = cpu_to_le16(cfg->vlan_id);
+ dpsw_set_field(cmd_params->filter, FILTER, cfg->filter);
+
+ return mc_send_command(mc_io, &cmd);
+}
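Putting the three new commands together, a hedged sketch of how a caller could mirror VLAN 100 ingress traffic from interface 0 to interface 3 (interface numbers, VLAN and function name are illustrative; assumes an already-open DPSW token):

static int example_mirror_vlan(struct fsl_mc_io *mc_io, u16 dpsw_handle)
{
	struct dpsw_reflection_cfg cfg = {
		.filter = DPSW_REFLECTION_FILTER_INGRESS_VLAN,
		.vlan_id = 100,
	};
	int err;

	/* Only one mirror destination is allowed per switch */
	err = dpsw_set_reflection_if(mc_io, 0, dpsw_handle, 3);
	if (err)
		return err;

	/* Mirror ingress frames on VLAN 100 seen by interface 0 */
	return dpsw_if_add_reflection(mc_io, 0, dpsw_handle, 0, &cfg);
}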
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpsw.h b/drivers/net/ethernet/freescale/dpaa2/dpsw.h
index 5ef221a25b02..b90bd363f47a 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpsw.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpsw.h
@@ -99,6 +99,11 @@ int dpsw_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
#define DPSW_IRQ_EVENT_LINK_CHANGED 0x0001
/**
+ * DPSW_IRQ_EVENT_ENDPOINT_CHANGED - Indicates a change in endpoint
+ */
+#define DPSW_IRQ_EVENT_ENDPOINT_CHANGED 0x0002
+
+/**
* struct dpsw_irq_cfg - IRQ configuration
* @addr: Address that must be written to signal a message-based interrupt
* @val: Value to write into irq_addr address
@@ -752,4 +757,35 @@ int dpsw_acl_add_entry(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
int dpsw_acl_remove_entry(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
u16 acl_id, const struct dpsw_acl_entry_cfg *cfg);
+
+/**
+ * enum dpsw_reflection_filter - Filter type for frames to be reflected
+ * @DPSW_REFLECTION_FILTER_INGRESS_ALL: Reflect all frames
+ * @DPSW_REFLECTION_FILTER_INGRESS_VLAN: Reflect only frames that belong to
+ * the particular VLAN defined by the vlan_id parameter
+ */
+enum dpsw_reflection_filter {
+ DPSW_REFLECTION_FILTER_INGRESS_ALL = 0,
+ DPSW_REFLECTION_FILTER_INGRESS_VLAN = 1
+};
+
+/**
+ * struct dpsw_reflection_cfg - Structure representing the mirroring config
+ * @filter: Filter type for frames to be mirrored
+ * @vlan_id: VLAN ID to mirror; valid only when the filter is
+ * DPSW_REFLECTION_FILTER_INGRESS_VLAN
+ */
+struct dpsw_reflection_cfg {
+ enum dpsw_reflection_filter filter;
+ u16 vlan_id;
+};
+
+int dpsw_set_reflection_if(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 if_id);
+
+int dpsw_if_add_reflection(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 if_id, const struct dpsw_reflection_cfg *cfg);
+
+int dpsw_if_remove_reflection(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 if_id, const struct dpsw_reflection_cfg *cfg);
#endif /* __FSL_DPSW_H */
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index c84f6c226743..60d94e0a07d6 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -735,7 +735,7 @@ static const struct net_device_ops enetc_ndev_ops = {
.ndo_set_vf_vlan = enetc_pf_set_vf_vlan,
.ndo_set_vf_spoofchk = enetc_pf_set_vf_spoofchk,
.ndo_set_features = enetc_pf_set_features,
- .ndo_do_ioctl = enetc_ioctl,
+ .ndo_eth_ioctl = enetc_ioctl,
.ndo_setup_tc = enetc_setup_tc,
.ndo_bpf = enetc_setup_bpf,
.ndo_xdp_xmit = enetc_xdp_xmit,
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
index 03090ba7e226..1a9d1e8b772c 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_vf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
@@ -99,7 +99,7 @@ static const struct net_device_ops enetc_ndev_ops = {
.ndo_get_stats = enetc_get_stats,
.ndo_set_mac_address = enetc_vf_set_mac_addr,
.ndo_set_features = enetc_vf_set_features,
- .ndo_do_ioctl = enetc_ioctl,
+ .ndo_eth_ioctl = enetc_ioctl,
.ndo_setup_tc = enetc_setup_tc,
};
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 2e002e4b4b4a..7b4961daa254 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -77,6 +77,8 @@
#define FEC_R_DES_ACTIVE_2 0x1e8 /* Rx descriptor active for ring 2 */
#define FEC_X_DES_ACTIVE_2 0x1ec /* Tx descriptor active for ring 2 */
#define FEC_QOS_SCHEME 0x1f0 /* Set multi queues Qos scheme */
+#define FEC_LPI_SLEEP 0x1f4 /* Set IEEE802.3az LPI Sleep Ts time */
+#define FEC_LPI_WAKE 0x1f8 /* Set IEEE802.3az LPI Wake Tw time */
#define FEC_MIIGSK_CFGR 0x300 /* MIIGSK Configuration reg */
#define FEC_MIIGSK_ENR 0x308 /* MIIGSK Enable reg */
@@ -187,6 +189,8 @@
#define FEC_RXIC0 0xfff
#define FEC_RXIC1 0xfff
#define FEC_RXIC2 0xfff
+#define FEC_LPI_SLEEP 0xfff
+#define FEC_LPI_WAKE 0xfff
#endif /* CONFIG_M5272 */
@@ -379,6 +383,9 @@ struct bufdesc_ex {
#define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF)
#define FEC_RX_DISABLED_IMASK (FEC_DEFAULT_IMASK & (~FEC_ENET_RXF))
+#define FEC_ENET_TXC_DLY ((uint)0x00010000)
+#define FEC_ENET_RXC_DLY ((uint)0x00020000)
+
/* ENET interrupt coalescing macro define */
#define FEC_ITR_CLK_SEL (0x1 << 30)
#define FEC_ITR_EN (0x1 << 31)
@@ -472,6 +479,22 @@ struct bufdesc_ex {
*/
#define FEC_QUIRK_HAS_MULTI_QUEUES (1 << 19)
+/* The i.MX8MQ ENET IP version adds support for the IEEE 802.3az EEE
+ * standard. For transmission, the MAC supplies two user registers used to
+ * set the Sleep (TS) and Wake (TW) times.
+ */
+#define FEC_QUIRK_HAS_EEE (1 << 20)
+
+/* The i.MX8QM ENET IP version adds a feature to generate delayed TXC/RXC
+ * as an alternative option to make sure it works well with various PHYs.
+ * To implement the delayed clock, ENET takes a synchronized 250MHz clock
+ * to generate the 2ns delay.
+ */
+#define FEC_QUIRK_DELAYED_CLKS_SUPPORT (1 << 21)
+
+/* The i.MX8MQ SoC integration mixes the wakeup interrupt signal into the "int2" interrupt line. */
+#define FEC_QUIRK_WAKEUP_FROM_INT2 (1 << 22)
+
struct bufdesc_prop {
int qid;
/* Address of Rx and Tx buffers */
@@ -528,6 +551,7 @@ struct fec_enet_private {
struct clk *clk_ref;
struct clk *clk_enet_out;
struct clk *clk_ptp;
+ struct clk *clk_2x_txclk;
bool ptp_clk_on;
struct mutex ptp_clk_mutex;
@@ -550,6 +574,8 @@ struct fec_enet_private {
uint phy_speed;
phy_interface_t phy_interface;
struct device_node *phy_node;
+ bool rgmii_txc_dly;
+ bool rgmii_rxc_dly;
int link;
int full_duplex;
int speed;
@@ -557,6 +583,7 @@ struct fec_enet_private {
bool bufdesc_ex;
int pause_flag;
int wol_flag;
+ int wake_irq;
u32 quirks;
struct napi_struct napi;
@@ -589,6 +616,10 @@ struct fec_enet_private {
unsigned int tx_time_itr;
unsigned int itr_clk_rate;
+ /* tx lpi eee mode */
+ struct ethtool_eee eee;
+ unsigned int clk_ref_rate;
+
u32 rx_copybreak;
/* ptp clock period in ns*/
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 7e4c4980ced7..83ab34b1d735 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -135,6 +135,26 @@ static const struct fec_devinfo fec_imx6ul_info = {
FEC_QUIRK_HAS_COALESCE | FEC_QUIRK_CLEAR_SETUP_MII,
};
+static const struct fec_devinfo fec_imx8mq_info = {
+ .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
+ FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
+ FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
+ FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
+ FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
+ FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES |
+ FEC_QUIRK_HAS_EEE | FEC_QUIRK_WAKEUP_FROM_INT2,
+};
+
+static const struct fec_devinfo fec_imx8qm_info = {
+ .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
+ FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
+ FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
+ FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
+ FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
+ FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES |
+ FEC_QUIRK_DELAYED_CLKS_SUPPORT,
+};
+
static struct platform_device_id fec_devtype[] = {
{
/* keep it for coldfire */
@@ -162,6 +182,12 @@ static struct platform_device_id fec_devtype[] = {
.name = "imx6ul-fec",
.driver_data = (kernel_ulong_t)&fec_imx6ul_info,
}, {
+ .name = "imx8mq-fec",
+ .driver_data = (kernel_ulong_t)&fec_imx8mq_info,
+ }, {
+ .name = "imx8qm-fec",
+ .driver_data = (kernel_ulong_t)&fec_imx8qm_info,
+ }, {
/* sentinel */
}
};
@@ -175,6 +201,8 @@ enum imx_fec_type {
MVF600_FEC,
IMX6SX_FEC,
IMX6UL_FEC,
+ IMX8MQ_FEC,
+ IMX8QM_FEC,
};
static const struct of_device_id fec_dt_ids[] = {
@@ -185,6 +213,8 @@ static const struct of_device_id fec_dt_ids[] = {
{ .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], },
{ .compatible = "fsl,imx6sx-fec", .data = &fec_devtype[IMX6SX_FEC], },
{ .compatible = "fsl,imx6ul-fec", .data = &fec_devtype[IMX6UL_FEC], },
+ { .compatible = "fsl,imx8mq-fec", .data = &fec_devtype[IMX8MQ_FEC], },
+ { .compatible = "fsl,imx8qm-fec", .data = &fec_devtype[IMX8QM_FEC], },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fec_dt_ids);
@@ -1107,6 +1137,13 @@ fec_restart(struct net_device *ndev)
if (fep->bufdesc_ex)
ecntl |= (1 << 4);
+ if (fep->quirks & FEC_QUIRK_DELAYED_CLKS_SUPPORT &&
+ fep->rgmii_txc_dly)
+ ecntl |= FEC_ENET_TXC_DLY;
+ if (fep->quirks & FEC_QUIRK_DELAYED_CLKS_SUPPORT &&
+ fep->rgmii_rxc_dly)
+ ecntl |= FEC_ENET_RXC_DLY;
+
#ifndef CONFIG_M5272
/* Enable the MIB statistic event counters */
writel(0 << 31, fep->hwp + FEC_MIB_CTRLSTAT);
@@ -1970,6 +2007,10 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
if (ret)
goto failed_clk_ref;
+ ret = clk_prepare_enable(fep->clk_2x_txclk);
+ if (ret)
+ goto failed_clk_2x_txclk;
+
fec_enet_phy_reset_after_clk_enable(ndev);
} else {
clk_disable_unprepare(fep->clk_enet_out);
@@ -1980,10 +2021,14 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
mutex_unlock(&fep->ptp_clk_mutex);
}
clk_disable_unprepare(fep->clk_ref);
+ clk_disable_unprepare(fep->clk_2x_txclk);
}
return 0;
+failed_clk_2x_txclk:
+ if (fep->clk_ref)
+ clk_disable_unprepare(fep->clk_ref);
failed_clk_ref:
if (fep->clk_ptp) {
mutex_lock(&fep->ptp_clk_mutex);
@@ -1997,6 +2042,34 @@ failed_clk_ptp:
return ret;
}
+static int fec_enet_parse_rgmii_delay(struct fec_enet_private *fep,
+ struct device_node *np)
+{
+ u32 rgmii_tx_delay, rgmii_rx_delay;
+
+ /* For rgmii tx internal delay, valid values are 0ps and 2000ps */
+ if (!of_property_read_u32(np, "tx-internal-delay-ps", &rgmii_tx_delay)) {
+ if (rgmii_tx_delay != 0 && rgmii_tx_delay != 2000) {
+ dev_err(&fep->pdev->dev, "The only allowed RGMII TX delay values are: 0ps, 2000ps");
+ return -EINVAL;
+ } else if (rgmii_tx_delay == 2000) {
+ fep->rgmii_txc_dly = true;
+ }
+ }
+
+ /* For rgmii rx internal delay, valid values are 0ps and 2000ps */
+ if (!of_property_read_u32(np, "rx-internal-delay-ps", &rgmii_rx_delay)) {
+ if (rgmii_rx_delay != 0 && rgmii_rx_delay != 2000) {
+ dev_err(&fep->pdev->dev, "The only allowed RGMII RX delay values are: 0ps, 2000ps");
+ return -EINVAL;
+ } else if (rgmii_rx_delay == 2000) {
+ fep->rgmii_rxc_dly = true;
+ }
+ }
+
+ return 0;
+}
+
static int fec_enet_mii_probe(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
@@ -2692,6 +2765,92 @@ static int fec_enet_set_tunable(struct net_device *netdev,
return ret;
}
+/* The LPI Sleep Ts count is based on the tx clock (clk_ref).
+ * The LPI sleep count value = (X us * 1000) / cycle_ns.
+ */
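+/* Worked example (illustrative rate): with clk_ref at 125 MHz,
+ * 1000 us -> 1000 * (125000000 / 1000) / 1000 = 125000 cycles.
+ */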
+static int fec_enet_us_to_tx_cycle(struct net_device *ndev, int us)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ return us * (fep->clk_ref_rate / 1000) / 1000;
+}
+
+static int fec_enet_eee_mode_set(struct net_device *ndev, bool enable)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct ethtool_eee *p = &fep->eee;
+ unsigned int sleep_cycle, wake_cycle;
+ int ret = 0;
+
+ if (enable) {
+ ret = phy_init_eee(ndev->phydev, 0);
+ if (ret)
+ return ret;
+
+ sleep_cycle = fec_enet_us_to_tx_cycle(ndev, p->tx_lpi_timer);
+ wake_cycle = sleep_cycle;
+ } else {
+ sleep_cycle = 0;
+ wake_cycle = 0;
+ }
+
+ p->tx_lpi_enabled = enable;
+ p->eee_enabled = enable;
+ p->eee_active = enable;
+
+ writel(sleep_cycle, fep->hwp + FEC_LPI_SLEEP);
+ writel(wake_cycle, fep->hwp + FEC_LPI_WAKE);
+
+ return 0;
+}
+
+static int
+fec_enet_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct ethtool_eee *p = &fep->eee;
+
+ if (!(fep->quirks & FEC_QUIRK_HAS_EEE))
+ return -EOPNOTSUPP;
+
+ if (!netif_running(ndev))
+ return -ENETDOWN;
+
+ edata->eee_enabled = p->eee_enabled;
+ edata->eee_active = p->eee_active;
+ edata->tx_lpi_timer = p->tx_lpi_timer;
+ edata->tx_lpi_enabled = p->tx_lpi_enabled;
+
+ return phy_ethtool_get_eee(ndev->phydev, edata);
+}
+
+static int
+fec_enet_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct ethtool_eee *p = &fep->eee;
+ int ret = 0;
+
+ if (!(fep->quirks & FEC_QUIRK_HAS_EEE))
+ return -EOPNOTSUPP;
+
+ if (!netif_running(ndev))
+ return -ENETDOWN;
+
+ p->tx_lpi_timer = edata->tx_lpi_timer;
+
+ if (!edata->eee_enabled || !edata->tx_lpi_enabled ||
+ !edata->tx_lpi_timer)
+ ret = fec_enet_eee_mode_set(ndev, false);
+ else
+ ret = fec_enet_eee_mode_set(ndev, true);
+
+ if (ret)
+ return ret;
+
+ return phy_ethtool_set_eee(ndev->phydev, edata);
+}
+
static void
fec_enet_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
@@ -2719,12 +2878,12 @@ fec_enet_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
device_set_wakeup_enable(&ndev->dev, wol->wolopts & WAKE_MAGIC);
if (device_may_wakeup(&ndev->dev)) {
fep->wol_flag |= FEC_WOL_FLAG_ENABLE;
- if (fep->irq[0] > 0)
- enable_irq_wake(fep->irq[0]);
+ if (fep->wake_irq > 0)
+ enable_irq_wake(fep->wake_irq);
} else {
fep->wol_flag &= (~FEC_WOL_FLAG_ENABLE);
- if (fep->irq[0] > 0)
- disable_irq_wake(fep->irq[0]);
+ if (fep->wake_irq > 0)
+ disable_irq_wake(fep->wake_irq);
}
return 0;
@@ -2752,6 +2911,8 @@ static const struct ethtool_ops fec_enet_ethtool_ops = {
.set_tunable = fec_enet_set_tunable,
.get_wol = fec_enet_get_wol,
.set_wol = fec_enet_set_wol,
+ .get_eee = fec_enet_get_eee,
+ .set_eee = fec_enet_set_eee,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
.self_test = net_selftest,
@@ -3280,7 +3441,7 @@ static const struct net_device_ops fec_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_tx_timeout = fec_timeout,
.ndo_set_mac_address = fec_set_mac_address,
- .ndo_do_ioctl = fec_enet_ioctl,
+ .ndo_eth_ioctl = fec_enet_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = fec_poll_controller,
#endif
@@ -3535,6 +3696,17 @@ static int fec_enet_get_irq_cnt(struct platform_device *pdev)
return irq_cnt;
}
+static void fec_enet_get_wakeup_irq(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ if (fep->quirks & FEC_QUIRK_WAKEUP_FROM_INT2)
+ fep->wake_irq = fep->irq[2];
+ else
+ fep->wake_irq = fep->irq[0];
+}
+
static int fec_enet_init_stop_mode(struct fec_enet_private *fep,
struct device_node *np)
{
@@ -3666,6 +3838,10 @@ fec_probe(struct platform_device *pdev)
fep->phy_interface = interface;
}
+ ret = fec_enet_parse_rgmii_delay(fep, np);
+ if (ret)
+ goto failed_rgmii_delay;
+
fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
if (IS_ERR(fep->clk_ipg)) {
ret = PTR_ERR(fep->clk_ipg);
@@ -3692,6 +3868,14 @@ fec_probe(struct platform_device *pdev)
fep->clk_ref = devm_clk_get(&pdev->dev, "enet_clk_ref");
if (IS_ERR(fep->clk_ref))
fep->clk_ref = NULL;
+ fep->clk_ref_rate = clk_get_rate(fep->clk_ref);
+
+ /* clk_2x_txclk is optional, depends on board */
+ if (fep->rgmii_txc_dly || fep->rgmii_rxc_dly) {
+ fep->clk_2x_txclk = devm_clk_get(&pdev->dev, "enet_2x_txclk");
+ if (IS_ERR(fep->clk_2x_txclk))
+ fep->clk_2x_txclk = NULL;
+ }
fep->bufdesc_ex = fep->quirks & FEC_QUIRK_HAS_BUFDESC_EX;
fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp");
@@ -3762,6 +3946,9 @@ fec_probe(struct platform_device *pdev)
fep->irq[i] = irq;
}
+ /* Decide which interrupt line is wakeup capable */
+ fec_enet_get_wakeup_irq(pdev);
+
ret = fec_enet_mii_init(pdev);
if (ret)
goto failed_mii_init;
@@ -3809,6 +3996,7 @@ failed_clk_ahb:
failed_clk_ipg:
fec_enet_clk_enable(ndev, false);
failed_clk:
+failed_rgmii_delay:
if (of_phy_is_fixed_link(np))
of_phy_deregister_fixed_link(np);
of_node_put(phy_node);
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index 02c47658a215..73ff359a15f1 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -792,7 +792,7 @@ static const struct net_device_ops mpc52xx_fec_netdev_ops = {
.ndo_set_rx_mode = mpc52xx_fec_set_multicast_list,
.ndo_set_mac_address = mpc52xx_fec_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = phy_do_ioctl,
+ .ndo_eth_ioctl = phy_do_ioctl,
.ndo_tx_timeout = mpc52xx_fec_tx_timeout,
.ndo_get_stats = mpc52xx_fec_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 6ee325ad35c5..2db6e38a772e 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -900,7 +900,7 @@ static const struct net_device_ops fs_enet_netdev_ops = {
.ndo_start_xmit = fs_enet_start_xmit,
.ndo_tx_timeout = fs_timeout,
.ndo_set_rx_mode = fs_set_multicast_list,
- .ndo_do_ioctl = phy_do_ioctl_running,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 9646483137c4..af6ad94bf24a 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -3184,7 +3184,7 @@ static const struct net_device_ops gfar_netdev_ops = {
.ndo_set_features = gfar_set_features,
.ndo_set_rx_mode = gfar_set_multi,
.ndo_tx_timeout = gfar_timeout,
- .ndo_do_ioctl = gfar_ioctl,
+ .ndo_eth_ioctl = gfar_ioctl,
.ndo_get_stats64 = gfar_get_stats64,
.ndo_change_carrier = fixed_phy_change_carrier,
.ndo_set_mac_address = gfar_set_mac_addr,
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 0acfafb73db1..3eb288d10b0c 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -3516,7 +3516,7 @@ static const struct net_device_ops ucc_geth_netdev_ops = {
.ndo_set_mac_address = ucc_geth_set_mac_addr,
.ndo_set_rx_mode = ucc_geth_set_multi,
.ndo_tx_timeout = ucc_geth_timeout,
- .ndo_do_ioctl = ucc_geth_ioctl,
+ .ndo_eth_ioctl = ucc_geth_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ucc_netpoll,
#endif
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
index 5bb56b454541..f089d33dd48e 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.c
+++ b/drivers/net/ethernet/google/gve/gve_adminq.c
@@ -322,7 +322,8 @@ static int gve_adminq_issue_cmd(struct gve_priv *priv,
tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
// Check if next command will overflow the buffer.
- if (((priv->adminq_prod_cnt + 1) & priv->adminq_mask) == tail) {
+ if (((priv->adminq_prod_cnt + 1) & priv->adminq_mask) ==
+ (tail & priv->adminq_mask)) {
int err;
// Flush existing commands to make room.
@@ -332,7 +333,8 @@ static int gve_adminq_issue_cmd(struct gve_priv *priv,
// Retry.
tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
- if (((priv->adminq_prod_cnt + 1) & priv->adminq_mask) == tail) {
+ if (((priv->adminq_prod_cnt + 1) & priv->adminq_mask) ==
+ (tail & priv->adminq_mask)) {
// This should never happen. We just flushed the
// command queue so there should be enough space.
return -ENOMEM;
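The added masking matters because adminq_event_counter is a free-running count while adminq_mask only spans the ring: with a 64-entry queue (mask 63), prod_cnt = 127 and tail = 64, the old check compared (128 & 63) == 0 against the raw 64 and wrongly saw room; masking both sides compares 0 == 0 and flushes first (the numbers are illustrative).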
diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig
index bb062b02fb85..3312e1d93c3b 100644
--- a/drivers/net/ethernet/hisilicon/Kconfig
+++ b/drivers/net/ethernet/hisilicon/Kconfig
@@ -90,6 +90,8 @@ config HNS_ENET
config HNS3
tristate "Hisilicon Network Subsystem Support HNS3 (Framework)"
depends on PCI
+ select NET_DEVLINK
+ select PAGE_POOL
help
This selects the framework support for Hisilicon Network Subsystem 3.
This layer facilitates clients like ENET, RoCE and user-space ethernet
@@ -102,7 +104,7 @@ config HNS3_HCLGE
tristate "Hisilicon HNS3 HCLGE Acceleration Engine & Compatibility Layer Support"
default m
depends on PCI_MSI
- imply PTP_1588_CLOCK
+ depends on PTP_1588_CLOCK_OPTIONAL
help
This selects the HNS3_HCLGE network acceleration engine & its hardware
compatibility layer. The engine would be used in Hisilicon hip08 family of
diff --git a/drivers/net/ethernet/hisilicon/hisi_femac.c b/drivers/net/ethernet/hisilicon/hisi_femac.c
index 3c4db4a6b431..22bf914f2dbd 100644
--- a/drivers/net/ethernet/hisilicon/hisi_femac.c
+++ b/drivers/net/ethernet/hisilicon/hisi_femac.c
@@ -685,7 +685,7 @@ static const struct net_device_ops hisi_femac_netdev_ops = {
.ndo_open = hisi_femac_net_open,
.ndo_stop = hisi_femac_net_close,
.ndo_start_xmit = hisi_femac_net_xmit,
- .ndo_do_ioctl = phy_do_ioctl_running,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_set_mac_address = hisi_femac_set_mac_address,
.ndo_set_rx_mode = hisi_femac_net_set_rx_mode,
};
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index ad534f9e41ab..343c605c4be8 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -1945,7 +1945,7 @@ static const struct net_device_ops hns_nic_netdev_ops = {
.ndo_tx_timeout = hns_nic_net_timeout,
.ndo_set_mac_address = hns_nic_net_set_mac_address,
.ndo_change_mtu = hns_nic_change_mtu,
- .ndo_do_ioctl = phy_do_ioctl_running,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_set_features = hns_nic_set_features,
.ndo_fix_features = hns_nic_fix_features,
.ndo_get_stats64 = hns_nic_get_stats64,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index e0b7c3c44e7b..848bed866193 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -718,6 +718,8 @@ struct hnae3_ae_ops {
u32 nsec, u32 sec);
int (*get_ts_info)(struct hnae3_handle *handle,
struct ethtool_ts_info *info);
+ int (*get_link_diagnosis_info)(struct hnae3_handle *handle,
+ u32 *status_code);
};
struct hnae3_dcb_ops {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index cdb5f14fb6bc..fcbeb1fbe5b8 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -2852,7 +2852,7 @@ static const struct net_device_ops hns3_nic_netdev_ops = {
.ndo_start_xmit = hns3_nic_net_xmit,
.ndo_tx_timeout = hns3_nic_net_timeout,
.ndo_set_mac_address = hns3_nic_net_set_mac_address,
- .ndo_do_ioctl = hns3_nic_do_ioctl,
+ .ndo_eth_ioctl = hns3_nic_do_ioctl,
.ndo_change_mtu = hns3_nic_change_mtu,
.ndo_set_features = hns3_nic_set_features,
.ndo_features_check = hns3_features_check,
@@ -3205,6 +3205,21 @@ static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
unsigned int order = hns3_page_order(ring);
struct page *p;
+ if (ring->page_pool) {
+ p = page_pool_dev_alloc_frag(ring->page_pool,
+ &cb->page_offset,
+ hns3_buf_size(ring));
+ if (unlikely(!p))
+ return -ENOMEM;
+
+ cb->priv = p;
+ cb->buf = page_address(p);
+ cb->dma = page_pool_get_dma_addr(p);
+ cb->type = DESC_TYPE_PP_FRAG;
+ cb->reuse_flag = 0;
+ return 0;
+ }
+
p = dev_alloc_pages(order);
if (!p)
return -ENOMEM;
@@ -3227,8 +3242,13 @@ static void hns3_free_buffer(struct hns3_enet_ring *ring,
if (cb->type & (DESC_TYPE_SKB | DESC_TYPE_BOUNCE_HEAD |
DESC_TYPE_BOUNCE_ALL | DESC_TYPE_SGL_SKB))
napi_consume_skb(cb->priv, budget);
- else if (!HNAE3_IS_TX_RING(ring) && cb->pagecnt_bias)
- __page_frag_cache_drain(cb->priv, cb->pagecnt_bias);
+ else if (!HNAE3_IS_TX_RING(ring)) {
+ if (cb->type & DESC_TYPE_PAGE && cb->pagecnt_bias)
+ __page_frag_cache_drain(cb->priv, cb->pagecnt_bias);
+ else if (cb->type & DESC_TYPE_PP_FRAG)
+ page_pool_put_full_page(ring->page_pool, cb->priv,
+ false);
+ }
memset(cb, 0, sizeof(*cb));
}
@@ -3315,7 +3335,7 @@ static int hns3_alloc_and_map_buffer(struct hns3_enet_ring *ring,
int ret;
ret = hns3_alloc_buffer(ring, cb);
- if (ret)
+ if (ret || ring->page_pool)
goto out;
ret = hns3_map_buffer(ring, cb);
@@ -3337,7 +3357,8 @@ static int hns3_alloc_and_attach_buffer(struct hns3_enet_ring *ring, int i)
if (ret)
return ret;
- ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
+ ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
+ ring->desc_cb[i].page_offset);
return 0;
}
@@ -3367,7 +3388,8 @@ static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
{
hns3_unmap_buffer(ring, &ring->desc_cb[i]);
ring->desc_cb[i] = *res_cb;
- ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
+ ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
+ ring->desc_cb[i].page_offset);
ring->desc[i].rx.bd_base_info = 0;
}
@@ -3539,6 +3561,12 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
u32 frag_size = size - pull_len;
bool reused;
+ if (ring->page_pool) {
+ skb_add_rx_frag(skb, i, desc_cb->priv, frag_offset,
+ frag_size, truesize);
+ return;
+ }
+
/* Avoid re-using remote or pfmem page */
if (unlikely(!dev_page_is_reusable(desc_cb->priv)))
goto out;
@@ -3856,6 +3884,9 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
/* We can reuse buffer as-is, just make sure it is reusable */
if (dev_page_is_reusable(desc_cb->priv))
desc_cb->reuse_flag = 1;
+ else if (desc_cb->type & DESC_TYPE_PP_FRAG)
+ page_pool_put_full_page(ring->page_pool, desc_cb->priv,
+ false);
else /* This page cannot be reused so discard it */
__page_frag_cache_drain(desc_cb->priv,
desc_cb->pagecnt_bias);
@@ -3863,6 +3894,10 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
hns3_rx_ring_move_fw(ring);
return 0;
}
+
+ if (ring->page_pool)
+ skb_mark_for_recycle(skb);
+
u64_stats_update_begin(&ring->syncp);
ring->stats.seg_pkt_cnt++;
u64_stats_update_end(&ring->syncp);
@@ -3901,6 +3936,10 @@ static int hns3_add_frag(struct hns3_enet_ring *ring)
"alloc rx fraglist skb fail\n");
return -ENXIO;
}
+
+ if (ring->page_pool)
+ skb_mark_for_recycle(new_skb);
+
ring->frag_num = 0;
if (ring->tail_skb) {
@@ -4705,6 +4744,29 @@ static void hns3_put_ring_config(struct hns3_nic_priv *priv)
priv->ring = NULL;
}
+static void hns3_alloc_page_pool(struct hns3_enet_ring *ring)
+{
+ struct page_pool_params pp_params = {
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_PAGE_FRAG |
+ PP_FLAG_DMA_SYNC_DEV,
+ .order = hns3_page_order(ring),
+ .pool_size = ring->desc_num * hns3_buf_size(ring) /
+ (PAGE_SIZE << hns3_page_order(ring)),
+ .nid = dev_to_node(ring_to_dev(ring)),
+ .dev = ring_to_dev(ring),
+ .dma_dir = DMA_FROM_DEVICE,
+ .offset = 0,
+ .max_len = PAGE_SIZE << hns3_page_order(ring),
+ };
+
+ ring->page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(ring->page_pool)) {
+ dev_warn(ring_to_dev(ring), "page pool creation failed: %ld\n",
+ PTR_ERR(ring->page_pool));
+ ring->page_pool = NULL;
+ }
+}
+
static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
{
int ret;
@@ -4724,6 +4786,8 @@ static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
goto out_with_desc_cb;
if (!HNAE3_IS_TX_RING(ring)) {
+ hns3_alloc_page_pool(ring);
+
ret = hns3_alloc_ring_buffers(ring);
if (ret)
goto out_with_desc;
@@ -4764,6 +4828,11 @@ void hns3_fini_ring(struct hns3_enet_ring *ring)
devm_kfree(ring_to_dev(ring), tx_spare);
ring->tx_spare = NULL;
}
+
+ if (!HNAE3_IS_TX_RING(ring) && ring->page_pool) {
+ page_pool_destroy(ring->page_pool);
+ ring->page_pool = NULL;
+ }
}
static int hns3_buf_size2type(u32 buf_size)
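For a sense of the sizing in hns3_alloc_page_pool() above: with 1024 descriptors, a 2048-byte ring buffer size and order-0 (4 KiB) pages, pool_size comes out to 1024 * 2048 / 4096 = 512 pages, and PP_FLAG_PAGE_FRAG lets two 2 KiB frags share each page (illustrative numbers, not driver defaults).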
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index 15af3d93857b..b0e696b08b8b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -6,6 +6,7 @@
#include <linux/dim.h>
#include <linux/if_vlan.h>
+#include <net/page_pool.h>
#include "hnae3.h"
@@ -307,6 +308,7 @@ enum hns3_desc_type {
DESC_TYPE_BOUNCE_ALL = 1 << 3,
DESC_TYPE_BOUNCE_HEAD = 1 << 4,
DESC_TYPE_SGL_SKB = 1 << 5,
+ DESC_TYPE_PP_FRAG = 1 << 6,
};
struct hns3_desc_cb {
@@ -451,6 +453,7 @@ struct hns3_enet_ring {
struct hnae3_queue *tqp;
int queue_index;
struct device *dev; /* will be used for DMA mapping of descriptors */
+ struct page_pool *page_pool;
/* statistic */
struct ring_stats stats;
@@ -593,6 +596,11 @@ struct hns3_hw_error_info {
const char *msg;
};
+struct hns3_reset_type_map {
+ enum ethtool_reset_flags rst_flags;
+ enum hnae3_reset_type rst_type;
+};
+
static inline int ring_space(struct hns3_enet_ring *ring)
{
/* This smp_load_acquire() pairs with smp_store_release() in
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index 82061ab6930f..835105015763 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -7,21 +7,7 @@
#include <linux/sfp.h>
#include "hns3_enet.h"
-
-struct hns3_stats {
- char stats_string[ETH_GSTRING_LEN];
- int stats_offset;
-};
-
-struct hns3_sfp_type {
- u8 type;
- u8 ext_type;
-};
-
-struct hns3_pflag_desc {
- char name[ETH_GSTRING_LEN];
- void (*handler)(struct net_device *netdev, bool enable);
-};
+#include "hns3_ethtool.h"
/* tqp related stats */
#define HNS3_TQP_STAT(_string, _member) { \
@@ -953,6 +939,60 @@ static int hns3_get_rxnfc(struct net_device *netdev,
}
}
+static const struct hns3_reset_type_map hns3_reset_type[] = {
+ {ETH_RESET_MGMT, HNAE3_IMP_RESET},
+ {ETH_RESET_ALL, HNAE3_GLOBAL_RESET},
+ {ETH_RESET_DEDICATED, HNAE3_FUNC_RESET},
+};
+
+static const struct hns3_reset_type_map hns3vf_reset_type[] = {
+ {ETH_RESET_DEDICATED, HNAE3_VF_FUNC_RESET},
+};
+
+static int hns3_set_reset(struct net_device *netdev, u32 *flags)
+{
+ enum hnae3_reset_type rst_type = HNAE3_NONE_RESET;
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ const struct hnae3_ae_ops *ops = h->ae_algo->ops;
+ const struct hns3_reset_type_map *rst_type_map;
+ u32 i, size;
+
+ if (ops->ae_dev_resetting && ops->ae_dev_resetting(h))
+ return -EBUSY;
+
+ if (!ops->set_default_reset_request || !ops->reset_event)
+ return -EOPNOTSUPP;
+
+ if (h->flags & HNAE3_SUPPORT_VF) {
+ rst_type_map = hns3vf_reset_type;
+ size = ARRAY_SIZE(hns3vf_reset_type);
+ } else {
+ rst_type_map = hns3_reset_type;
+ size = ARRAY_SIZE(hns3_reset_type);
+ }
+
+ for (i = 0; i < size; i++) {
+ if (rst_type_map[i].rst_flags == *flags) {
+ rst_type = rst_type_map[i].rst_type;
+ break;
+ }
+ }
+
+ if (rst_type == HNAE3_NONE_RESET ||
+ (rst_type == HNAE3_IMP_RESET &&
+ ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2))
+ return -EOPNOTSUPP;
+
+ netdev_info(netdev, "Setting reset type %d\n", rst_type);
+
+ ops->set_default_reset_request(ae_dev, rst_type);
+
+ ops->reset_event(h->pdev, h);
+
+ return 0;
+}
+
static void hns3_change_all_ring_bd_num(struct hns3_nic_priv *priv,
u32 tx_desc_num, u32 rx_desc_num)
{
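With the .reset hook in place, user space can trigger the mapped resets through standard ethtool, e.g. `ethtool --reset eth0 dedicated` requests ETH_RESET_DEDICATED, which the tables above translate to HNAE3_FUNC_RESET on a PF or HNAE3_VF_FUNC_RESET on a VF (interface name illustrative).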
@@ -1671,6 +1711,71 @@ static int hns3_get_ts_info(struct net_device *netdev,
return ethtool_op_get_ts_info(netdev, info);
}
+static const struct hns3_ethtool_link_ext_state_mapping
+hns3_link_ext_state_map[] = {
+ {1, ETHTOOL_LINK_EXT_STATE_AUTONEG,
+ ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_HCD},
+ {2, ETHTOOL_LINK_EXT_STATE_AUTONEG,
+ ETHTOOL_LINK_EXT_SUBSTATE_AN_ACK_NOT_RECEIVED},
+
+ {256, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE,
+ ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_LINK_INHIBIT_TIMEOUT},
+ {257, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE,
+ ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_LINK_PARTNER_DID_NOT_SET_RECEIVER_READY},
+ {512, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE,
+ ETHTOOL_LINK_EXT_SUBSTATE_LT_REMOTE_FAULT},
+
+ {513, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH,
+ ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_BLOCK_LOCK},
+ {514, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH,
+ ETHTOOL_LINK_EXT_SUBSTATE_LLM_FC_FEC_IS_NOT_LOCKED},
+ {515, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH,
+ ETHTOOL_LINK_EXT_SUBSTATE_LLM_RS_FEC_IS_NOT_LOCKED},
+
+ {768, ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY,
+ ETHTOOL_LINK_EXT_SUBSTATE_BSI_LARGE_NUMBER_OF_PHYSICAL_ERRORS},
+ {769, ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY,
+ ETHTOOL_LINK_EXT_SUBSTATE_BSI_SERDES_REFERENCE_CLOCK_LOST},
+ {770, ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY,
+ ETHTOOL_LINK_EXT_SUBSTATE_BSI_SERDES_ALOS},
+
+ {1024, ETHTOOL_LINK_EXT_STATE_NO_CABLE, 0},
+ {1025, ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE,
+ ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE},
+
+ {1026, ETHTOOL_LINK_EXT_STATE_EEPROM_ISSUE, 0},
+};
+
+static int hns3_get_link_ext_state(struct net_device *netdev,
+ struct ethtool_link_ext_state_info *info)
+{
+ const struct hns3_ethtool_link_ext_state_mapping *map;
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ u32 status_code, i;
+ int ret;
+
+ if (netif_carrier_ok(netdev))
+ return -ENODATA;
+
+ if (!h->ae_algo->ops->get_link_diagnosis_info)
+ return -EOPNOTSUPP;
+
+ ret = h->ae_algo->ops->get_link_diagnosis_info(h, &status_code);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(hns3_link_ext_state_map); i++) {
+ map = &hns3_link_ext_state_map[i];
+ if (map->status_code == status_code) {
+ info->link_ext_state = map->link_ext_state;
+ info->__link_ext_substate = map->link_ext_substate;
+ return 0;
+ }
+ }
+
+ return -ENODATA;
+}
+
static const struct ethtool_ops hns3vf_ethtool_ops = {
.supported_coalesce_params = HNS3_ETHTOOL_COALESCE,
.get_drvinfo = hns3_get_drvinfo,
@@ -1699,6 +1804,7 @@ static const struct ethtool_ops hns3vf_ethtool_ops = {
.set_priv_flags = hns3_set_priv_flags,
.get_tunable = hns3_get_tunable,
.set_tunable = hns3_set_tunable,
+ .reset = hns3_set_reset,
};
static const struct ethtool_ops hns3_ethtool_ops = {
@@ -1740,6 +1846,8 @@ static const struct ethtool_ops hns3_ethtool_ops = {
.get_ts_info = hns3_get_ts_info,
.get_tunable = hns3_get_tunable,
.set_tunable = hns3_set_tunable,
+ .reset = hns3_set_reset,
+ .get_link_ext_state = hns3_get_link_ext_state,
};
void hns3_ethtool_set_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h
new file mode 100644
index 000000000000..822d6fcbc73b
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+// Copyright (c) 2021 Hisilicon Limited.
+
+#ifndef __HNS3_ETHTOOL_H
+#define __HNS3_ETHTOOL_H
+
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+
+struct hns3_stats {
+ char stats_string[ETH_GSTRING_LEN];
+ int stats_offset;
+};
+
+struct hns3_sfp_type {
+ u8 type;
+ u8 ext_type;
+};
+
+struct hns3_pflag_desc {
+ char name[ETH_GSTRING_LEN];
+ void (*handler)(struct net_device *netdev, bool enable);
+};
+
+struct hns3_ethtool_link_ext_state_mapping {
+ u32 status_code;
+ enum ethtool_link_ext_state link_ext_state;
+ u8 link_ext_substate;
+};
+
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
index a685392dbfe9..d1bf5c4c0abb 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
@@ -7,6 +7,6 @@ ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
ccflags-y += -I $(srctree)/$(src)
obj-$(CONFIG_HNS3_HCLGE) += hclge.o
-hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o hclge_debugfs.o hclge_ptp.o
+hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o hclge_debugfs.o hclge_ptp.o hclge_devlink.o
hclge-$(CONFIG_HNS3_DCB) += hclge_dcb.o
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
index 887297e37cf3..13042f1cac6f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
@@ -169,17 +169,19 @@ static bool hclge_is_special_opcode(u16 opcode)
/* these commands have several descriptors,
* and use the first one to save opcode and return value
*/
- u16 spec_opcode[] = {HCLGE_OPC_STATS_64_BIT,
- HCLGE_OPC_STATS_32_BIT,
- HCLGE_OPC_STATS_MAC,
- HCLGE_OPC_STATS_MAC_ALL,
- HCLGE_OPC_QUERY_32_BIT_REG,
- HCLGE_OPC_QUERY_64_BIT_REG,
- HCLGE_QUERY_CLEAR_MPF_RAS_INT,
- HCLGE_QUERY_CLEAR_PF_RAS_INT,
- HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT,
- HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT,
- HCLGE_QUERY_ALL_ERR_INFO};
+ static const u16 spec_opcode[] = {
+ HCLGE_OPC_STATS_64_BIT,
+ HCLGE_OPC_STATS_32_BIT,
+ HCLGE_OPC_STATS_MAC,
+ HCLGE_OPC_STATS_MAC_ALL,
+ HCLGE_OPC_QUERY_32_BIT_REG,
+ HCLGE_OPC_QUERY_64_BIT_REG,
+ HCLGE_QUERY_CLEAR_MPF_RAS_INT,
+ HCLGE_QUERY_CLEAR_PF_RAS_INT,
+ HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT,
+ HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT,
+ HCLGE_QUERY_ALL_ERR_INFO
+ };
int i;
for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index 18bde77ef944..8e5be127909b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -316,6 +316,9 @@ enum hclge_opcode_type {
/* PHY command */
HCLGE_OPC_PHY_LINK_KSETTING = 0x7025,
HCLGE_OPC_PHY_REG = 0x7026,
+
+ /* Query link diagnosis info command */
+ HCLGE_OPC_QUERY_LINK_DIAGNOSIS = 0x702A,
};
#define HCLGE_TQP_REG_OFFSET 0x80000
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c
new file mode 100644
index 000000000000..e4aad695abcc
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2021 Hisilicon Limited. */
+
+#include <net/devlink.h>
+
+#include "hclge_devlink.h"
+
+static int hclge_devlink_info_get(struct devlink *devlink,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+#define HCLGE_DEVLINK_FW_STRING_LEN 32
+ struct hclge_devlink_priv *priv = devlink_priv(devlink);
+ char version_str[HCLGE_DEVLINK_FW_STRING_LEN];
+ struct hclge_dev *hdev = priv->hdev;
+ int ret;
+
+ ret = devlink_info_driver_name_put(req, KBUILD_MODNAME);
+ if (ret)
+ return ret;
+
+ snprintf(version_str, sizeof(version_str), "%lu.%lu.%lu.%lu",
+ hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE3_MASK,
+ HNAE3_FW_VERSION_BYTE3_SHIFT),
+ hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE2_MASK,
+ HNAE3_FW_VERSION_BYTE2_SHIFT),
+ hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE1_MASK,
+ HNAE3_FW_VERSION_BYTE1_SHIFT),
+ hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE0_MASK,
+ HNAE3_FW_VERSION_BYTE0_SHIFT));
+
+ return devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW,
+ version_str);
+}
+
+static int hclge_devlink_reload_down(struct devlink *devlink, bool netns_change,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
+ struct netlink_ext_ack *extack)
+{
+ struct hclge_devlink_priv *priv = devlink_priv(devlink);
+ struct hclge_dev *hdev = priv->hdev;
+ struct hnae3_handle *h = &hdev->vport->nic;
+ struct pci_dev *pdev = hdev->pdev;
+ int ret;
+
+ if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) {
+ dev_err(&pdev->dev, "reset is handling\n");
+ return -EBUSY;
+ }
+
+ switch (action) {
+ case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
+ rtnl_lock();
+ ret = hdev->nic_client->ops->reset_notify(h, HNAE3_DOWN_CLIENT);
+ if (ret) {
+ rtnl_unlock();
+ return ret;
+ }
+
+ ret = hdev->nic_client->ops->reset_notify(h,
+ HNAE3_UNINIT_CLIENT);
+ rtnl_unlock();
+ return ret;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int hclge_devlink_reload_up(struct devlink *devlink,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
+ u32 *actions_performed,
+ struct netlink_ext_ack *extack)
+{
+ struct hclge_devlink_priv *priv = devlink_priv(devlink);
+ struct hclge_dev *hdev = priv->hdev;
+ struct hnae3_handle *h = &hdev->vport->nic;
+ int ret;
+
+ *actions_performed = BIT(action);
+ switch (action) {
+ case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
+ rtnl_lock();
+ ret = hdev->nic_client->ops->reset_notify(h, HNAE3_INIT_CLIENT);
+ if (ret) {
+ rtnl_unlock();
+ return ret;
+ }
+
+ ret = hdev->nic_client->ops->reset_notify(h, HNAE3_UP_CLIENT);
+ rtnl_unlock();
+ return ret;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct devlink_ops hclge_devlink_ops = {
+ .info_get = hclge_devlink_info_get,
+ .reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT),
+ .reload_down = hclge_devlink_reload_down,
+ .reload_up = hclge_devlink_reload_up,
+};
+
+int hclge_devlink_init(struct hclge_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ struct hclge_devlink_priv *priv;
+ struct devlink *devlink;
+ int ret;
+
+ devlink = devlink_alloc(&hclge_devlink_ops,
+ sizeof(struct hclge_devlink_priv), &pdev->dev);
+ if (!devlink)
+ return -ENOMEM;
+
+ priv = devlink_priv(devlink);
+ priv->hdev = hdev;
+ hdev->devlink = devlink;
+
+ ret = devlink_register(devlink);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register devlink, ret = %d\n",
+ ret);
+ goto out_reg_fail;
+ }
+
+ devlink_reload_enable(devlink);
+
+ return 0;
+
+out_reg_fail:
+ devlink_free(devlink);
+ return ret;
+}
+
+void hclge_devlink_uninit(struct hclge_dev *hdev)
+{
+ struct devlink *devlink = hdev->devlink;
+
+ devlink_reload_disable(devlink);
+
+ devlink_unregister(devlink);
+
+ devlink_free(devlink);
+}
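Once registered, the standard devlink tooling works against the PF: `devlink dev info pci/0000:35:00.0` reports the running firmware version assembled in hclge_devlink_info_get() (e.g. a fw_version of 0x01090203 renders as "1.9.2.3"), and `devlink dev reload pci/0000:35:00.0 action driver_reinit` exercises the reload_down/reload_up pair; the PCI address is illustrative.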
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.h
new file mode 100644
index 000000000000..918be04507a5
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2021 Hisilicon Limited. */
+
+#ifndef __HCLGE_DEVLINK_H
+#define __HCLGE_DEVLINK_H
+
+#include "hclge_main.h"
+
+struct hclge_devlink_priv {
+ struct hclge_dev *hdev;
+};
+
+int hclge_devlink_init(struct hclge_dev *hdev);
+void hclge_devlink_uninit(struct hclge_dev *hdev);
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index ebeaf12e409b..8779a63d51b3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -23,6 +23,7 @@
#include "hclge_tm.h"
#include "hclge_err.h"
#include "hnae3.h"
+#include "hclge_devlink.h"
#define HCLGE_NAME "hclge"
#define HCLGE_STATS_READ(p, offset) (*(u64 *)((u8 *)(p) + (offset)))
@@ -3788,6 +3789,12 @@ static void hclge_do_reset(struct hclge_dev *hdev)
}
switch (hdev->reset_type) {
+ case HNAE3_IMP_RESET:
+ dev_info(&pdev->dev, "IMP reset requested\n");
+ val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
+ hnae3_set_bit(val, HCLGE_TRIGGER_IMP_RESET_B, 1);
+ hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, val);
+ break;
case HNAE3_GLOBAL_RESET:
dev_info(&pdev->dev, "global reset requested\n");
val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
@@ -11482,10 +11489,14 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
if (ret)
goto out;
+ ret = hclge_devlink_init(hdev);
+ if (ret)
+ goto err_pci_uninit;
+
/* Firmware command queue initialize */
ret = hclge_cmd_queue_init(hdev);
if (ret)
- goto err_pci_uninit;
+ goto err_devlink_uninit;
/* Firmware command initialize */
ret = hclge_cmd_init(hdev);
@@ -11658,6 +11669,8 @@ err_msi_uninit:
pci_free_irq_vectors(pdev);
err_cmd_uninit:
hclge_cmd_uninit(hdev);
+err_devlink_uninit:
+ hclge_devlink_uninit(hdev);
err_pci_uninit:
pcim_iounmap(pdev, hdev->hw.io_base);
pci_clear_master(pdev);
@@ -12048,6 +12061,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_cmd_uninit(hdev);
hclge_misc_irq_uninit(hdev);
+ hclge_devlink_uninit(hdev);
hclge_pci_uninit(hdev);
mutex_destroy(&hdev->vport_lock);
hclge_uninit_vport_vlan_table(hdev);
@@ -12829,6 +12843,29 @@ static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
return 0;
}
+static int hclge_get_link_diagnosis_info(struct hnae3_handle *handle,
+ u32 *status_code)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_desc desc;
+ int ret;
+
+ if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2)
+ return -EOPNOTSUPP;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_DIAGNOSIS, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to query link diagnosis info, ret = %d\n", ret);
+ return ret;
+ }
+
+ *status_code = le32_to_cpu(desc.data[0]);
+ return 0;
+}
+
static const struct hnae3_ae_ops hclge_ops = {
.init_ae_dev = hclge_init_ae_dev,
.uninit_ae_dev = hclge_uninit_ae_dev,
@@ -12929,6 +12966,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.set_tx_hwts_info = hclge_ptp_set_tx_info,
.get_rx_hwts = hclge_ptp_get_rx_hwts,
.get_ts_info = hclge_ptp_get_ts_info,
+ .get_link_diagnosis_info = hclge_get_link_diagnosis_info,
};
static struct hnae3_ae_algo ae_algo = {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 3d3352491dba..ada5c68f2851 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -8,6 +8,7 @@
#include <linux/phy.h>
#include <linux/if_vlan.h>
#include <linux/kfifo.h>
+#include <net/devlink.h>
#include "hclge_cmd.h"
#include "hclge_ptp.h"
@@ -193,6 +194,7 @@ enum HLCGE_PORT_TYPE {
#define HCLGE_VECTOR0_IMP_CMDQ_ERR_B 4U
#define HCLGE_VECTOR0_IMP_RD_POISON_B 5U
#define HCLGE_VECTOR0_ALL_MSIX_ERR_B 6U
+#define HCLGE_TRIGGER_IMP_RESET_B 7U
#define HCLGE_MAC_DEFAULT_FRAME \
(ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN + ETH_DATA_LEN)
@@ -943,6 +945,7 @@ struct hclge_dev {
cpumask_t affinity_mask;
struct irq_affinity_notify affinity_notify;
struct hclge_ptp *ptp;
+ struct devlink *devlink;
};
/* VPort level vlan tag configuration for TX direction */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
index 2c26ea607a53..51ff7d86ee90 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
@@ -7,4 +7,4 @@ ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
ccflags-y += -I $(srctree)/$(src)
obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o
-hclgevf-objs = hclgevf_main.o hclgevf_cmd.o hclgevf_mbx.o
+hclgevf-objs = hclgevf_main.o hclgevf_cmd.o hclgevf_mbx.o hclgevf_devlink.o
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c
new file mode 100644
index 000000000000..f478770299c6
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2021 Hisilicon Limited. */
+
+#include <net/devlink.h>
+
+#include "hclgevf_devlink.h"
+
+static int hclgevf_devlink_info_get(struct devlink *devlink,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+#define HCLGEVF_DEVLINK_FW_STRING_LEN 32
+ struct hclgevf_devlink_priv *priv = devlink_priv(devlink);
+ char version_str[HCLGEVF_DEVLINK_FW_STRING_LEN];
+ struct hclgevf_dev *hdev = priv->hdev;
+ int ret;
+
+ ret = devlink_info_driver_name_put(req, KBUILD_MODNAME);
+ if (ret)
+ return ret;
+
+ snprintf(version_str, sizeof(version_str), "%lu.%lu.%lu.%lu",
+ hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE3_MASK,
+ HNAE3_FW_VERSION_BYTE3_SHIFT),
+ hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE2_MASK,
+ HNAE3_FW_VERSION_BYTE2_SHIFT),
+ hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE1_MASK,
+ HNAE3_FW_VERSION_BYTE1_SHIFT),
+ hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE0_MASK,
+ HNAE3_FW_VERSION_BYTE0_SHIFT));
+
+ return devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW,
+ version_str);
+}
+
+static int hclgevf_devlink_reload_down(struct devlink *devlink,
+ bool netns_change,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
+ struct netlink_ext_ack *extack)
+{
+ struct hclgevf_devlink_priv *priv = devlink_priv(devlink);
+ struct hclgevf_dev *hdev = priv->hdev;
+ struct hnae3_handle *h = &hdev->nic;
+ struct pci_dev *pdev = hdev->pdev;
+ int ret;
+
+ if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) {
+ dev_err(&pdev->dev, "reset is handling\n");
+ return -EBUSY;
+ }
+
+ switch (action) {
+ case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
+ rtnl_lock();
+ ret = hdev->nic_client->ops->reset_notify(h, HNAE3_DOWN_CLIENT);
+ if (ret) {
+ rtnl_unlock();
+ return ret;
+ }
+
+ ret = hdev->nic_client->ops->reset_notify(h,
+ HNAE3_UNINIT_CLIENT);
+ rtnl_unlock();
+ return ret;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int hclgevf_devlink_reload_up(struct devlink *devlink,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
+ u32 *actions_performed,
+ struct netlink_ext_ack *extack)
+{
+ struct hclgevf_devlink_priv *priv = devlink_priv(devlink);
+ struct hclgevf_dev *hdev = priv->hdev;
+ struct hnae3_handle *h = &hdev->nic;
+ int ret;
+
+ *actions_performed = BIT(action);
+ switch (action) {
+ case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
+ rtnl_lock();
+ ret = hdev->nic_client->ops->reset_notify(h, HNAE3_INIT_CLIENT);
+ if (ret) {
+ rtnl_unlock();
+ return ret;
+ }
+
+ ret = hdev->nic_client->ops->reset_notify(h, HNAE3_UP_CLIENT);
+ rtnl_unlock();
+ return ret;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct devlink_ops hclgevf_devlink_ops = {
+ .info_get = hclgevf_devlink_info_get,
+ .reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT),
+ .reload_down = hclgevf_devlink_reload_down,
+ .reload_up = hclgevf_devlink_reload_up,
+};
+
+int hclgevf_devlink_init(struct hclgevf_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ struct hclgevf_devlink_priv *priv;
+ struct devlink *devlink;
+ int ret;
+
+ devlink =
+ devlink_alloc(&hclgevf_devlink_ops,
+ sizeof(struct hclgevf_devlink_priv), &pdev->dev);
+ if (!devlink)
+ return -ENOMEM;
+
+ priv = devlink_priv(devlink);
+ priv->hdev = hdev;
+ hdev->devlink = devlink;
+
+ ret = devlink_register(devlink);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register devlink, ret = %d\n",
+ ret);
+ goto out_reg_fail;
+ }
+
+ devlink_reload_enable(devlink);
+
+ return 0;
+
+out_reg_fail:
+ devlink_free(devlink);
+ return ret;
+}
+
+void hclgevf_devlink_uninit(struct hclgevf_dev *hdev)
+{
+ struct devlink *devlink = hdev->devlink;
+
+ devlink_reload_disable(devlink);
+
+ devlink_unregister(devlink);
+
+ devlink_free(devlink);
+}
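
The VF path repeats the PF contract exactly: reload_down() quiesces under rtnl_lock(), reload_up() rebuilds and reports what it did through *actions_performed. Reduced to its skeleton (hypothetical demo callbacks; the devlink core only dispatches actions advertised in .reload_actions, so the action check is defensive):

	#include <net/devlink.h>

	static int demo_reload_down(struct devlink *devlink, bool netns_change,
				    enum devlink_reload_action action,
				    enum devlink_reload_limit limit,
				    struct netlink_ext_ack *extack)
	{
		if (action != DEVLINK_RELOAD_ACTION_DRIVER_REINIT)
			return -EOPNOTSUPP;

		rtnl_lock();
		/* stop traffic, then tear down the client, as above */
		rtnl_unlock();
		return 0;
	}

	static int demo_reload_up(struct devlink *devlink,
				  enum devlink_reload_action action,
				  enum devlink_reload_limit limit,
				  u32 *actions_performed,
				  struct netlink_ext_ack *extack)
	{
		*actions_performed = BIT(action);	/* tell the core what happened */

		rtnl_lock();
		/* re-init the client, then restart traffic, as above */
		rtnl_unlock();
		return 0;
	}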
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.h
new file mode 100644
index 000000000000..e09ea3d8a963
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2021 Hisilicon Limited. */
+
+#ifndef __HCLGEVF_DEVLINK_H
+#define __HCLGEVF_DEVLINK_H
+
+#include "hclgevf_main.h"
+
+struct hclgevf_devlink_priv {
+ struct hclgevf_dev *hdev;
+};
+
+int hclgevf_devlink_init(struct hclgevf_dev *hdev);
+void hclgevf_devlink_uninit(struct hclgevf_dev *hdev);
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 8784d61e833f..3a19f08bfff3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -8,6 +8,7 @@
#include "hclgevf_main.h"
#include "hclge_mbx.h"
#include "hnae3.h"
+#include "hclgevf_devlink.h"
#define HCLGEVF_NAME "hclgevf"
@@ -3337,6 +3338,10 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
if (ret)
return ret;
+ ret = hclgevf_devlink_init(hdev);
+ if (ret)
+ goto err_devlink_init;
+
ret = hclgevf_cmd_queue_init(hdev);
if (ret)
goto err_cmd_queue_init;
@@ -3441,6 +3446,8 @@ err_misc_irq_init:
err_cmd_init:
hclgevf_cmd_uninit(hdev);
err_cmd_queue_init:
+ hclgevf_devlink_uninit(hdev);
+err_devlink_init:
hclgevf_pci_uninit(hdev);
clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
return ret;
@@ -3462,6 +3469,7 @@ static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
}
hclgevf_cmd_uninit(hdev);
+ hclgevf_devlink_uninit(hdev);
hclgevf_pci_uninit(hdev);
hclgevf_uninit_mac_list(hdev);
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index d7d02848d674..6f222a3a0bf2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -6,6 +6,7 @@
#include <linux/fs.h>
#include <linux/if_vlan.h>
#include <linux/types.h>
+#include <net/devlink.h>
#include "hclge_mbx.h"
#include "hclgevf_cmd.h"
#include "hnae3.h"
@@ -330,6 +331,8 @@ struct hclgevf_dev {
u32 flag;
unsigned long serv_processed_cnt;
unsigned long last_serv_processed;
+
+ struct devlink *devlink;
};
static inline bool hclgevf_is_reset_pending(struct hclgevf_dev *hdev)
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_devlink.c b/drivers/net/ethernet/huawei/hinic/hinic_devlink.c
index 58d5646444b0..6e11ee339f12 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_devlink.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_devlink.c
@@ -293,9 +293,9 @@ static const struct devlink_ops hinic_devlink_ops = {
.flash_update = hinic_devlink_flash_update,
};
-struct devlink *hinic_devlink_alloc(void)
+struct devlink *hinic_devlink_alloc(struct device *dev)
{
- return devlink_alloc(&hinic_devlink_ops, sizeof(struct hinic_dev));
+ return devlink_alloc(&hinic_devlink_ops, sizeof(struct hinic_dev), dev);
}
void hinic_devlink_free(struct devlink *devlink)
@@ -303,11 +303,11 @@ void hinic_devlink_free(struct devlink *devlink)
devlink_free(devlink);
}
-int hinic_devlink_register(struct hinic_devlink_priv *priv, struct device *dev)
+int hinic_devlink_register(struct hinic_devlink_priv *priv)
{
struct devlink *devlink = priv_to_devlink(priv);
- return devlink_register(devlink, dev);
+ return devlink_register(devlink);
}
void hinic_devlink_unregister(struct hinic_devlink_priv *priv)
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_devlink.h b/drivers/net/ethernet/huawei/hinic/hinic_devlink.h
index a090ebcfaabb..9e315011015c 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_devlink.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_devlink.h
@@ -108,9 +108,9 @@ struct host_image_st {
u32 device_id;
};
-struct devlink *hinic_devlink_alloc(void);
+struct devlink *hinic_devlink_alloc(struct device *dev);
void hinic_devlink_free(struct devlink *devlink);
-int hinic_devlink_register(struct hinic_devlink_priv *priv, struct device *dev);
+int hinic_devlink_register(struct hinic_devlink_priv *priv);
void hinic_devlink_unregister(struct hinic_devlink_priv *priv);
int hinic_health_reporters_create(struct hinic_devlink_priv *priv);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
index 428108eb10d2..56b6b04e209b 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
@@ -754,7 +754,7 @@ static int init_pfhwdev(struct hinic_pfhwdev *pfhwdev)
return err;
}
- err = hinic_devlink_register(hwdev->devlink_dev, &pdev->dev);
+ err = hinic_devlink_register(hwdev->devlink_dev);
if (err) {
dev_err(&hwif->pdev->dev, "Failed to register devlink\n");
hinic_pf_to_mgmt_free(&pfhwdev->pf_to_mgmt);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index 405ee4d2d2b1..881d0b247561 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -1183,7 +1183,7 @@ static int nic_dev_init(struct pci_dev *pdev)
struct devlink *devlink;
int err, num_qps;
- devlink = hinic_devlink_alloc();
+ devlink = hinic_devlink_alloc(&pdev->dev);
if (!devlink) {
dev_err(&pdev->dev, "Hinic devlink alloc failed\n");
return -ENOMEM;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sriov.c b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c
index f8a26459ff65..a78c398bf5b2 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_sriov.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c
@@ -836,8 +836,10 @@ int hinic_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting)
int hinic_ndo_set_vf_bw(struct net_device *netdev,
int vf, int min_tx_rate, int max_tx_rate)
{
- u32 speeds[] = {SPEED_10, SPEED_100, SPEED_1000, SPEED_10000,
- SPEED_25000, SPEED_40000, SPEED_100000};
+ static const u32 speeds[] = {
+ SPEED_10, SPEED_100, SPEED_1000, SPEED_10000,
+ SPEED_25000, SPEED_40000, SPEED_100000
+ };
struct hinic_dev *nic_dev = netdev_priv(netdev);
struct hinic_port_cap port_cap = { 0 };
enum hinic_port_link_state link_state;
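
The hinic_sriov.c hunk is a micro-cleanup worth naming: without static const, the initializer forces the compiler to rebuild the speeds table on the stack on every call. A small compilable sketch of the difference:

	#include <stdint.h>

	/* on-stack: the initializer is re-materialized on every call */
	static uint32_t speed_stack(int i)
	{
		uint32_t speeds[] = { 10, 100, 1000, 10000 };

		return speeds[i];
	}

	/* static const: one read-only copy in .rodata, no per-call setup */
	static uint32_t speed_rodata(int i)
	{
		static const uint32_t speeds[] = { 10, 100, 1000, 10000 };

		return speeds[i];
	}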
diff --git a/drivers/net/ethernet/i825xx/82596.c b/drivers/net/ethernet/i825xx/82596.c
index fc8c7cd67471..b8a40146b895 100644
--- a/drivers/net/ethernet/i825xx/82596.c
+++ b/drivers/net/ethernet/i825xx/82596.c
@@ -1110,9 +1110,6 @@ static void print_eth(unsigned char *add, char *str)
add, add + 6, add, add[12], add[13], str);
}
-static int io = 0x300;
-static int irq = 10;
-
static const struct net_device_ops i596_netdev_ops = {
.ndo_open = i596_open,
.ndo_stop = i596_close,
@@ -1123,7 +1120,7 @@ static const struct net_device_ops i596_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
};
-struct net_device * __init i82596_probe(int unit)
+static struct net_device * __init i82596_probe(void)
{
struct net_device *dev;
int i;
@@ -1140,14 +1137,6 @@ struct net_device * __init i82596_probe(int unit)
if (!dev)
return ERR_PTR(-ENOMEM);
- if (unit >= 0) {
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
- } else {
- dev->base_addr = io;
- dev->irq = irq;
- }
-
#ifdef ENABLE_MVME16x_NET
if (MACH_IS_MVME16x) {
if (mvme16x_config & MVME16x_CONFIG_NO_ETHERNET) {
@@ -1515,22 +1504,22 @@ static void set_multicast_list(struct net_device *dev)
}
}
-#ifdef MODULE
static struct net_device *dev_82596;
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "i82596 debug mask");
-int __init init_module(void)
+static int __init i82596_init(void)
{
if (debug >= 0)
i596_debug = debug;
- dev_82596 = i82596_probe(-1);
+ dev_82596 = i82596_probe();
return PTR_ERR_OR_ZERO(dev_82596);
}
+module_init(i82596_init);
-void __exit cleanup_module(void)
+static void __exit i82596_cleanup(void)
{
unregister_netdev(dev_82596);
#ifdef __mc68000__
@@ -1544,5 +1533,4 @@ void __exit cleanup_module(void)
free_page ((u32)(dev_82596->mem_start));
free_netdev(dev_82596);
}
-
-#endif /* MODULE */
+module_exit(i82596_cleanup);
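
The 82596 conversion above replaces the legacy "#ifdef MODULE" init_module()/cleanup_module() pair with module_init()/module_exit(), which generate the right entry points whether the driver is built in or modular. The boilerplate in isolation, with a hypothetical demo driver:

	#include <linux/init.h>
	#include <linux/module.h>

	static int __init demo_init(void)
	{
		/* runs from do_initcalls() when built in, or on insmod */
		return 0;
	}
	module_init(demo_init);

	static void __exit demo_exit(void)
	{
		/* discarded entirely for built-in code; runs on rmmod */
	}
	module_exit(demo_exit);

	MODULE_LICENSE("GPL");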
diff --git a/drivers/net/ethernet/i825xx/sun3_82586.c b/drivers/net/ethernet/i825xx/sun3_82586.c
index 4564ee02c95f..893e0ddcb611 100644
--- a/drivers/net/ethernet/i825xx/sun3_82586.c
+++ b/drivers/net/ethernet/i825xx/sun3_82586.c
@@ -29,6 +29,7 @@ static int rfdadd = 0; /* rfdadd=1 may be better for 8K MEM cards */
static int fifo=0x8; /* don't change */
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ioport.h>
@@ -276,7 +277,7 @@ static void alloc586(struct net_device *dev)
memset((char *)p->scb,0,sizeof(struct scb_struct));
}
-struct net_device * __init sun3_82586_probe(int unit)
+static int __init sun3_82586_probe(void)
{
struct net_device *dev;
unsigned long ioaddr;
@@ -291,25 +292,20 @@ struct net_device * __init sun3_82586_probe(int unit)
break;
default:
- return ERR_PTR(-ENODEV);
+ return -ENODEV;
}
if (found)
- return ERR_PTR(-ENODEV);
+ return -ENODEV;
ioaddr = (unsigned long)ioremap(IE_OBIO, SUN3_82586_TOTAL_SIZE);
if (!ioaddr)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
found = 1;
dev = alloc_etherdev(sizeof(struct priv));
if (!dev)
goto out;
- if (unit >= 0) {
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
- }
-
dev->irq = IE_IRQ;
dev->base_addr = ioaddr;
err = sun3_82586_probe1(dev, ioaddr);
@@ -326,8 +322,9 @@ out1:
free_netdev(dev);
out:
iounmap((void __iomem *)ioaddr);
- return ERR_PTR(err);
+ return err;
}
+module_init(sun3_82586_probe);
static const struct net_device_ops sun3_82586_netdev_ops = {
.ndo_open = sun3_82586_open,
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 471be6ec7e8a..664a91af662d 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -3011,7 +3011,7 @@ static const struct net_device_ops emac_netdev_ops = {
.ndo_stop = emac_close,
.ndo_get_stats = emac_stats,
.ndo_set_rx_mode = emac_set_multicast_list,
- .ndo_do_ioctl = emac_ioctl,
+ .ndo_eth_ioctl = emac_ioctl,
.ndo_tx_timeout = emac_tx_timeout,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = emac_set_mac_address,
@@ -3023,7 +3023,7 @@ static const struct net_device_ops emac_gige_netdev_ops = {
.ndo_stop = emac_close,
.ndo_get_stats = emac_stats,
.ndo_set_rx_mode = emac_set_multicast_list,
- .ndo_do_ioctl = emac_ioctl,
+ .ndo_eth_ioctl = emac_ioctl,
.ndo_tx_timeout = emac_tx_timeout,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = emac_set_mac_address,
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 737ba85e409f..3d9b4f99d357 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1630,7 +1630,7 @@ static const struct net_device_ops ibmveth_netdev_ops = {
.ndo_stop = ibmveth_close,
.ndo_start_xmit = ibmveth_start_xmit,
.ndo_set_rx_mode = ibmveth_set_multicast_list,
- .ndo_do_ioctl = ibmveth_ioctl,
+ .ndo_eth_ioctl = ibmveth_ioctl,
.ndo_change_mtu = ibmveth_change_mtu,
.ndo_fix_features = ibmveth_fix_features,
.ndo_set_features = ibmveth_set_features,
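
The .ndo_do_ioctl to .ndo_eth_ioctl renames in the emac and ibmveth hunks above (and throughout the Intel drivers below) track a net-core split: ethernet-specific commands -- the MII SIOCs and hardware timestamping -- now arrive via a dedicated hook, while .ndo_do_ioctl keeps only device-private ioctls. The handler signature is unchanged; a sketch with hypothetical demo helpers:

	#include <linux/netdevice.h>
	#include <linux/sockios.h>

	static int demo_mii_ioctl(struct net_device *dev, struct ifreq *ifr,
				  int cmd);		/* hypothetical */
	static int demo_hwtstamp_set(struct net_device *dev,
				     struct ifreq *ifr);	/* hypothetical */

	static int demo_eth_ioctl(struct net_device *dev, struct ifreq *ifr,
				  int cmd)
	{
		switch (cmd) {
		case SIOCGMIIPHY:
		case SIOCGMIIREG:
		case SIOCSMIIREG:
			return demo_mii_ioctl(dev, ifr, cmd);
		case SIOCSHWTSTAMP:
			return demo_hwtstamp_set(dev, ifr);
		default:
			return -EOPNOTSUPP;
		}
	}

	static const struct net_device_ops demo_netdev_ops = {
		.ndo_eth_ioctl	= demo_eth_ioctl,	/* was .ndo_do_ioctl */
	};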
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 82744a7501c7..b0b6f90deb7d 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -58,8 +58,8 @@ config E1000
config E1000E
tristate "Intel(R) PRO/1000 PCI-Express Gigabit Ethernet support"
depends on PCI && (!SPARC32 || BROKEN)
+ depends on PTP_1588_CLOCK_OPTIONAL
select CRC32
- imply PTP_1588_CLOCK
help
This driver supports the PCI-Express Intel(R) PRO/1000 gigabit
ethernet family of adapters. For PCI or PCI-X e1000 adapters,
@@ -87,7 +87,7 @@ config E1000E_HWTS
config IGB
tristate "Intel(R) 82575/82576 PCI-Express Gigabit Ethernet support"
depends on PCI
- imply PTP_1588_CLOCK
+ depends on PTP_1588_CLOCK_OPTIONAL
select I2C
select I2C_ALGOBIT
help
@@ -159,9 +159,9 @@ config IXGB
config IXGBE
tristate "Intel(R) 10GbE PCI Express adapters support"
depends on PCI
+ depends on PTP_1588_CLOCK_OPTIONAL
select MDIO
select PHYLIB
- imply PTP_1588_CLOCK
help
This driver supports Intel(R) 10GbE PCI Express family of
adapters. For more information on how to identify your adapter, go
@@ -239,7 +239,7 @@ config IXGBEVF_IPSEC
config I40E
tristate "Intel(R) Ethernet Controller XL710 Family support"
- imply PTP_1588_CLOCK
+ depends on PTP_1588_CLOCK_OPTIONAL
depends on PCI
select AUXILIARY_BUS
help
@@ -295,11 +295,11 @@ config ICE
tristate "Intel(R) Ethernet Connection E800 Series Support"
default n
depends on PCI_MSI
+ depends on PTP_1588_CLOCK_OPTIONAL
select AUXILIARY_BUS
select DIMLIB
select NET_DEVLINK
select PLDMFW
- imply PTP_1588_CLOCK
help
This driver supports Intel(R) Ethernet Connection E800 Series of
devices. For more information on how to identify your adapter, go
@@ -317,7 +317,7 @@ config FM10K
tristate "Intel(R) FM10000 Ethernet Switch Host Interface Support"
default n
depends on PCI_MSI
- imply PTP_1588_CLOCK
+ depends on PTP_1588_CLOCK_OPTIONAL
help
This driver supports Intel(R) FM10000 Ethernet Switch Host
Interface. For more information on how to identify your adapter,
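
The switch from "imply PTP_1588_CLOCK" to "depends on PTP_1588_CLOCK_OPTIONAL" across these Intel entries rules out the broken combination of a built-in NIC driver with a modular PTP core, while still allowing PTP support to be disabled outright. Drivers keep compiling in the disabled case because ptp_clock_kernel.h is expected to stub the registration API when the PTP core is unreachable; a hedged sketch of the usual call pattern (demo names are hypothetical):

	#include <linux/err.h>
	#include <linux/ptp_clock_kernel.h>

	struct demo_adapter {			/* hypothetical */
		struct ptp_clock_info ptp_info;
		struct ptp_clock *ptp_clock;
	};

	static void demo_ptp_init(struct demo_adapter *ad, struct device *dev)
	{
		/* real registration when PTP_1588_CLOCK is reachable, the
		 * header's NULL-returning stub otherwise -- so no #ifdef here
		 */
		ad->ptp_clock = ptp_clock_register(&ad->ptp_info, dev);
		if (IS_ERR_OR_NULL(ad->ptp_clock))
			ad->ptp_clock = NULL;	/* run without timestamping */
	}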
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 1b0958bd24f6..373eb027b925 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -2715,10 +2715,10 @@ static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
switch (stringset) {
case ETH_SS_TEST:
- memcpy(data, *e100_gstrings_test, sizeof(e100_gstrings_test));
+ memcpy(data, e100_gstrings_test, sizeof(e100_gstrings_test));
break;
case ETH_SS_STATS:
- memcpy(data, *e100_gstrings_stats, sizeof(e100_gstrings_stats));
+ memcpy(data, e100_gstrings_stats, sizeof(e100_gstrings_stats));
break;
}
}
@@ -2809,7 +2809,7 @@ static const struct net_device_ops e100_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = e100_set_multicast_list,
.ndo_set_mac_address = e100_set_mac_address,
- .ndo_do_ioctl = e100_do_ioctl,
+ .ndo_eth_ioctl = e100_do_ioctl,
.ndo_tx_timeout = e100_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = e100_netpoll,
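
The e100_get_strings() change drops a dereference that was harmless at runtime but wrong in type: for a two-dimensional array, arr and *arr evaluate to the same address, yet *arr names only row 0, so copying sizeof(arr) bytes from it reads past the object the expression denotes (and modern compilers warn). A small standalone illustration:

	#include <stdio.h>
	#include <string.h>

	#define LEN 32

	static const char strings[][LEN] = { "alpha", "beta", "gamma" };

	int main(void)
	{
		char buf[sizeof(strings)];

		/* identical addresses, different types */
		printf("%p %p\n", (void *)strings, (void *)*strings);

		/* source expression now covers the whole copied region */
		memcpy(buf, strings, sizeof(strings));
		return 0;
	}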
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index c2a109126c27..bed4f040face 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -832,7 +832,7 @@ static const struct net_device_ops e1000_netdev_ops = {
.ndo_set_mac_address = e1000_set_mac,
.ndo_tx_timeout = e1000_tx_timeout,
.ndo_change_mtu = e1000_change_mtu,
- .ndo_do_ioctl = e1000_ioctl,
+ .ndo_eth_ioctl = e1000_ioctl,
.ndo_validate_addr = eth_validate_addr,
.ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid,
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 06442e6bef73..7256b43b7a65 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -903,6 +903,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
case e1000_pch_tgp:
case e1000_pch_adp:
case e1000_pch_mtp:
+ case e1000_pch_lnp:
mask |= BIT(18);
break;
default:
@@ -1569,6 +1570,7 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
case e1000_pch_tgp:
case e1000_pch_adp:
case e1000_pch_mtp:
+ case e1000_pch_lnp:
fext_nvm11 = er32(FEXTNVM11);
fext_nvm11 &= ~E1000_FEXTNVM11_DISABLE_MULR_FIX;
ew32(FEXTNVM11, fext_nvm11);
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index db79c4e6413e..bcf680e83811 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -98,14 +98,22 @@ struct e1000_hw;
#define E1000_DEV_ID_PCH_TGP_I219_V14 0x15FA
#define E1000_DEV_ID_PCH_TGP_I219_LM15 0x15F4
#define E1000_DEV_ID_PCH_TGP_I219_V15 0x15F5
+#define E1000_DEV_ID_PCH_RPL_I219_LM23 0x0DC5
+#define E1000_DEV_ID_PCH_RPL_I219_V23 0x0DC6
#define E1000_DEV_ID_PCH_ADP_I219_LM16 0x1A1E
#define E1000_DEV_ID_PCH_ADP_I219_V16 0x1A1F
#define E1000_DEV_ID_PCH_ADP_I219_LM17 0x1A1C
#define E1000_DEV_ID_PCH_ADP_I219_V17 0x1A1D
+#define E1000_DEV_ID_PCH_RPL_I219_LM22 0x0DC7
+#define E1000_DEV_ID_PCH_RPL_I219_V22 0x0DC8
#define E1000_DEV_ID_PCH_MTP_I219_LM18 0x550A
#define E1000_DEV_ID_PCH_MTP_I219_V18 0x550B
#define E1000_DEV_ID_PCH_MTP_I219_LM19 0x550C
#define E1000_DEV_ID_PCH_MTP_I219_V19 0x550D
+#define E1000_DEV_ID_PCH_LNP_I219_LM20 0x550E
+#define E1000_DEV_ID_PCH_LNP_I219_V20 0x550F
+#define E1000_DEV_ID_PCH_LNP_I219_LM21 0x5510
+#define E1000_DEV_ID_PCH_LNP_I219_V21 0x5511
#define E1000_REVISION_4 4
@@ -132,6 +140,7 @@ enum e1000_mac_type {
e1000_pch_tgp,
e1000_pch_adp,
e1000_pch_mtp,
+ e1000_pch_lnp,
};
enum e1000_media_type {
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index cf7b3887da1d..2f97c9f5611d 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -321,6 +321,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
case e1000_pch_tgp:
case e1000_pch_adp:
case e1000_pch_mtp:
+ case e1000_pch_lnp:
if (e1000_phy_is_accessible_pchlan(hw))
break;
@@ -466,6 +467,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
case e1000_pch_tgp:
case e1000_pch_adp:
case e1000_pch_mtp:
+ case e1000_pch_lnp:
/* In case the PHY needs to be in mdio slow mode,
* set slow mode and try to get the PHY id again.
*/
@@ -711,6 +713,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
case e1000_pch_tgp:
case e1000_pch_adp:
case e1000_pch_mtp:
+ case e1000_pch_lnp:
case e1000_pchlan:
/* check management mode */
mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
@@ -1266,9 +1269,11 @@ static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
usleep_range(10000, 11000);
}
if (firmware_bug)
- e_warn("ULP_CONFIG_DONE took %dmsec. This is a firmware bug\n", i * 10);
+ e_warn("ULP_CONFIG_DONE took %d msec. This is a firmware bug\n",
+ i * 10);
else
- e_dbg("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);
+ e_dbg("ULP_CONFIG_DONE cleared after %d msec\n",
+ i * 10);
if (force) {
mac_reg = er32(H2ME);
@@ -1663,6 +1668,7 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
case e1000_pch_tgp:
case e1000_pch_adp:
case e1000_pch_mtp:
+ case e1000_pch_lnp:
rc = e1000_init_phy_params_pchlan(hw);
break;
default:
@@ -2118,6 +2124,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
case e1000_pch_tgp:
case e1000_pch_adp:
case e1000_pch_mtp:
+ case e1000_pch_lnp:
sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
break;
default:
@@ -3162,6 +3169,7 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
case e1000_pch_tgp:
case e1000_pch_adp:
case e1000_pch_mtp:
+ case e1000_pch_lnp:
bank1_offset = nvm->flash_bank_size;
act_offset = E1000_ICH_NVM_SIG_WORD;
@@ -4101,6 +4109,7 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
case e1000_pch_tgp:
case e1000_pch_adp:
case e1000_pch_mtp:
+ case e1000_pch_lnp:
word = NVM_COMPAT;
valid_csum_mask = NVM_COMPAT_VALID_CSUM;
break;
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index 1502895eb45d..9b145f6248a8 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -41,12 +41,15 @@
#define E1000_FWSM_WLOCK_MAC_MASK 0x0380
#define E1000_FWSM_WLOCK_MAC_SHIFT 7
#define E1000_FWSM_ULP_CFG_DONE 0x00000400 /* Low power cfg done */
+#define E1000_EXFWSM_DPG_EXIT_DONE 0x00000001
/* Shared Receive Address Registers */
#define E1000_SHRAL_PCH_LPT(_i) (0x05408 + ((_i) * 8))
#define E1000_SHRAH_PCH_LPT(_i) (0x0540C + ((_i) * 8))
#define E1000_H2ME 0x05B50 /* Host to ME */
+#define E1000_H2ME_START_DPG 0x00000001 /* tell ME that DPG has started */
+#define E1000_H2ME_EXIT_DPG 0x00000002 /* tell ME to exit DPG */
#define E1000_H2ME_ULP 0x00000800 /* ULP Indication Bit */
#define E1000_H2ME_ENFORCE_SETTINGS 0x00001000 /* Enforce Settings */
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 757a54c39eef..900b3ab998bd 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -3550,6 +3550,7 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
case e1000_pch_tgp:
case e1000_pch_adp:
case e1000_pch_mtp:
+ case e1000_pch_lnp:
if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) {
/* Stable 24MHz frequency */
incperiod = INCPERIOD_24MHZ;
@@ -4068,6 +4069,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
case e1000_pch_tgp:
case e1000_pch_adp:
case e1000_pch_mtp:
+ case e1000_pch_lnp:
fc->refresh_time = 0xFFFF;
fc->pause_time = 0xFFFF;
@@ -6343,42 +6345,110 @@ static void e1000e_s0ix_entry_flow(struct e1000_adapter *adapter)
u32 mac_data;
u16 phy_data;
- /* Disable the periodic inband message,
- * don't request PCIe clock in K1 page770_17[10:9] = 10b
- */
- e1e_rphy(hw, HV_PM_CTRL, &phy_data);
- phy_data &= ~HV_PM_CTRL_K1_CLK_REQ;
- phy_data |= BIT(10);
- e1e_wphy(hw, HV_PM_CTRL, phy_data);
+ if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
+ /* Request that the ME configure the device for S0ix */
+ mac_data = er32(H2ME);
+ mac_data |= E1000_H2ME_START_DPG;
+ mac_data &= ~E1000_H2ME_EXIT_DPG;
+ ew32(H2ME, mac_data);
+ } else {
+ /* Have the driver configure the device for S0ix */
+ /* Disable the periodic inband message,
+ * don't request PCIe clock in K1 page770_17[10:9] = 10b
+ */
+ e1e_rphy(hw, HV_PM_CTRL, &phy_data);
+ phy_data &= ~HV_PM_CTRL_K1_CLK_REQ;
+ phy_data |= BIT(10);
+ e1e_wphy(hw, HV_PM_CTRL, phy_data);
- /* Make sure we don't exit K1 every time a new packet arrives
- * 772_29[5] = 1 CS_Mode_Stay_In_K1
- */
- e1e_rphy(hw, I217_CGFREG, &phy_data);
- phy_data |= BIT(5);
- e1e_wphy(hw, I217_CGFREG, phy_data);
+ /* Make sure we don't exit K1 every time a new packet arrives
+ * 772_29[5] = 1 CS_Mode_Stay_In_K1
+ */
+ e1e_rphy(hw, I217_CGFREG, &phy_data);
+ phy_data |= BIT(5);
+ e1e_wphy(hw, I217_CGFREG, phy_data);
- /* Change the MAC/PHY interface to SMBus
- * Force the SMBus in PHY page769_23[0] = 1
- * Force the SMBus in MAC CTRL_EXT[11] = 1
- */
- e1e_rphy(hw, CV_SMB_CTRL, &phy_data);
- phy_data |= CV_SMB_CTRL_FORCE_SMBUS;
- e1e_wphy(hw, CV_SMB_CTRL, phy_data);
- mac_data = er32(CTRL_EXT);
- mac_data |= E1000_CTRL_EXT_FORCE_SMBUS;
- ew32(CTRL_EXT, mac_data);
+ /* Change the MAC/PHY interface to SMBus
+ * Force the SMBus in PHY page769_23[0] = 1
+ * Force the SMBus in MAC CTRL_EXT[11] = 1
+ */
+ e1e_rphy(hw, CV_SMB_CTRL, &phy_data);
+ phy_data |= CV_SMB_CTRL_FORCE_SMBUS;
+ e1e_wphy(hw, CV_SMB_CTRL, phy_data);
+ mac_data = er32(CTRL_EXT);
+ mac_data |= E1000_CTRL_EXT_FORCE_SMBUS;
+ ew32(CTRL_EXT, mac_data);
+
+ /* DFT control: PHY bit: page769_20[0] = 1
+ * page769_20[7] - PHY PLL stop
+ * page769_20[8] - PHY go to the electrical idle
+ * page769_20[9] - PHY serdes disable
+ * Gate PPW via EXTCNF_CTRL - set 0x0F00[7] = 1
+ */
+ e1e_rphy(hw, I82579_DFT_CTRL, &phy_data);
+ phy_data |= BIT(0);
+ phy_data |= BIT(7);
+ phy_data |= BIT(8);
+ phy_data |= BIT(9);
+ e1e_wphy(hw, I82579_DFT_CTRL, phy_data);
+
+ mac_data = er32(EXTCNF_CTRL);
+ mac_data |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
+ ew32(EXTCNF_CTRL, mac_data);
+
+ /* Enable the Dynamic Power Gating in the MAC */
+ mac_data = er32(FEXTNVM7);
+ mac_data |= BIT(22);
+ ew32(FEXTNVM7, mac_data);
+
+ /* Disable disconnected cable conditioning for Power Gating */
+ mac_data = er32(DPGFR);
+ mac_data |= BIT(2);
+ ew32(DPGFR, mac_data);
+
+ /* Don't wake from dynamic Power Gating with clock request */
+ mac_data = er32(FEXTNVM12);
+ mac_data |= BIT(12);
+ ew32(FEXTNVM12, mac_data);
+
+ /* Ungate PGCB clock */
+ mac_data = er32(FEXTNVM9);
+ mac_data &= ~BIT(28);
+ ew32(FEXTNVM9, mac_data);
+
+ /* Enable K1 off to enable mPHY Power Gating */
+ mac_data = er32(FEXTNVM6);
+ mac_data |= BIT(31);
+ ew32(FEXTNVM6, mac_data);
+
+ /* Enable mPHY power gating for any link and speed */
+ mac_data = er32(FEXTNVM8);
+ mac_data |= BIT(9);
+ ew32(FEXTNVM8, mac_data);
+
+ /* Enable the Dynamic Clock Gating in the DMA and MAC */
+ mac_data = er32(CTRL_EXT);
+ mac_data |= E1000_CTRL_EXT_DMA_DYN_CLK_EN;
+ ew32(CTRL_EXT, mac_data);
+
+ /* No MAC DPG gating of SLP_S0 in modern standby;
+ * switch the lanphypc logic to use the PMC counter
+ */
+ mac_data = er32(FEXTNVM5);
+ mac_data |= BIT(7);
+ ew32(FEXTNVM5, mac_data);
+ }
- /* DFT control: PHY bit: page769_20[0] = 1
- * Gate PPW via EXTCNF_CTRL - set 0x0F00[7] = 1
- */
- e1e_rphy(hw, I82579_DFT_CTRL, &phy_data);
- phy_data |= BIT(0);
- e1e_wphy(hw, I82579_DFT_CTRL, phy_data);
+ /* Disable the time synchronization clock */
+ mac_data = er32(FEXTNVM7);
+ mac_data |= BIT(31);
+ mac_data &= ~BIT(0);
+ ew32(FEXTNVM7, mac_data);
- mac_data = er32(EXTCNF_CTRL);
- mac_data |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
- ew32(EXTCNF_CTRL, mac_data);
+ /* Dynamic Power Gating Enable */
+ mac_data = er32(CTRL_EXT);
+ mac_data |= BIT(3);
+ ew32(CTRL_EXT, mac_data);
/* Check MAC Tx/Rx packet buffer pointers.
* Reset MAC Tx/Rx packet buffer pointers to suppress any
@@ -6414,148 +6484,130 @@ static void e1000e_s0ix_entry_flow(struct e1000_adapter *adapter)
mac_data = er32(RDFPC);
if (mac_data)
ew32(RDFPC, 0);
-
- /* Enable the Dynamic Power Gating in the MAC */
- mac_data = er32(FEXTNVM7);
- mac_data |= BIT(22);
- ew32(FEXTNVM7, mac_data);
-
- /* Disable the time synchronization clock */
- mac_data = er32(FEXTNVM7);
- mac_data |= BIT(31);
- mac_data &= ~BIT(0);
- ew32(FEXTNVM7, mac_data);
-
- /* Dynamic Power Gating Enable */
- mac_data = er32(CTRL_EXT);
- mac_data |= BIT(3);
- ew32(CTRL_EXT, mac_data);
-
- /* Disable disconnected cable conditioning for Power Gating */
- mac_data = er32(DPGFR);
- mac_data |= BIT(2);
- ew32(DPGFR, mac_data);
-
- /* Don't wake from dynamic Power Gating with clock request */
- mac_data = er32(FEXTNVM12);
- mac_data |= BIT(12);
- ew32(FEXTNVM12, mac_data);
-
- /* Ungate PGCB clock */
- mac_data = er32(FEXTNVM9);
- mac_data &= ~BIT(28);
- ew32(FEXTNVM9, mac_data);
-
- /* Enable K1 off to enable mPHY Power Gating */
- mac_data = er32(FEXTNVM6);
- mac_data |= BIT(31);
- ew32(FEXTNVM6, mac_data);
-
- /* Enable mPHY power gating for any link and speed */
- mac_data = er32(FEXTNVM8);
- mac_data |= BIT(9);
- ew32(FEXTNVM8, mac_data);
-
- /* Enable the Dynamic Clock Gating in the DMA and MAC */
- mac_data = er32(CTRL_EXT);
- mac_data |= E1000_CTRL_EXT_DMA_DYN_CLK_EN;
- ew32(CTRL_EXT, mac_data);
-
- /* No MAC DPG gating SLP_S0 in modern standby
- * Switch the logic of the lanphypc to use PMC counter
- */
- mac_data = er32(FEXTNVM5);
- mac_data |= BIT(7);
- ew32(FEXTNVM5, mac_data);
}
static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
+ bool firmware_bug = false;
u32 mac_data;
u16 phy_data;
+ u32 i = 0;
+
+ if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
+ /* Request that the ME unconfigure the device from S0ix */
+ mac_data = er32(H2ME);
+ mac_data &= ~E1000_H2ME_START_DPG;
+ mac_data |= E1000_H2ME_EXIT_DPG;
+ ew32(H2ME, mac_data);
+
+ /* Poll up to 2.5 seconds for ME to unconfigure DPG.
+ * If this takes more than 1 second, show a warning indicating a
+ * firmware bug
+ */
+ while (!(er32(EXFWSM) & E1000_EXFWSM_DPG_EXIT_DONE)) {
+ if (i > 100 && !firmware_bug)
+ firmware_bug = true;
- /* Disable the Dynamic Power Gating in the MAC */
- mac_data = er32(FEXTNVM7);
- mac_data &= 0xFFBFFFFF;
- ew32(FEXTNVM7, mac_data);
+ if (i++ == 250) {
+ e_dbg("Timeout (firmware bug): %d msec\n",
+ i * 10);
+ break;
+ }
- /* Enable the time synchronization clock */
- mac_data = er32(FEXTNVM7);
- mac_data |= BIT(0);
- ew32(FEXTNVM7, mac_data);
+ usleep_range(10000, 11000);
+ }
+ if (firmware_bug)
+ e_warn("DPG_EXIT_DONE took %d msec. This is a firmware bug\n",
+ i * 10);
+ else
+ e_dbg("DPG_EXIT_DONE cleared after %d msec\n", i * 10);
+ } else {
+ /* Have the driver unconfigure the device from S0ix */
+
+ /* Disable the Dynamic Power Gating in the MAC */
+ mac_data = er32(FEXTNVM7);
+ mac_data &= 0xFFBFFFFF;
+ ew32(FEXTNVM7, mac_data);
+
+ /* Disable mPHY power gating for any link and speed */
+ mac_data = er32(FEXTNVM8);
+ mac_data &= ~BIT(9);
+ ew32(FEXTNVM8, mac_data);
+
+ /* Disable K1 off */
+ mac_data = er32(FEXTNVM6);
+ mac_data &= ~BIT(31);
+ ew32(FEXTNVM6, mac_data);
+
+ /* Re-gate the PGCB clock */
+ mac_data = er32(FEXTNVM9);
+ mac_data |= BIT(28);
+ ew32(FEXTNVM9, mac_data);
+
+ /* Re-enable waking from dynamic
+ * Power Gating on a clock request
+ */
+ mac_data = er32(FEXTNVM12);
+ mac_data &= ~BIT(12);
+ ew32(FEXTNVM12, mac_data);
- /* Disable mPHY power gating for any link and speed */
- mac_data = er32(FEXTNVM8);
- mac_data &= ~BIT(9);
- ew32(FEXTNVM8, mac_data);
+ /* Re-enable disconnected cable conditioning
+ * for Power Gating
+ */
+ mac_data = er32(DPGFR);
+ mac_data &= ~BIT(2);
+ ew32(DPGFR, mac_data);
- /* Disable K1 off */
- mac_data = er32(FEXTNVM6);
- mac_data &= ~BIT(31);
- ew32(FEXTNVM6, mac_data);
+ /* Disable the Dynamic Clock Gating in the DMA and MAC */
+ mac_data = er32(CTRL_EXT);
+ mac_data &= 0xFFF7FFFF;
+ ew32(CTRL_EXT, mac_data);
- /* Disable Ungate PGCB clock */
- mac_data = er32(FEXTNVM9);
- mac_data |= BIT(28);
- ew32(FEXTNVM9, mac_data);
+ /* Revert the lanphypc logic to use the internal Gbe counter
+ * and not the PMC counter
+ */
+ mac_data = er32(FEXTNVM5);
+ mac_data &= 0xFFFFFF7F;
+ ew32(FEXTNVM5, mac_data);
- /* Cancel not waking from dynamic
- * Power Gating with clock request
- */
- mac_data = er32(FEXTNVM12);
- mac_data &= ~BIT(12);
- ew32(FEXTNVM12, mac_data);
+ /* Enable the periodic inband message,
+ * request PCIe clock in K1 page770_17[10:9] = 01b
+ */
+ e1e_rphy(hw, HV_PM_CTRL, &phy_data);
+ phy_data &= 0xFBFF;
+ phy_data |= HV_PM_CTRL_K1_CLK_REQ;
+ e1e_wphy(hw, HV_PM_CTRL, phy_data);
- /* Cancel disable disconnected cable conditioning
- * for Power Gating
- */
- mac_data = er32(DPGFR);
- mac_data &= ~BIT(2);
- ew32(DPGFR, mac_data);
+ /* Restore the configuration
+ * 772_29[5] = 0 CS_Mode_Stay_In_K1
+ */
+ e1e_rphy(hw, I217_CGFREG, &phy_data);
+ phy_data &= 0xFFDF;
+ e1e_wphy(hw, I217_CGFREG, phy_data);
+
+ /* Change the MAC/PHY interface to Kumeran
+ * Unforce the SMBus in PHY page769_23[0] = 0
+ * Unforce the SMBus in MAC CTRL_EXT[11] = 0
+ */
+ e1e_rphy(hw, CV_SMB_CTRL, &phy_data);
+ phy_data &= ~CV_SMB_CTRL_FORCE_SMBUS;
+ e1e_wphy(hw, CV_SMB_CTRL, phy_data);
+ mac_data = er32(CTRL_EXT);
+ mac_data &= ~E1000_CTRL_EXT_FORCE_SMBUS;
+ ew32(CTRL_EXT, mac_data);
+ }
/* Disable Dynamic Power Gating */
mac_data = er32(CTRL_EXT);
mac_data &= 0xFFFFFFF7;
ew32(CTRL_EXT, mac_data);
- /* Disable the Dynamic Clock Gating in the DMA and MAC */
- mac_data = er32(CTRL_EXT);
- mac_data &= 0xFFF7FFFF;
- ew32(CTRL_EXT, mac_data);
-
- /* Revert the lanphypc logic to use the internal Gbe counter
- * and not the PMC counter
- */
- mac_data = er32(FEXTNVM5);
- mac_data &= 0xFFFFFF7F;
- ew32(FEXTNVM5, mac_data);
-
- /* Enable the periodic inband message,
- * Request PCIe clock in K1 page770_17[10:9] =01b
- */
- e1e_rphy(hw, HV_PM_CTRL, &phy_data);
- phy_data &= 0xFBFF;
- phy_data |= HV_PM_CTRL_K1_CLK_REQ;
- e1e_wphy(hw, HV_PM_CTRL, phy_data);
-
- /* Return back configuration
- * 772_29[5] = 0 CS_Mode_Stay_In_K1
- */
- e1e_rphy(hw, I217_CGFREG, &phy_data);
- phy_data &= 0xFFDF;
- e1e_wphy(hw, I217_CGFREG, phy_data);
-
- /* Change the MAC/PHY interface to Kumeran
- * Unforce the SMBus in PHY page769_23[0] = 0
- * Unforce the SMBus in MAC CTRL_EXT[11] = 0
- */
- e1e_rphy(hw, CV_SMB_CTRL, &phy_data);
- phy_data &= ~CV_SMB_CTRL_FORCE_SMBUS;
- e1e_wphy(hw, CV_SMB_CTRL, phy_data);
- mac_data = er32(CTRL_EXT);
- mac_data &= ~E1000_CTRL_EXT_FORCE_SMBUS;
- ew32(CTRL_EXT, mac_data);
+ /* Enable the time synchronization clock */
+ mac_data = er32(FEXTNVM7);
+ mac_data &= ~BIT(31);
+ mac_data |= BIT(0);
+ ew32(FEXTNVM7, mac_data);
}
static int e1000e_pm_freeze(struct device *dev)
@@ -7302,7 +7354,7 @@ static const struct net_device_ops e1000e_netdev_ops = {
.ndo_set_rx_mode = e1000e_set_rx_mode,
.ndo_set_mac_address = e1000_set_mac,
.ndo_change_mtu = e1000_change_mtu,
- .ndo_do_ioctl = e1000_ioctl,
+ .ndo_eth_ioctl = e1000_ioctl,
.ndo_tx_timeout = e1000_tx_timeout,
.ndo_validate_addr = eth_validate_addr,
@@ -7677,7 +7729,7 @@ err_dma:
* @pdev: PCI device information struct
*
* e1000_remove is called by the PCI subsystem to alert the driver
- * that it should release a PCI device. The could be caused by a
+ * that it should release a PCI device. This could be caused by a
* Hot-Plug event, or because the driver is going to be removed from
* memory.
**/
@@ -7850,14 +7902,22 @@ static const struct pci_device_id e1000_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V14), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM15), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V15), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM23), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V23), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM16), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V16), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM17), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V17), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM22), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V22), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM18), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V18), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM19), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V19), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM20), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V20), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM21), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V21), board_pch_cnp },
{ 0, 0, 0, 0, 0, 0, 0 } /* terminate list */
};
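
In the ME-present exit path above, the driver polls EXFWSM for DPG_EXIT_DONE against two thresholds: past one second it records a firmware bug to warn about, and at 2.5 seconds it gives up. The pattern, reduced to its skeleton (demo_read32() and the constants stand in for er32(EXFWSM) and the driver's values):

	#include <linux/bits.h>
	#include <linux/delay.h>
	#include <linux/types.h>

	#define DEMO_DONE_BIT	BIT(0)	/* hypothetical completion bit */

	static u32 demo_read32(void);	/* hypothetical register read */

	static bool demo_poll_done(void)
	{
		bool firmware_bug = false;
		u32 i = 0;

		while (!(demo_read32() & DEMO_DONE_BIT)) {
			if (i > 100 && !firmware_bug)	/* > 1 s and counting */
				firmware_bug = true;
			if (i++ == 250)			/* 2.5 s ceiling */
				break;
			usleep_range(10000, 11000);	/* ~10 ms per pass */
		}

		return !firmware_bug;
	}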
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index 9e79d672f4f1..eb5c014c02fb 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -298,6 +298,7 @@ void e1000e_ptp_init(struct e1000_adapter *adapter)
case e1000_pch_tgp:
case e1000_pch_adp:
case e1000_pch_mtp:
+ case e1000_pch_lnp:
if ((hw->mac.type < e1000_pch_lpt) ||
(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) {
adapter->ptp_clock_info.max_adj = 24000000 - 1;
diff --git a/drivers/net/ethernet/intel/e1000e/regs.h b/drivers/net/ethernet/intel/e1000e/regs.h
index 8165ba2619a4..6c0cd8cab3ef 100644
--- a/drivers/net/ethernet/intel/e1000e/regs.h
+++ b/drivers/net/ethernet/intel/e1000e/regs.h
@@ -213,6 +213,7 @@
#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */
#define E1000_SWSM 0x05B50 /* SW Semaphore */
#define E1000_FWSM 0x05B54 /* FW Semaphore */
+#define E1000_EXFWSM 0x05B58 /* Extended FW Semaphore */
/* Driver-only SW semaphore (not used by BOOT agents) */
#define E1000_SWSM2 0x05B58
#define E1000_FFLT_DBG 0x05F04 /* Debug Register */
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index b9417dc0007c..39fb3d57c057 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -428,6 +428,8 @@ struct i40e_channel {
struct i40e_vsi *parent_vsi;
};
+struct i40e_ptp_pins_settings;
+
static inline bool i40e_is_channel_macvlan(struct i40e_channel *ch)
{
return !!ch->fwd;
@@ -644,12 +646,83 @@ struct i40e_pf {
struct i40e_rx_pb_config pb_cfg; /* Current Rx packet buffer config */
struct i40e_dcbx_config tmp_cfg;
+/* GPIO defines used by PTP */
+#define I40E_SDP3_2 18
+#define I40E_SDP3_3 19
+#define I40E_GPIO_4 20
+#define I40E_LED2_0 26
+#define I40E_LED2_1 27
+#define I40E_LED3_0 28
+#define I40E_LED3_1 29
+#define I40E_GLGEN_GPIO_SET_SDP_DATA_HI \
+ (1 << I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT)
+#define I40E_GLGEN_GPIO_SET_DRV_SDP_DATA \
+ (1 << I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_0 \
+ (0 << I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_1 \
+ (1 << I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_RESERVED BIT(2)
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_Z \
+ (1 << I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_DIR_OUT \
+ (1 << I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_TRI_DRV_HI \
+ (1 << I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_OUT_HI_RST \
+ (1 << I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_TIMESYNC_0 \
+ (3 << I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_TIMESYNC_1 \
+ (4 << I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_NOT_FOR_PHY_CONN \
+ (0x3F << I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT \
+ (1 << I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PORT_0_IN_TIMESYNC_0 \
+ (I40E_GLGEN_GPIO_CTL_NOT_FOR_PHY_CONN | \
+ I40E_GLGEN_GPIO_CTL_TIMESYNC_0 | \
+ I40E_GLGEN_GPIO_CTL_RESERVED | I40E_GLGEN_GPIO_CTL_PRT_NUM_0)
+#define I40E_GLGEN_GPIO_CTL_PORT_1_IN_TIMESYNC_0 \
+ (I40E_GLGEN_GPIO_CTL_NOT_FOR_PHY_CONN | \
+ I40E_GLGEN_GPIO_CTL_TIMESYNC_0 | \
+ I40E_GLGEN_GPIO_CTL_RESERVED | I40E_GLGEN_GPIO_CTL_PRT_NUM_1)
+#define I40E_GLGEN_GPIO_CTL_PORT_0_OUT_TIMESYNC_1 \
+ (I40E_GLGEN_GPIO_CTL_NOT_FOR_PHY_CONN | \
+ I40E_GLGEN_GPIO_CTL_TIMESYNC_1 | I40E_GLGEN_GPIO_CTL_OUT_HI_RST | \
+ I40E_GLGEN_GPIO_CTL_TRI_DRV_HI | I40E_GLGEN_GPIO_CTL_DIR_OUT | \
+ I40E_GLGEN_GPIO_CTL_RESERVED | I40E_GLGEN_GPIO_CTL_PRT_NUM_0)
+#define I40E_GLGEN_GPIO_CTL_PORT_1_OUT_TIMESYNC_1 \
+ (I40E_GLGEN_GPIO_CTL_NOT_FOR_PHY_CONN | \
+ I40E_GLGEN_GPIO_CTL_TIMESYNC_1 | I40E_GLGEN_GPIO_CTL_OUT_HI_RST | \
+ I40E_GLGEN_GPIO_CTL_TRI_DRV_HI | I40E_GLGEN_GPIO_CTL_DIR_OUT | \
+ I40E_GLGEN_GPIO_CTL_RESERVED | I40E_GLGEN_GPIO_CTL_PRT_NUM_1)
+#define I40E_GLGEN_GPIO_CTL_LED_INIT \
+ (I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_Z | \
+ I40E_GLGEN_GPIO_CTL_DIR_OUT | \
+ I40E_GLGEN_GPIO_CTL_TRI_DRV_HI | \
+ I40E_GLGEN_GPIO_CTL_OUT_HI_RST | \
+ I40E_GLGEN_GPIO_CTL_OUT_DEFAULT | \
+ I40E_GLGEN_GPIO_CTL_NOT_FOR_PHY_CONN)
+#define I40E_PRTTSYN_AUX_1_INSTNT \
+ (1 << I40E_PRTTSYN_AUX_1_INSTNT_SHIFT)
+#define I40E_PRTTSYN_AUX_0_OUT_ENABLE \
+ (1 << I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT)
+#define I40E_PRTTSYN_AUX_0_OUT_CLK_MOD (3 << I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT)
+#define I40E_PRTTSYN_AUX_0_OUT_ENABLE_CLK_MOD \
+ (I40E_PRTTSYN_AUX_0_OUT_ENABLE | I40E_PRTTSYN_AUX_0_OUT_CLK_MOD)
+#define I40E_PTP_HALF_SECOND 500000000LL /* nanoseconds */
+#define I40E_PTP_2_SEC_DELAY 2
+
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_caps;
struct sk_buff *ptp_tx_skb;
unsigned long ptp_tx_start;
struct hwtstamp_config tstamp_config;
struct timespec64 ptp_prev_hw_time;
+ struct work_struct ptp_pps_work;
+ struct work_struct ptp_extts0_work;
+ struct work_struct ptp_extts1_work;
ktime_t ptp_reset_start;
struct mutex tmreg_lock; /* Used to protect the SYSTIME registers. */
u32 ptp_adj_mult;
@@ -657,10 +730,14 @@ struct i40e_pf {
u32 tx_hwtstamp_skipped;
u32 rx_hwtstamp_cleared;
u32 latch_event_flags;
+ u64 ptp_pps_start;
+ u32 pps_delay;
spinlock_t ptp_rx_lock; /* Used to protect Rx timestamp registers. */
+ struct ptp_pin_desc ptp_pin[3];
unsigned long latch_events[4];
bool ptp_tx;
bool ptp_rx;
+ struct i40e_ptp_pins_settings *ptp_pins;
u16 rss_table_size; /* HW RSS table size */
u32 max_bw;
u32 min_bw;
@@ -1169,6 +1246,7 @@ void i40e_ptp_save_hw_time(struct i40e_pf *pf);
void i40e_ptp_restore_hw_time(struct i40e_pf *pf);
void i40e_ptp_init(struct i40e_pf *pf);
void i40e_ptp_stop(struct i40e_pf *pf);
+int i40e_ptp_alloc_pins(struct i40e_pf *pf);
int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi);
i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf);
i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf);
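
The GPIO block added to i40e.h builds complete register images by OR-ing per-field values, each expressed as value << FIELD_SHIFT against shifts the register header already defines. The idiom in miniature (all names hypothetical):

	#define DEMO_FIELD_MODE_SHIFT	3	/* field positions from the datasheet */
	#define DEMO_FIELD_ENA_SHIFT	11

	#define DEMO_MODE_TIMESYNC	(3 << DEMO_FIELD_MODE_SHIFT)
	#define DEMO_OUTPUT_ENABLE	(1 << DEMO_FIELD_ENA_SHIFT)

	/* a named combination, like I40E_GLGEN_GPIO_CTL_PORT_0_IN_TIMESYNC_0 */
	#define DEMO_CTL_TIMESYNC_OUT	(DEMO_MODE_TIMESYNC | DEMO_OUTPUT_ENABLE)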
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 1d1f52756a93..2f20980dd9a5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -4079,10 +4079,13 @@ static irqreturn_t i40e_intr(int irq, void *data)
if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);
- if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
- icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
+ if (prttsyn_stat & I40E_PRTTSYN_STAT_0_EVENT0_MASK)
+ schedule_work(&pf->ptp_extts0_work);
+
+ if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK)
i40e_ptp_tx_hwtstamp(pf);
- }
+
+ icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
}
/* If a critical error is pending we have no choice but to reset the
@@ -4635,7 +4638,7 @@ void i40e_vsi_stop_rings(struct i40e_vsi *vsi)
err = i40e_control_wait_rx_q(pf, pf_q, false);
if (err)
dev_info(&pf->pdev->dev,
- "VSI seid %d Rx ring %d dissable timeout\n",
+ "VSI seid %d Rx ring %d disable timeout\n",
vsi->seid, pf_q);
}
@@ -13265,7 +13268,7 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = i40e_set_mac,
.ndo_change_mtu = i40e_change_mtu,
- .ndo_do_ioctl = i40e_ioctl,
+ .ndo_eth_ioctl = i40e_ioctl,
.ndo_tx_timeout = i40e_tx_timeout,
.ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid,
@@ -15181,6 +15184,22 @@ err_switch_setup:
}
/**
+ * i40e_set_subsystem_device_id - set subsystem device id
+ * @hw: pointer to the hardware info
+ *
+ * Set PCI subsystem device id either from a pci_dev structure or
+ * a specific FW register.
+ **/
+static inline void i40e_set_subsystem_device_id(struct i40e_hw *hw)
+{
+ struct pci_dev *pdev = ((struct i40e_pf *)hw->back)->pdev;
+
+ hw->subsystem_device_id = pdev->subsystem_device ?
+ pdev->subsystem_device :
+ (ushort)(rd32(hw, I40E_PFPCI_SUBSYSID) & USHRT_MAX);
+}
+
+/**
* i40e_probe - Device initialization routine
* @pdev: PCI device information struct
* @ent: entry in i40e_pci_tbl
@@ -15275,7 +15294,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->device_id = pdev->device;
pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
hw->subsystem_vendor_id = pdev->subsystem_vendor;
- hw->subsystem_device_id = pdev->subsystem_device;
+ i40e_set_subsystem_device_id(hw);
hw->bus.device = PCI_SLOT(pdev->devfn);
hw->bus.func = PCI_FUNC(pdev->devfn);
hw->bus.bus_id = pdev->bus->number;
@@ -15455,6 +15474,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (is_valid_ether_addr(hw->mac.port_addr))
pf->hw_features |= I40E_HW_PORT_ID_VALID;
+ i40e_ptp_alloc_pins(pf);
pci_set_drvdata(pdev, pf);
pci_save_state(pdev);
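
i40e_set_subsystem_device_id() exists because some platforms leave the config-space subsystem ID zeroed; the helper then falls back to a firmware-populated register. The shape of the fallback, with a hypothetical register read standing in for rd32(hw, I40E_PFPCI_SUBSYSID):

	#include <linux/pci.h>

	static u32 demo_read_fw_subsysid(void);	/* hypothetical */

	static u16 demo_subsystem_device_id(struct pci_dev *pdev)
	{
		/* prefer config space; use the firmware copy only when the
		 * platform left it zero
		 */
		return pdev->subsystem_device ?
		       pdev->subsystem_device :
		       (u16)(demo_read_fw_subsysid() & 0xFFFF);
	}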
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index 7b971b205d36..09b1d5aed1c9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -3,6 +3,7 @@
#include "i40e.h"
#include <linux/ptp_classify.h>
+#include <linux/posix-clock.h>
/* The XL710 timesync is very much like Intel's 82599 design when it comes to
* the fundamental clock design. However, the clock operations are much simpler
@@ -20,10 +21,252 @@
#define I40E_PTP_10GB_INCVAL_MULT 2
#define I40E_PTP_5GB_INCVAL_MULT 2
#define I40E_PTP_1GB_INCVAL_MULT 20
+#define I40E_ISGN 0x80000000
#define I40E_PRTTSYN_CTL1_TSYNTYPE_V1 BIT(I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
#define I40E_PRTTSYN_CTL1_TSYNTYPE_V2 (2 << \
I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
+#define I40E_SUBDEV_ID_25G_PTP_PIN 0xB
+#define to_dev(obj) container_of(obj, struct device, kobj)
+
+enum i40e_ptp_pin {
+ SDP3_2 = 0,
+ SDP3_3,
+ GPIO_4
+};
+
+enum i40e_can_set_pins_t {
+ CANT_DO_PINS = -1,
+ CAN_SET_PINS,
+ CAN_DO_PINS
+};
+
+static struct ptp_pin_desc sdp_desc[] = {
+ /* name idx func chan */
+ {"SDP3_2", SDP3_2, PTP_PF_NONE, 0},
+ {"SDP3_3", SDP3_3, PTP_PF_NONE, 1},
+ {"GPIO_4", GPIO_4, PTP_PF_NONE, 1},
+};
+
+enum i40e_ptp_gpio_pin_state {
+ end = -2,
+ invalid,
+ off,
+ in_A,
+ in_B,
+ out_A,
+ out_B,
+};
+
+static const char * const i40e_ptp_gpio_pin_state2str[] = {
+ "off", "in_A", "in_B", "out_A", "out_B"
+};
+
+enum i40e_ptp_led_pin_state {
+ led_end = -2,
+ low = 0,
+ high,
+};
+
+struct i40e_ptp_pins_settings {
+ enum i40e_ptp_gpio_pin_state sdp3_2;
+ enum i40e_ptp_gpio_pin_state sdp3_3;
+ enum i40e_ptp_gpio_pin_state gpio_4;
+ enum i40e_ptp_led_pin_state led2_0;
+ enum i40e_ptp_led_pin_state led2_1;
+ enum i40e_ptp_led_pin_state led3_0;
+ enum i40e_ptp_led_pin_state led3_1;
+};
+
+static const struct i40e_ptp_pins_settings
+ i40e_ptp_pin_led_allowed_states[] = {
+ {off, off, off, high, high, high, high},
+ {off, in_A, off, high, high, high, low},
+ {off, out_A, off, high, low, high, high},
+ {off, in_B, off, high, high, high, low},
+ {off, out_B, off, high, low, high, high},
+ {in_A, off, off, high, high, high, low},
+ {in_A, in_B, off, high, high, high, low},
+ {in_A, out_B, off, high, low, high, high},
+ {out_A, off, off, high, low, high, high},
+ {out_A, in_B, off, high, low, high, high},
+ {in_B, off, off, high, high, high, low},
+ {in_B, in_A, off, high, high, high, low},
+ {in_B, out_A, off, high, low, high, high},
+ {out_B, off, off, high, low, high, high},
+ {out_B, in_A, off, high, low, high, high},
+ {off, off, in_A, high, high, low, high},
+ {off, out_A, in_A, high, low, low, high},
+ {off, in_B, in_A, high, high, low, low},
+ {off, out_B, in_A, high, low, low, high},
+ {out_A, off, in_A, high, low, low, high},
+ {out_A, in_B, in_A, high, low, low, high},
+ {in_B, off, in_A, high, high, low, low},
+ {in_B, out_A, in_A, high, low, low, high},
+ {out_B, off, in_A, high, low, low, high},
+ {off, off, out_A, low, high, high, high},
+ {off, in_A, out_A, low, high, high, low},
+ {off, in_B, out_A, low, high, high, low},
+ {off, out_B, out_A, low, low, high, high},
+ {in_A, off, out_A, low, high, high, low},
+ {in_A, in_B, out_A, low, high, high, low},
+ {in_A, out_B, out_A, low, low, high, high},
+ {in_B, off, out_A, low, high, high, low},
+ {in_B, in_A, out_A, low, high, high, low},
+ {out_B, off, out_A, low, low, high, high},
+ {out_B, in_A, out_A, low, low, high, high},
+ {off, off, in_B, high, high, low, high},
+ {off, in_A, in_B, high, high, low, low},
+ {off, out_A, in_B, high, low, low, high},
+ {off, out_B, in_B, high, low, low, high},
+ {in_A, off, in_B, high, high, low, low},
+ {in_A, out_B, in_B, high, low, low, high},
+ {out_A, off, in_B, high, low, low, high},
+ {out_B, off, in_B, high, low, low, high},
+ {out_B, in_A, in_B, high, low, low, high},
+ {off, off, out_B, low, high, high, high},
+ {off, in_A, out_B, low, high, high, low},
+ {off, out_A, out_B, low, low, high, high},
+ {off, in_B, out_B, low, high, high, low},
+ {in_A, off, out_B, low, high, high, low},
+ {in_A, in_B, out_B, low, high, high, low},
+ {out_A, off, out_B, low, low, high, high},
+ {out_A, in_B, out_B, low, low, high, high},
+ {in_B, off, out_B, low, high, high, low},
+ {in_B, in_A, out_B, low, high, high, low},
+ {in_B, out_A, out_B, low, low, high, high},
+ {end, end, end, led_end, led_end, led_end, led_end}
+};
+
+static int i40e_ptp_set_pins(struct i40e_pf *pf,
+ struct i40e_ptp_pins_settings *pins);
+
+/**
+ * i40e_ptp_extts0_work - workqueue task function
+ * @work: workqueue task structure
+ *
+ * Services a PTP external clock (EXTTS) event.
+ **/
+static void i40e_ptp_extts0_work(struct work_struct *work)
+{
+ struct i40e_pf *pf = container_of(work, struct i40e_pf,
+ ptp_extts0_work);
+ struct i40e_hw *hw = &pf->hw;
+ struct ptp_clock_event event;
+ u32 hi, lo;
+
+ /* Event time is captured by one of the two matched registers
+ * PRTTSYN_EVNT_L: 32 LSB of sampled time event
+ * PRTTSYN_EVNT_H: 32 MSB of sampled time event
+ * Event is defined in PRTTSYN_EVNT_0 register
+ */
+ lo = rd32(hw, I40E_PRTTSYN_EVNT_L(0));
+ hi = rd32(hw, I40E_PRTTSYN_EVNT_H(0));
+
+ event.timestamp = (((u64)hi) << 32) | lo;
+
+ event.type = PTP_CLOCK_EXTTS;
+ event.index = hw->pf_id;
+
+ /* fire event */
+ ptp_clock_event(pf->ptp_clock, &event);
+}
+
+/**
+ * i40e_is_ptp_pin_dev - check if device supports PTP pins
+ * @hw: pointer to the hardware structure
+ *
+ * Return true if device supports PTP pins, false otherwise.
+ **/
+static bool i40e_is_ptp_pin_dev(struct i40e_hw *hw)
+{
+ return hw->device_id == I40E_DEV_ID_25G_SFP28 &&
+ hw->subsystem_device_id == I40E_SUBDEV_ID_25G_PTP_PIN;
+}
+
+/**
+ * i40e_can_set_pins - check possibility of manipulating the pins
+ * @pf: board private structure
+ *
+ * Check if all conditions are satisfied to manipulate PTP pins.
+ * Return CAN_SET_PINS if pins can be set directly on this PF,
+ * CAN_DO_PINS if pin changes must go through PF0 of the NIC,
+ * or CANT_DO_PINS otherwise.
+ **/
+static enum i40e_can_set_pins_t i40e_can_set_pins(struct i40e_pf *pf)
+{
+ if (!i40e_is_ptp_pin_dev(&pf->hw)) {
+ dev_warn(&pf->pdev->dev,
+ "PTP external clock not supported.\n");
+ return CANT_DO_PINS;
+ }
+
+ if (!pf->ptp_pins) {
+ dev_warn(&pf->pdev->dev,
+ "PTP PIN manipulation not allowed.\n");
+ return CANT_DO_PINS;
+ }
+
+ if (pf->hw.pf_id) {
+ dev_warn(&pf->pdev->dev,
+ "PTP PINs should be accessed via PF0.\n");
+ return CAN_DO_PINS;
+ }
+
+ return CAN_SET_PINS;
+}
+
+/**
+ * i40_ptp_reset_timing_events - Reset PTP timing events
+ * @pf: Board private structure
+ *
+ * This function resets timing events for pf.
+ **/
+static void i40_ptp_reset_timing_events(struct i40e_pf *pf)
+{
+ u32 i;
+
+ spin_lock_bh(&pf->ptp_rx_lock);
+ for (i = 0; i <= I40E_PRTTSYN_RXTIME_L_MAX_INDEX; i++) {
+ /* reading the timing event registers automatically clears them */
+ rd32(&pf->hw, I40E_PRTTSYN_RXTIME_L(i));
+ rd32(&pf->hw, I40E_PRTTSYN_RXTIME_H(i));
+ pf->latch_events[i] = 0;
+ }
+ /* reading the timing event registers automatically clears them */
+ rd32(&pf->hw, I40E_PRTTSYN_TXTIME_L);
+ rd32(&pf->hw, I40E_PRTTSYN_TXTIME_H);
+
+ pf->tx_hwtstamp_timeouts = 0;
+ pf->tx_hwtstamp_skipped = 0;
+ pf->rx_hwtstamp_cleared = 0;
+ pf->latch_event_flags = 0;
+ spin_unlock_bh(&pf->ptp_rx_lock);
+}
+
+/**
+ * i40e_ptp_verify - check pins
+ * @ptp: ptp clock
+ * @pin: pin index
+ * @func: assigned function
+ * @chan: channel
+ *
+ * Check that the assigned function is supported on the pin.
+ * Return 0 on success or -EOPNOTSUPP if the function is not supported.
+ **/
+static int i40e_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ switch (func) {
+ case PTP_PF_NONE:
+ case PTP_PF_EXTTS:
+ case PTP_PF_PEROUT:
+ break;
+ case PTP_PF_PHYSYNC:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
/**
* i40e_ptp_read - Read the PHC time from the device
@@ -137,6 +380,37 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
}
/**
+ * i40e_ptp_set_1pps_signal_hw - configure 1PPS PTP signal for pins
+ * @pf: the PF private data structure
+ *
+ * Configure 1PPS signal used for PTP pins
+ **/
+static void i40e_ptp_set_1pps_signal_hw(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ struct timespec64 now;
+ u64 ns;
+
+ wr32(hw, I40E_PRTTSYN_AUX_0(1), 0);
+ wr32(hw, I40E_PRTTSYN_AUX_1(1), I40E_PRTTSYN_AUX_1_INSTNT);
+ wr32(hw, I40E_PRTTSYN_AUX_0(1), I40E_PRTTSYN_AUX_0_OUT_ENABLE);
+
+ i40e_ptp_read(pf, &now, NULL);
+ now.tv_sec += I40E_PTP_2_SEC_DELAY;
+ now.tv_nsec = 0;
+ ns = timespec64_to_ns(&now);
+
+ wr32(hw, I40E_PRTTSYN_TGT_L(1), ns & 0xFFFFFFFF);
+ wr32(hw, I40E_PRTTSYN_TGT_H(1), ns >> 32);
+ wr32(hw, I40E_PRTTSYN_CLKO(1), I40E_PTP_HALF_SECOND);
+ wr32(hw, I40E_PRTTSYN_AUX_1(1), I40E_PRTTSYN_AUX_1_INSTNT);
+ wr32(hw, I40E_PRTTSYN_AUX_0(1),
+ I40E_PRTTSYN_AUX_0_OUT_ENABLE_CLK_MOD);
+}
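+
+/* A note on the sequence above: the target time is loaded a full
+ * I40E_PTP_2_SEC_DELAY ahead, truncated to a whole second, so the first
+ * edge lands on a second boundary; CLKO is then programmed with
+ * I40E_PTP_HALF_SECOND which, assuming it sets the half-period of the
+ * output clock as the name suggests, yields a 1 Hz square wave, i.e.
+ * the 1PPS signal.
+ */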
+
+/**
* i40e_ptp_adjtime - Adjust the PHC time
* @ptp: The PTP clock structure
* @delta: Offset in nanoseconds to adjust the PHC time by
@@ -146,14 +420,35 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
- struct timespec64 now, then;
+ struct i40e_hw *hw = &pf->hw;
- then = ns_to_timespec64(delta);
mutex_lock(&pf->tmreg_lock);
- i40e_ptp_read(pf, &now, NULL);
- now = timespec64_add(now, then);
- i40e_ptp_write(pf, (const struct timespec64 *)&now);
+ if (delta > -999999900LL && delta < 999999900LL) {
+ int neg_adj = 0;
+ u32 timadj;
+ u64 tohw;
+
+ if (delta < 0) {
+ neg_adj = 1;
+ tohw = -delta;
+ } else {
+ tohw = delta;
+ }
+
+ timadj = tohw & 0x3FFFFFFF;
+ if (neg_adj)
+ timadj |= I40E_ISGN;
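+ /* Illustrative encoding, assuming I40E_ISGN marks a negative
+ * adjustment: delta = -500000 ns yields tohw = 500000 = 0x7a120,
+ * so timadj = 0x7a120 | I40E_ISGN, a 30-bit magnitude plus a
+ * direction flag.
+ */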
+ wr32(hw, I40E_PRTTSYN_ADJ, timadj);
+ } else {
+ struct timespec64 then, now;
+
+ then = ns_to_timespec64(delta);
+ i40e_ptp_read(pf, &now, NULL);
+ now = timespec64_add(now, then);
+ i40e_ptp_write(pf, (const struct timespec64 *)&now);
+ i40e_ptp_set_1pps_signal_hw(pf);
+ }
mutex_unlock(&pf->tmreg_lock);
@@ -184,7 +479,7 @@ static int i40e_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
/**
* i40e_ptp_settime - Set the time of the PHC
* @ptp: The PTP clock structure
- * @ts: timespec structure that holds the new time value
+ * @ts: timespec64 structure that holds the new time value
*
* Set the device clock to the user input value. The conversion from timespec
* to ns happens in the write function.
@@ -202,18 +497,145 @@ static int i40e_ptp_settime(struct ptp_clock_info *ptp,
}
/**
- * i40e_ptp_feature_enable - Enable/disable ancillary features of the PHC subsystem
+ * i40e_pps_configure - configure PPS events
+ * @ptp: ptp clock
+ * @rq: clock request
+ * @on: status
+ *
+ * Configure PPS events for external clock source.
+ * Return 0 on success or error on failure.
+ **/
+static int i40e_pps_configure(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq,
+ int on)
+{
+ struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
+
+ if (on)
+ i40e_ptp_set_1pps_signal_hw(pf);
+
+ return 0;
+}
+
+/**
+ * i40e_pin_state - determine PIN state
+ * @index: PIN index
+ * @func: function assigned to PIN
+ *
+ * Determine PIN state based on PIN index and function assigned.
+ * Return PIN state.
+ **/
+static enum i40e_ptp_gpio_pin_state i40e_pin_state(int index, int func)
+{
+ enum i40e_ptp_gpio_pin_state state = off;
+
+ if (index == 0 && func == PTP_PF_EXTTS)
+ state = in_A;
+ if (index == 1 && func == PTP_PF_EXTTS)
+ state = in_B;
+ if (index == 0 && func == PTP_PF_PEROUT)
+ state = out_A;
+ if (index == 1 && func == PTP_PF_PEROUT)
+ state = out_B;
+
+ return state;
+}
+
+/**
+ * i40e_ptp_enable_pin - enable PINs.
+ * @pf: private board structure
+ * @chan: channel
+ * @func: PIN function
+ * @on: state
+ *
+ * Enable PTP pins for external clock source.
+ * Return 0 on success or error code on failure.
+ **/
+static int i40e_ptp_enable_pin(struct i40e_pf *pf, unsigned int chan,
+ enum ptp_pin_function func, int on)
+{
+ enum i40e_ptp_gpio_pin_state *pin = NULL;
+ struct i40e_ptp_pins_settings pins;
+ int pin_index;
+
+ /* Use PF0 to set pins. Return success for user space tools */
+ if (pf->hw.pf_id)
+ return 0;
+
+ /* Preserve previous state of pins that we don't touch */
+ pins.sdp3_2 = pf->ptp_pins->sdp3_2;
+ pins.sdp3_3 = pf->ptp_pins->sdp3_3;
+ pins.gpio_4 = pf->ptp_pins->gpio_4;
+
+ /* To turn on the pin - find the corresponding one based on
+ * the given index. To turn the function off - find
+ * which pin had it assigned. Don't use ptp_find_pin here
+ * because it tries to lock the pincfg_mux which is locked by
+ * ptp_pin_store() that calls here.
+ */
+ if (on) {
+ pin_index = ptp_find_pin(pf->ptp_clock, func, chan);
+ if (pin_index < 0)
+ return -EBUSY;
+
+ switch (pin_index) {
+ case SDP3_2:
+ pin = &pins.sdp3_2;
+ break;
+ case SDP3_3:
+ pin = &pins.sdp3_3;
+ break;
+ case GPIO_4:
+ pin = &pins.gpio_4;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ *pin = i40e_pin_state(chan, func);
+ } else {
+ pins.sdp3_2 = off;
+ pins.sdp3_3 = off;
+ pins.gpio_4 = off;
+ }
+
+ return i40e_ptp_set_pins(pf, &pins) ? -EINVAL : 0;
+}
+
+/**
+ * i40e_ptp_feature_enable - Enable external clock pins
* @ptp: The PTP clock structure
- * @rq: The requested feature to change
- * @on: Enable/disable flag
+ * @rq: The PTP clock request structure
+ * @on: To turn feature on/off
*
- * The XL710 does not support any of the ancillary features of the PHC
- * subsystem, so this function may just return.
+ * Enable or disable the requested PTP pin or PPS feature.
**/
static int i40e_ptp_feature_enable(struct ptp_clock_info *ptp,
- struct ptp_clock_request *rq, int on)
+ struct ptp_clock_request *rq,
+ int on)
{
- return -EOPNOTSUPP;
+ struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
+ enum ptp_pin_function func;
+ unsigned int chan;
+
+ /* TODO: Implement flags handling for EXTTS and PEROUT */
+ switch (rq->type) {
+ case PTP_CLK_REQ_EXTTS:
+ func = PTP_PF_EXTTS;
+ chan = rq->extts.index;
+ break;
+ case PTP_CLK_REQ_PEROUT:
+ func = PTP_PF_PEROUT;
+ chan = rq->perout.index;
+ break;
+ case PTP_CLK_REQ_PPS:
+ return i40e_pps_configure(ptp, rq, on);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return i40e_ptp_enable_pin(pf, chan, func, on);
}
/**
@@ -528,6 +950,229 @@ int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr)
}
/**
+ * i40e_ptp_free_pins - free memory used by PTP pins
+ * @pf: Board private structure
+ *
+ * Release memory allocated for PTP pins.
+ **/
+static void i40e_ptp_free_pins(struct i40e_pf *pf)
+{
+ if (i40e_is_ptp_pin_dev(&pf->hw)) {
+ kfree(pf->ptp_pins);
+ kfree(pf->ptp_caps.pin_config);
+ pf->ptp_pins = NULL;
+ }
+}
+
+/**
+ * i40e_ptp_set_pin_hw - Set HW GPIO pin
+ * @hw: pointer to the hardware structure
+ * @pin: pin index
+ * @state: pin state
+ *
+ * Set status of GPIO pin for external clock handling.
+ **/
+static void i40e_ptp_set_pin_hw(struct i40e_hw *hw,
+ unsigned int pin,
+ enum i40e_ptp_gpio_pin_state state)
+{
+ switch (state) {
+ case off:
+ wr32(hw, I40E_GLGEN_GPIO_CTL(pin), 0);
+ break;
+ case in_A:
+ wr32(hw, I40E_GLGEN_GPIO_CTL(pin),
+ I40E_GLGEN_GPIO_CTL_PORT_0_IN_TIMESYNC_0);
+ break;
+ case in_B:
+ wr32(hw, I40E_GLGEN_GPIO_CTL(pin),
+ I40E_GLGEN_GPIO_CTL_PORT_1_IN_TIMESYNC_0);
+ break;
+ case out_A:
+ wr32(hw, I40E_GLGEN_GPIO_CTL(pin),
+ I40E_GLGEN_GPIO_CTL_PORT_0_OUT_TIMESYNC_1);
+ break;
+ case out_B:
+ wr32(hw, I40E_GLGEN_GPIO_CTL(pin),
+ I40E_GLGEN_GPIO_CTL_PORT_1_OUT_TIMESYNC_1);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * i40e_ptp_set_led_hw - Set HW GPIO led
+ * @hw: pointer to the hardware structure
+ * @led: led index
+ * @state: led state
+ *
+ * Set status of GPIO led for external clock handling.
+ **/
+static void i40e_ptp_set_led_hw(struct i40e_hw *hw,
+ unsigned int led,
+ enum i40e_ptp_led_pin_state state)
+{
+ switch (state) {
+ case low:
+ wr32(hw, I40E_GLGEN_GPIO_SET,
+ I40E_GLGEN_GPIO_SET_DRV_SDP_DATA | led);
+ break;
+ case high:
+ wr32(hw, I40E_GLGEN_GPIO_SET,
+ I40E_GLGEN_GPIO_SET_DRV_SDP_DATA |
+ I40E_GLGEN_GPIO_SET_SDP_DATA_HI | led);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * i40e_ptp_init_leds_hw - init LEDs
+ * @hw: pointer to a hardware structure
+ *
+ * Set initial state of LEDs
+ **/
+static void i40e_ptp_init_leds_hw(struct i40e_hw *hw)
+{
+ wr32(hw, I40E_GLGEN_GPIO_CTL(I40E_LED2_0),
+ I40E_GLGEN_GPIO_CTL_LED_INIT);
+ wr32(hw, I40E_GLGEN_GPIO_CTL(I40E_LED2_1),
+ I40E_GLGEN_GPIO_CTL_LED_INIT);
+ wr32(hw, I40E_GLGEN_GPIO_CTL(I40E_LED3_0),
+ I40E_GLGEN_GPIO_CTL_LED_INIT);
+ wr32(hw, I40E_GLGEN_GPIO_CTL(I40E_LED3_1),
+ I40E_GLGEN_GPIO_CTL_LED_INIT);
+}
+
+/**
+ * i40e_ptp_set_pins_hw - Set HW GPIO pins
+ * @pf: Board private structure
+ *
+ * This function sets GPIO pins for PTP
+ **/
+static void i40e_ptp_set_pins_hw(struct i40e_pf *pf)
+{
+ const struct i40e_ptp_pins_settings *pins = pf->ptp_pins;
+ struct i40e_hw *hw = &pf->hw;
+
+ /* pin must be disabled before it may be used */
+ i40e_ptp_set_pin_hw(hw, I40E_SDP3_2, off);
+ i40e_ptp_set_pin_hw(hw, I40E_SDP3_3, off);
+ i40e_ptp_set_pin_hw(hw, I40E_GPIO_4, off);
+
+ i40e_ptp_set_pin_hw(hw, I40E_SDP3_2, pins->sdp3_2);
+ i40e_ptp_set_pin_hw(hw, I40E_SDP3_3, pins->sdp3_3);
+ i40e_ptp_set_pin_hw(hw, I40E_GPIO_4, pins->gpio_4);
+
+ i40e_ptp_set_led_hw(hw, I40E_LED2_0, pins->led2_0);
+ i40e_ptp_set_led_hw(hw, I40E_LED2_1, pins->led2_1);
+ i40e_ptp_set_led_hw(hw, I40E_LED3_0, pins->led3_0);
+ i40e_ptp_set_led_hw(hw, I40E_LED3_1, pins->led3_1);
+
+ dev_info(&pf->pdev->dev,
+ "PTP configuration set to: SDP3_2: %s, SDP3_3: %s, GPIO_4: %s.\n",
+ i40e_ptp_gpio_pin_state2str[pins->sdp3_2],
+ i40e_ptp_gpio_pin_state2str[pins->sdp3_3],
+ i40e_ptp_gpio_pin_state2str[pins->gpio_4]);
+}
+
+/**
+ * i40e_ptp_set_pins - set PTP pins in HW
+ * @pf: Board private structure
+ * @pins: PTP pins to be applied
+ *
+ * Validate and set PTP pins in HW for specific PF.
+ * Return 0 on success or negative value on error.
+ **/
+static int i40e_ptp_set_pins(struct i40e_pf *pf,
+ struct i40e_ptp_pins_settings *pins)
+{
+ enum i40e_can_set_pins_t pin_caps = i40e_can_set_pins(pf);
+ int i = 0;
+
+ if (pin_caps == CANT_DO_PINS)
+ return -EOPNOTSUPP;
+ else if (pin_caps == CAN_DO_PINS)
+ return 0;
+
+ if (pins->sdp3_2 == invalid)
+ pins->sdp3_2 = pf->ptp_pins->sdp3_2;
+ if (pins->sdp3_3 == invalid)
+ pins->sdp3_3 = pf->ptp_pins->sdp3_3;
+ if (pins->gpio_4 == invalid)
+ pins->gpio_4 = pf->ptp_pins->gpio_4;
+ while (i40e_ptp_pin_led_allowed_states[i].sdp3_2 != end) {
+ if (pins->sdp3_2 == i40e_ptp_pin_led_allowed_states[i].sdp3_2 &&
+ pins->sdp3_3 == i40e_ptp_pin_led_allowed_states[i].sdp3_3 &&
+ pins->gpio_4 == i40e_ptp_pin_led_allowed_states[i].gpio_4) {
+ pins->led2_0 =
+ i40e_ptp_pin_led_allowed_states[i].led2_0;
+ pins->led2_1 =
+ i40e_ptp_pin_led_allowed_states[i].led2_1;
+ pins->led3_0 =
+ i40e_ptp_pin_led_allowed_states[i].led3_0;
+ pins->led3_1 =
+ i40e_ptp_pin_led_allowed_states[i].led3_1;
+ break;
+ }
+ i++;
+ }
+ if (i40e_ptp_pin_led_allowed_states[i].sdp3_2 == end) {
+ dev_warn(&pf->pdev->dev,
+ "Unsupported PTP pin configuration: SDP3_2: %s, SDP3_3: %s, GPIO_4: %s.\n",
+ i40e_ptp_gpio_pin_state2str[pins->sdp3_2],
+ i40e_ptp_gpio_pin_state2str[pins->sdp3_3],
+ i40e_ptp_gpio_pin_state2str[pins->gpio_4]);
+
+ return -EPERM;
+ }
+ memcpy(pf->ptp_pins, pins, sizeof(*pins));
+ i40e_ptp_set_pins_hw(pf);
+ i40_ptp_reset_timing_events(pf);
+
+ return 0;
+}
+
+/**
+ * i40e_ptp_alloc_pins - allocate PTP pins structure
+ * @pf: Board private structure
+ *
+ * Allocate and initialize the PTP pins structure.
+ **/
+int i40e_ptp_alloc_pins(struct i40e_pf *pf)
+{
+ if (!i40e_is_ptp_pin_dev(&pf->hw))
+ return 0;
+
+ pf->ptp_pins =
+ kzalloc(sizeof(struct i40e_ptp_pins_settings), GFP_KERNEL);
+
+ if (!pf->ptp_pins) {
+ dev_warn(&pf->pdev->dev, "Cannot allocate memory for PTP pins structure.\n");
+ return -I40E_ERR_NO_MEMORY;
+ }
+
+ pf->ptp_pins->sdp3_2 = off;
+ pf->ptp_pins->sdp3_3 = off;
+ pf->ptp_pins->gpio_4 = off;
+ pf->ptp_pins->led2_0 = high;
+ pf->ptp_pins->led2_1 = high;
+ pf->ptp_pins->led3_0 = high;
+ pf->ptp_pins->led3_1 = high;
+
+ /* Use PF0 to set pins in HW. Return success for user space tools */
+ if (pf->hw.pf_id)
+ return 0;
+
+ i40e_ptp_init_leds_hw(&pf->hw);
+ i40e_ptp_set_pins_hw(pf);
+
+ return 0;
+}
+
+/**
* i40e_ptp_set_timestamp_mode - setup hardware for requested timestamp mode
* @pf: Board private structure
* @config: hwtstamp settings requested or saved
@@ -545,6 +1190,21 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf,
struct i40e_hw *hw = &pf->hw;
u32 tsyntype, regval;
+ /* Select the external trigger as the event source */
+ regval = rd32(hw, I40E_PRTTSYN_AUX_0(0));
+ /* Bits 17:16 are EVNTLVL; 01b selects the rising edge */
+ regval &= 0;
+ regval |= (1 << I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT);
+ /* regval now has only bit 16 set, i.e. EVNTLVL = 01b */
+ wr32(hw, I40E_PRTTSYN_AUX_0(0), regval);
+
+ /* Enable interrupts */
+ regval = rd32(hw, I40E_PRTTSYN_CTL0);
+ regval |= 1 << I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT;
+ wr32(hw, I40E_PRTTSYN_CTL0, regval);
+
+ INIT_WORK(&pf->ptp_extts0_work, i40e_ptp_extts0_work);
+
/* Reserved for future extensions. */
if (config->flags)
return -EINVAL;
@@ -688,6 +1348,45 @@ int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr)
}
/**
+ * i40e_init_pin_config - initialize pins.
+ * @pf: private board structure
+ *
+ * Initialize pins for external clock source.
+ * Return 0 on success or error code on failure.
+ **/
+static int i40e_init_pin_config(struct i40e_pf *pf)
+{
+ int i;
+
+ pf->ptp_caps.n_pins = 3;
+ pf->ptp_caps.n_ext_ts = 2;
+ pf->ptp_caps.pps = 1;
+ pf->ptp_caps.n_per_out = 2;
+
+ pf->ptp_caps.pin_config = kcalloc(pf->ptp_caps.n_pins,
+ sizeof(*pf->ptp_caps.pin_config),
+ GFP_KERNEL);
+ if (!pf->ptp_caps.pin_config)
+ return -ENOMEM;
+
+ for (i = 0; i < pf->ptp_caps.n_pins; i++) {
+ snprintf(pf->ptp_caps.pin_config[i].name,
+ sizeof(pf->ptp_caps.pin_config[i].name),
+ "%s", sdp_desc[i].name);
+ pf->ptp_caps.pin_config[i].index = sdp_desc[i].index;
+ pf->ptp_caps.pin_config[i].func = PTP_PF_NONE;
+ pf->ptp_caps.pin_config[i].chan = sdp_desc[i].chan;
+ }
+
+ pf->ptp_caps.verify = i40e_ptp_verify;
+ pf->ptp_caps.enable = i40e_ptp_feature_enable;
+
+ return 0;
+}
+
+/**
* i40e_ptp_create_clock - Create PTP clock device for userspace
* @pf: Board private structure
*
@@ -707,13 +1406,16 @@ static long i40e_ptp_create_clock(struct i40e_pf *pf)
sizeof(pf->ptp_caps.name) - 1);
pf->ptp_caps.owner = THIS_MODULE;
pf->ptp_caps.max_adj = 999999999;
- pf->ptp_caps.n_ext_ts = 0;
- pf->ptp_caps.pps = 0;
pf->ptp_caps.adjfreq = i40e_ptp_adjfreq;
pf->ptp_caps.adjtime = i40e_ptp_adjtime;
pf->ptp_caps.gettimex64 = i40e_ptp_gettimex;
pf->ptp_caps.settime64 = i40e_ptp_settime;
- pf->ptp_caps.enable = i40e_ptp_feature_enable;
+ if (i40e_is_ptp_pin_dev(&pf->hw)) {
+ int err = i40e_init_pin_config(pf);
+
+ if (err)
+ return err;
+ }
/* Attempt to register the clock before enabling the hardware. */
pf->ptp_clock = ptp_clock_register(&pf->ptp_caps, &pf->pdev->dev);
@@ -843,6 +1545,8 @@ void i40e_ptp_init(struct i40e_pf *pf)
/* Restore the clock time based on last known value */
i40e_ptp_restore_hw_time(pf);
}
+
+ i40e_ptp_set_1pps_signal_hw(pf);
}
/**
@@ -854,6 +1558,9 @@ void i40e_ptp_init(struct i40e_pf *pf)
**/
void i40e_ptp_stop(struct i40e_pf *pf)
{
+ struct i40e_hw *hw = &pf->hw;
+ u32 regval;
+
pf->flags &= ~I40E_FLAG_PTP;
pf->ptp_tx = false;
pf->ptp_rx = false;
@@ -872,4 +1579,21 @@ void i40e_ptp_stop(struct i40e_pf *pf)
dev_info(&pf->pdev->dev, "%s: removed PHC on %s\n", __func__,
pf->vsi[pf->lan_vsi]->netdev->name);
}
+
+ if (i40e_is_ptp_pin_dev(&pf->hw)) {
+ i40e_ptp_set_pin_hw(hw, I40E_SDP3_2, off);
+ i40e_ptp_set_pin_hw(hw, I40E_SDP3_3, off);
+ i40e_ptp_set_pin_hw(hw, I40E_GPIO_4, off);
+ }
+
+ regval = rd32(hw, I40E_PRTTSYN_AUX_0(0));
+ regval &= ~I40E_PRTTSYN_AUX_0_PTPFLAG_MASK;
+ wr32(hw, I40E_PRTTSYN_AUX_0(0), regval);
+
+ /* Disable interrupts */
+ regval = rd32(hw, I40E_PRTTSYN_CTL0);
+ regval &= ~I40E_PRTTSYN_CTL0_EVENT_INT_ENA_MASK;
+ wr32(hw, I40E_PRTTSYN_CTL0, regval);
+
+ i40e_ptp_free_pins(pf);
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h
index 36f7b27a04ae..8d0588a27a05 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_register.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_register.h
@@ -182,11 +182,20 @@
#define I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK I40E_MASK(0x3, I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT)
#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT 3
#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT 4
+#define I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT 5
+#define I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT 6
#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT 7
#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK I40E_MASK(0x7, I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT)
#define I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT 11
#define I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT 12
#define I40E_GLGEN_GPIO_CTL_LED_MODE_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT 19
+#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT 20
+#define I40E_GLGEN_GPIO_SET 0x00088184 /* Reset: POR */
+#define I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT 5
+#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT 6
#define I40E_GLGEN_MDIO_I2C_SEL(_i) (0x000881C0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
#define I40E_GLGEN_MSCA(_i) (0x0008818C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
#define I40E_GLGEN_MSCA_MDIADD_SHIFT 0
@@ -540,6 +549,7 @@
#define I40E_PF_PCI_CIAA_VF_NUM_SHIFT 12
#define I40E_PF_PCI_CIAD 0x0009C100 /* Reset: FLR */
#define I40E_PRTPM_EEE_STAT 0x001E4320 /* Reset: GLOBR */
+#define I40E_PFPCI_SUBSYSID 0x000BE100 /* Reset: PCIR */
#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT 30
#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT)
#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT 31
@@ -742,6 +752,8 @@
#define I40E_PRTTSYN_CTL0 0x001E4200 /* Reset: GLOBR */
#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT 1
#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT 2
+#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT)
#define I40E_PRTTSYN_CTL0_PF_ID_SHIFT 8
#define I40E_PRTTSYN_CTL0_PF_ID_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL0_PF_ID_SHIFT)
#define I40E_PRTTSYN_CTL0_TSYNENA_SHIFT 31
@@ -760,7 +772,10 @@
#define I40E_PRTTSYN_INC_L 0x001E4040 /* Reset: GLOBR */
#define I40E_PRTTSYN_RXTIME_H(_i) (0x00085040 + ((_i) * 32)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_PRTTSYN_RXTIME_L(_i) (0x000850C0 + ((_i) * 32)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_PRTTSYN_RXTIME_L_MAX_INDEX 3
#define I40E_PRTTSYN_STAT_0 0x001E4220 /* Reset: GLOBR */
+#define I40E_PRTTSYN_STAT_0_EVENT0_SHIFT 0
+#define I40E_PRTTSYN_STAT_0_EVENT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_EVENT0_SHIFT)
#define I40E_PRTTSYN_STAT_0_TXTIME_SHIFT 4
#define I40E_PRTTSYN_STAT_0_TXTIME_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TXTIME_SHIFT)
#define I40E_PRTTSYN_STAT_1 0x00085140 /* Reset: CORER */
@@ -768,6 +783,20 @@
#define I40E_PRTTSYN_TIME_L 0x001E4100 /* Reset: GLOBR */
#define I40E_PRTTSYN_TXTIME_H 0x001E41E0 /* Reset: GLOBR */
#define I40E_PRTTSYN_TXTIME_L 0x001E41C0 /* Reset: GLOBR */
+#define I40E_PRTTSYN_EVNT_H(_i) (0x001E40C0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
+#define I40E_PRTTSYN_EVNT_L(_i) (0x001E4080 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
+#define I40E_PRTTSYN_AUX_0(_i) (0x001E42A0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
+#define I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT 0
+#define I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT 1
+#define I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT 16
+#define I40E_PRTTSYN_AUX_0_PTPFLAG_SHIFT 17
+#define I40E_PRTTSYN_AUX_0_PTPFLAG_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_0_PTPFLAG_SHIFT)
+#define I40E_PRTTSYN_AUX_1(_i) (0x001E42E0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
+#define I40E_PRTTSYN_AUX_1_INSTNT_SHIFT 0
+#define I40E_PRTTSYN_TGT_H(_i) (0x001E4180 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
+#define I40E_PRTTSYN_TGT_L(_i) (0x001E4140 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
+#define I40E_PRTTSYN_CLKO(_i) (0x001E4240 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
+#define I40E_PRTTSYN_ADJ 0x001E4280 /* Reset: GLOBR */
#define I40E_GL_MDET_RX 0x0012A510 /* Reset: CORER */
#define I40E_GL_MDET_RX_FUNCTION_SHIFT 0
#define I40E_GL_MDET_RX_FUNCTION_MASK I40E_MASK(0xFF, I40E_GL_MDET_RX_FUNCTION_SHIFT)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index eff0a30790dd..472f56b360b8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -1160,12 +1160,12 @@ static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
}
/**
- * i40e_getnum_vf_vsi_vlan_filters
+ * __i40e_getnum_vf_vsi_vlan_filters
* @vsi: pointer to the vsi
*
* called to get the number of VLANs offloaded on this VF
**/
-static int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
+static int __i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
{
struct i40e_mac_filter *f;
u16 num_vlans = 0, bkt;
@@ -1179,6 +1179,23 @@ static int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
}
/**
+ * i40e_getnum_vf_vsi_vlan_filters
+ * @vsi: pointer to the vsi
+ *
+ * Wrapper around __i40e_getnum_vf_vsi_vlan_filters() that acquires the
+ * mac_filter_hash_lock spinlock.
+ **/
+static int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
+{
+ int num_vlans;
+
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+ num_vlans = __i40e_getnum_vf_vsi_vlan_filters(vsi);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
+
+ return num_vlans;
+}
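+
+/* This is the common __locked/unlocked split: the double-underscore
+ * variant assumes mac_filter_hash_lock is already held, as in
+ * i40e_get_vlan_list_sync() below, while the wrapper takes and releases
+ * the lock itself.
+ */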
+
+/**
* i40e_get_vlan_list_sync
* @vsi: pointer to the VSI
* @num_vlans: number of VLANs in mac_filter_hash, returned to caller
@@ -1195,7 +1212,7 @@ static void i40e_get_vlan_list_sync(struct i40e_vsi *vsi, u16 *num_vlans,
int bkt;
spin_lock_bh(&vsi->mac_filter_hash_lock);
- *num_vlans = i40e_getnum_vf_vsi_vlan_filters(vsi);
+ *num_vlans = __i40e_getnum_vf_vsi_vlan_filters(vsi);
*vlan_list = kcalloc(*num_vlans, sizeof(**vlan_list), GFP_ATOMIC);
if (!(*vlan_list))
goto err;
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index 90793b36126e..68c80f04113c 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -186,12 +186,6 @@ enum iavf_state_t {
__IAVF_RUNNING, /* opened, working */
};
-enum iavf_critical_section_t {
- __IAVF_IN_CRITICAL_TASK, /* cannot be interrupted */
- __IAVF_IN_CLIENT_TASK,
- __IAVF_IN_REMOVE_TASK, /* device being removed */
-};
-
#define IAVF_CLOUD_FIELD_OMAC 0x01
#define IAVF_CLOUD_FIELD_IMAC 0x02
#define IAVF_CLOUD_FIELD_IVLAN 0x04
@@ -236,6 +230,9 @@ struct iavf_adapter {
struct iavf_q_vector *q_vectors;
struct list_head vlan_filter_list;
struct list_head mac_filter_list;
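+ /* these mutexes replace the __IAVF_IN_CRITICAL_TASK,
+ * __IAVF_IN_CLIENT_TASK and __IAVF_IN_REMOVE_TASK bits of the
+ * removed crit_section bitmap
+ */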
+ struct mutex crit_lock;
+ struct mutex client_lock;
+ struct mutex remove_lock;
/* Lock to protect accesses to MAC and VLAN lists */
spinlock_t mac_vlan_list_lock;
char misc_vector_name[IFNAMSIZ + 9];
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
index af43fbd8cb75..edbeb27213f8 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
@@ -1352,8 +1352,7 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
if (!fltr)
return -ENOMEM;
- while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
- &adapter->crit_section)) {
+ while (!mutex_trylock(&adapter->crit_lock)) {
if (--count == 0) {
kfree(fltr);
return -EINVAL;
@@ -1378,7 +1377,7 @@ ret:
if (err && fltr)
kfree(fltr);
- clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
+ mutex_unlock(&adapter->crit_lock);
return err;
}
@@ -1563,8 +1562,7 @@ iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter,
return -EINVAL;
}
- while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
- &adapter->crit_section)) {
+ while (!mutex_trylock(&adapter->crit_lock)) {
if (--count == 0) {
kfree(rss_new);
return -EINVAL;
@@ -1600,7 +1598,7 @@ iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter,
if (!err)
mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
- clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
+ mutex_unlock(&adapter->crit_lock);
if (!rss_new_add)
kfree(rss_new);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 606a01ce4073..23762a7ef740 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -132,6 +132,27 @@ enum iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw,
}
/**
+ * iavf_lock_timeout - try to lock mutex but give up after timeout
+ * @lock: mutex that should be locked
+ * @msecs: timeout in msecs
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int iavf_lock_timeout(struct mutex *lock, unsigned int msecs)
+{
+ unsigned int wait, delay = 10;
+
+ for (wait = 0; wait < msecs; wait += delay) {
+ if (mutex_trylock(lock))
+ return 0;
+
+ msleep(delay);
+ }
+
+ return -1;
+}
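+
+/* Typical usage, as in the reset path below - bail out and reschedule
+ * when the lock cannot be taken in time:
+ *
+ *	if (iavf_lock_timeout(&adapter->crit_lock, 200)) {
+ *		schedule_work(&adapter->reset_task);
+ *		return;
+ *	}
+ *
+ * The lock is polled in 10 msec steps, so the effective timeout is
+ * quantized to multiples of that delay.
+ */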
+
+/**
* iavf_schedule_reset - Set the flags and schedule a reset event
* @adapter: board private structure
**/
@@ -1916,7 +1937,7 @@ static void iavf_watchdog_task(struct work_struct *work)
struct iavf_hw *hw = &adapter->hw;
u32 reg_val;
- if (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section))
+ if (!mutex_trylock(&adapter->crit_lock))
goto restart_watchdog;
if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
@@ -1934,8 +1955,7 @@ static void iavf_watchdog_task(struct work_struct *work)
adapter->state = __IAVF_STARTUP;
adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
queue_delayed_work(iavf_wq, &adapter->init_task, 10);
- clear_bit(__IAVF_IN_CRITICAL_TASK,
- &adapter->crit_section);
+ mutex_unlock(&adapter->crit_lock);
/* Don't reschedule the watchdog, since we've restarted
* the init task. When init_task contacts the PF and
* gets everything set up again, it'll restart the
@@ -1945,14 +1965,13 @@ static void iavf_watchdog_task(struct work_struct *work)
}
adapter->aq_required = 0;
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
- clear_bit(__IAVF_IN_CRITICAL_TASK,
- &adapter->crit_section);
+ mutex_unlock(&adapter->crit_lock);
queue_delayed_work(iavf_wq,
&adapter->watchdog_task,
msecs_to_jiffies(10));
goto watchdog_done;
case __IAVF_RESETTING:
- clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
+ mutex_unlock(&adapter->crit_lock);
queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2);
return;
case __IAVF_DOWN:
@@ -1975,7 +1994,7 @@ static void iavf_watchdog_task(struct work_struct *work)
}
break;
case __IAVF_REMOVE:
- clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
+ mutex_unlock(&adapter->crit_lock);
return;
default:
goto restart_watchdog;
@@ -1984,7 +2003,6 @@ static void iavf_watchdog_task(struct work_struct *work)
/* check for hw reset */
reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK;
if (!reg_val) {
- adapter->state = __IAVF_RESETTING;
adapter->flags |= IAVF_FLAG_RESET_PENDING;
adapter->aq_required = 0;
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
@@ -1998,7 +2016,7 @@ watchdog_done:
if (adapter->state == __IAVF_RUNNING ||
adapter->state == __IAVF_COMM_FAILED)
iavf_detect_recover_hung(&adapter->vsi);
- clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
+ mutex_unlock(&adapter->crit_lock);
restart_watchdog:
if (adapter->aq_required)
queue_delayed_work(iavf_wq, &adapter->watchdog_task,
@@ -2062,7 +2080,7 @@ static void iavf_disable_vf(struct iavf_adapter *adapter)
memset(adapter->vf_res, 0, IAVF_VIRTCHNL_VF_RESOURCE_SIZE);
iavf_shutdown_adminq(&adapter->hw);
adapter->netdev->flags &= ~IFF_UP;
- clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
+ mutex_unlock(&adapter->crit_lock);
adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
adapter->state = __IAVF_DOWN;
wake_up(&adapter->down_waitqueue);
@@ -2095,11 +2113,14 @@ static void iavf_reset_task(struct work_struct *work)
/* When device is being removed it doesn't make sense to run the reset
* task, just return in such a case.
*/
- if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
+ if (mutex_is_locked(&adapter->remove_lock))
return;
- while (test_and_set_bit(__IAVF_IN_CLIENT_TASK,
- &adapter->crit_section))
+ if (iavf_lock_timeout(&adapter->crit_lock, 200)) {
+ schedule_work(&adapter->reset_task);
+ return;
+ }
+ while (!mutex_trylock(&adapter->client_lock))
usleep_range(500, 1000);
if (CLIENT_ENABLED(adapter)) {
adapter->flags &= ~(IAVF_FLAG_CLIENT_NEEDS_OPEN |
@@ -2151,7 +2172,7 @@ static void iavf_reset_task(struct work_struct *work)
dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
reg_val);
iavf_disable_vf(adapter);
- clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section);
+ mutex_unlock(&adapter->client_lock);
return; /* Do not attempt to reinit. It's dead, Jim. */
}
@@ -2278,13 +2299,13 @@ continue_reset:
adapter->state = __IAVF_DOWN;
wake_up(&adapter->down_waitqueue);
}
- clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section);
- clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
+ mutex_unlock(&adapter->client_lock);
+ mutex_unlock(&adapter->crit_lock);
return;
reset_err:
- clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section);
- clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
+ mutex_unlock(&adapter->client_lock);
+ mutex_unlock(&adapter->crit_lock);
dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
iavf_close(netdev);
}
@@ -2312,6 +2333,8 @@ static void iavf_adminq_task(struct work_struct *work)
if (!event.msg_buf)
goto out;
+ if (iavf_lock_timeout(&adapter->crit_lock, 200))
+ goto freedom;
do {
ret = iavf_clean_arq_element(hw, &event, &pending);
v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
@@ -2325,6 +2348,7 @@ static void iavf_adminq_task(struct work_struct *work)
if (pending != 0)
memset(event.msg_buf, 0, IAVF_MAX_AQ_BUF_SIZE);
} while (pending);
+ mutex_unlock(&adapter->crit_lock);
if ((adapter->flags &
(IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) ||
@@ -2391,7 +2415,7 @@ static void iavf_client_task(struct work_struct *work)
* later.
*/
- if (test_and_set_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section))
+ if (!mutex_trylock(&adapter->client_lock))
return;
if (adapter->flags & IAVF_FLAG_SERVICE_CLIENT_REQUESTED) {
@@ -2414,7 +2438,7 @@ static void iavf_client_task(struct work_struct *work)
adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_OPEN;
}
out:
- clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section);
+ mutex_unlock(&adapter->client_lock);
}
/**
@@ -3017,8 +3041,7 @@ static int iavf_configure_clsflower(struct iavf_adapter *adapter,
if (!filter)
return -ENOMEM;
- while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
- &adapter->crit_section)) {
+ while (!mutex_trylock(&adapter->crit_lock)) {
if (--count == 0)
goto err;
udelay(1);
@@ -3049,7 +3072,7 @@ err:
if (err)
kfree(filter);
- clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
+ mutex_unlock(&adapter->crit_lock);
return err;
}
@@ -3196,8 +3219,7 @@ static int iavf_open(struct net_device *netdev)
return -EIO;
}
- while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
- &adapter->crit_section))
+ while (!mutex_trylock(&adapter->crit_lock))
usleep_range(500, 1000);
if (adapter->state != __IAVF_DOWN) {
@@ -3232,7 +3254,7 @@ static int iavf_open(struct net_device *netdev)
iavf_irq_enable(adapter, true);
- clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
+ mutex_unlock(&adapter->crit_lock);
return 0;
@@ -3244,7 +3266,7 @@ err_setup_rx:
err_setup_tx:
iavf_free_all_tx_resources(adapter);
err_unlock:
- clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
+ mutex_unlock(&adapter->crit_lock);
return err;
}
@@ -3268,8 +3290,7 @@ static int iavf_close(struct net_device *netdev)
if (adapter->state <= __IAVF_DOWN_PENDING)
return 0;
- while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
- &adapter->crit_section))
+ while (!mutex_trylock(&adapter->crit_lock))
usleep_range(500, 1000);
set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
@@ -3280,7 +3301,7 @@ static int iavf_close(struct net_device *netdev)
adapter->state = __IAVF_DOWN_PENDING;
iavf_free_traffic_irqs(adapter);
- clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
+ mutex_unlock(&adapter->crit_lock);
/* We explicitly don't free resources here because the hardware is
* still active and can DMA into memory. Resources are cleared in
@@ -3629,6 +3650,10 @@ static void iavf_init_task(struct work_struct *work)
init_task.work);
struct iavf_hw *hw = &adapter->hw;
+ if (iavf_lock_timeout(&adapter->crit_lock, 5000)) {
+ dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __func__);
+ return;
+ }
switch (adapter->state) {
case __IAVF_STARTUP:
if (iavf_startup(adapter) < 0)
@@ -3641,14 +3666,14 @@ static void iavf_init_task(struct work_struct *work)
case __IAVF_INIT_GET_RESOURCES:
if (iavf_init_get_resources(adapter) < 0)
goto init_failed;
- return;
+ goto out;
default:
goto init_failed;
}
queue_delayed_work(iavf_wq, &adapter->init_task,
msecs_to_jiffies(30));
- return;
+ goto out;
init_failed:
if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) {
dev_err(&adapter->pdev->dev,
@@ -3657,9 +3682,11 @@ init_failed:
iavf_shutdown_adminq(hw);
adapter->state = __IAVF_STARTUP;
queue_delayed_work(iavf_wq, &adapter->init_task, HZ * 5);
- return;
+ goto out;
}
queue_delayed_work(iavf_wq, &adapter->init_task, HZ);
+out:
+ mutex_unlock(&adapter->crit_lock);
}
/**
@@ -3676,9 +3703,12 @@ static void iavf_shutdown(struct pci_dev *pdev)
if (netif_running(netdev))
iavf_close(netdev);
+ if (iavf_lock_timeout(&adapter->crit_lock, 5000))
+ dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __func__);
/* Prevent the watchdog from running. */
adapter->state = __IAVF_REMOVE;
adapter->aq_required = 0;
+ mutex_unlock(&adapter->crit_lock);
#ifdef CONFIG_PM
pci_save_state(pdev);
@@ -3772,6 +3802,9 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* set up the locks for the AQ, do this only once in probe
* and destroy them only once in remove
*/
+ mutex_init(&adapter->crit_lock);
+ mutex_init(&adapter->client_lock);
+ mutex_init(&adapter->remove_lock);
mutex_init(&hw->aq.asq_mutex);
mutex_init(&hw->aq.arq_mutex);
@@ -3823,8 +3856,7 @@ static int __maybe_unused iavf_suspend(struct device *dev_d)
netif_device_detach(netdev);
- while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
- &adapter->crit_section))
+ while (!mutex_trylock(&adapter->crit_lock))
usleep_range(500, 1000);
if (netif_running(netdev)) {
@@ -3835,7 +3867,7 @@ static int __maybe_unused iavf_suspend(struct device *dev_d)
iavf_free_misc_irq(adapter);
iavf_reset_interrupt_capability(adapter);
- clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
+ mutex_unlock(&adapter->crit_lock);
return 0;
}
@@ -3897,7 +3929,7 @@ static void iavf_remove(struct pci_dev *pdev)
struct iavf_hw *hw = &adapter->hw;
int err;
/* Indicate we are in remove and not to run reset_task */
- set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section);
+ mutex_lock(&adapter->remove_lock);
cancel_delayed_work_sync(&adapter->init_task);
cancel_work_sync(&adapter->reset_task);
cancel_delayed_work_sync(&adapter->client_task);
@@ -3912,10 +3944,6 @@ static void iavf_remove(struct pci_dev *pdev)
err);
}
- /* Shut down all the garbage mashers on the detention level */
- adapter->state = __IAVF_REMOVE;
- adapter->aq_required = 0;
- adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
iavf_request_reset(adapter);
msleep(50);
/* If the FW isn't responding, kick it once, but only once. */
@@ -3923,6 +3951,13 @@ static void iavf_remove(struct pci_dev *pdev)
iavf_request_reset(adapter);
msleep(50);
}
+ if (iavf_lock_timeout(&adapter->crit_lock, 5000))
+ dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __func__);
+
+ /* Shut down all the garbage mashers on the detention level */
+ adapter->state = __IAVF_REMOVE;
+ adapter->aq_required = 0;
+ adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
iavf_free_all_tx_resources(adapter);
iavf_free_all_rx_resources(adapter);
iavf_misc_irq_disable(adapter);
@@ -3942,6 +3977,11 @@ static void iavf_remove(struct pci_dev *pdev)
/* destroy the locks only once, here */
mutex_destroy(&hw->aq.arq_mutex);
mutex_destroy(&hw->aq.asq_mutex);
+ mutex_destroy(&adapter->client_lock);
+ mutex_unlock(&adapter->crit_lock);
+ mutex_destroy(&adapter->crit_lock);
+ mutex_unlock(&adapter->remove_lock);
+ mutex_destroy(&adapter->remove_lock);
iounmap(hw->hw_addr);
pci_release_regions(pdev);
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c
index 91b545ab8b8f..8c863d64930b 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.c
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.c
@@ -475,7 +475,7 @@ struct ice_pf *ice_allocate_pf(struct device *dev)
{
struct devlink *devlink;
- devlink = devlink_alloc(&ice_devlink_ops, sizeof(struct ice_pf));
+ devlink = devlink_alloc(&ice_devlink_ops, sizeof(struct ice_pf), dev);
if (!devlink)
return NULL;
@@ -502,7 +502,7 @@ int ice_devlink_register(struct ice_pf *pf)
struct device *dev = ice_pf_to_dev(pf);
int err;
- err = devlink_register(devlink, dev);
+ err = devlink_register(devlink);
if (err) {
dev_err(dev, "devlink registration failed: %d\n", err);
return err;
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index fe2ded775f25..60d55d043a94 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -6570,12 +6570,12 @@ event_after:
}
/**
- * ice_do_ioctl - Access the hwtstamp interface
+ * ice_eth_ioctl - Access the hwtstamp interface
* @netdev: network interface device structure
* @ifr: interface request data
* @cmd: ioctl command
*/
-static int ice_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+static int ice_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_pf *pf = np->vsi->back;
@@ -7241,7 +7241,7 @@ static const struct net_device_ops ice_netdev_ops = {
.ndo_change_mtu = ice_change_mtu,
.ndo_get_stats64 = ice_get_stats64,
.ndo_set_tx_maxrate = ice_set_tx_maxrate,
- .ndo_do_ioctl = ice_do_ioctl,
+ .ndo_eth_ioctl = ice_eth_ioctl,
.ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
.ndo_set_vf_mac = ice_set_vf_mac,
.ndo_get_vf_config = ice_get_vf_cfg,
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index e63ee3cca5ea..1277c5c7d099 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -492,6 +492,7 @@ static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
**/
static void igb_i21x_hw_doublecheck(struct e1000_hw *hw)
{
+ int failed_cnt = 3;
bool is_failed;
int i;
@@ -502,9 +503,12 @@ static void igb_i21x_hw_doublecheck(struct e1000_hw *hw)
is_failed = true;
array_wr32(E1000_MTA, i, hw->mac.mta_shadow[i]);
wrfl();
- break;
}
}
+ if (is_failed && --failed_cnt <= 0) {
+ hw_dbg("Failed to update MTA_REGISTER, too many retries");
+ break;
+ }
} while (is_failed);
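+
+ /* With failed_cnt = 3 the shadow table is re-written for at most
+ * three failing passes before giving up, rather than looping
+ * indefinitely on a stuck MTA register.
+ */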
}
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 636a1b1fb7e1..17f5c003c3df 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -2343,8 +2343,7 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
switch (stringset) {
case ETH_SS_TEST:
- memcpy(data, *igb_gstrings_test,
- IGB_TEST_LEN*ETH_GSTRING_LEN);
+ memcpy(data, igb_gstrings_test, sizeof(igb_gstrings_test));
break;
case ETH_SS_STATS:
for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++)
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 171a7a629b20..751de06019a0 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -2991,7 +2991,7 @@ static const struct net_device_ops igb_netdev_ops = {
.ndo_set_rx_mode = igb_set_rx_mode,
.ndo_set_mac_address = igb_set_mac,
.ndo_change_mtu = igb_change_mtu,
- .ndo_do_ioctl = igb_ioctl,
+ .ndo_eth_ioctl = igb_ioctl,
.ndo_tx_timeout = igb_tx_timeout,
.ndo_validate_addr = eth_validate_addr,
.ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid,
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 1bbe9862a758..d32e72d953c8 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -2657,7 +2657,7 @@ static const struct net_device_ops igbvf_netdev_ops = {
.ndo_set_rx_mode = igbvf_set_rx_mode,
.ndo_set_mac_address = igbvf_set_mac,
.ndo_change_mtu = igbvf_change_mtu,
- .ndo_do_ioctl = igbvf_ioctl,
+ .ndo_eth_ioctl = igbvf_ioctl,
.ndo_tx_timeout = igbvf_tx_timeout,
.ndo_vlan_rx_add_vid = igbvf_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = igbvf_vlan_rx_kill_vid,
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index 5901ed9fb545..a0ecfe5a4078 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -33,6 +33,8 @@ void igc_ethtool_set_ops(struct net_device *);
#define IGC_N_PEROUT 2
#define IGC_N_SDP 4
+#define MAX_FLEX_FILTER 32
+
enum igc_mac_filter_type {
IGC_MAC_FILTER_TYPE_DST = 0,
IGC_MAC_FILTER_TYPE_SRC
@@ -476,18 +478,28 @@ struct igc_q_vector {
};
enum igc_filter_match_flags {
- IGC_FILTER_FLAG_ETHER_TYPE = 0x1,
- IGC_FILTER_FLAG_VLAN_TCI = 0x2,
- IGC_FILTER_FLAG_SRC_MAC_ADDR = 0x4,
- IGC_FILTER_FLAG_DST_MAC_ADDR = 0x8,
+ IGC_FILTER_FLAG_ETHER_TYPE = BIT(0),
+ IGC_FILTER_FLAG_VLAN_TCI = BIT(1),
+ IGC_FILTER_FLAG_SRC_MAC_ADDR = BIT(2),
+ IGC_FILTER_FLAG_DST_MAC_ADDR = BIT(3),
+ IGC_FILTER_FLAG_USER_DATA = BIT(4),
+ IGC_FILTER_FLAG_VLAN_ETYPE = BIT(5),
};
struct igc_nfc_filter {
u8 match_flags;
u16 etype;
+ __be16 vlan_etype;
u16 vlan_tci;
u8 src_addr[ETH_ALEN];
u8 dst_addr[ETH_ALEN];
+ u8 user_data[8];
+ u8 user_mask[8];
+ u8 flex_index;
+ u8 rx_queue;
+ u8 prio;
+ u8 immediate_irq;
+ u8 drop;
};
struct igc_nfc_rule {
@@ -495,12 +507,24 @@ struct igc_nfc_rule {
struct igc_nfc_filter filter;
u32 location;
u16 action;
+ bool flex;
};
-/* IGC supports a total of 32 NFC rules: 16 MAC address based,, 8 VLAN priority
- * based, and 8 ethertype based.
+/* IGC supports a total of 32 NFC rules: 16 MAC address based, 8 VLAN priority
+ * based, 8 ethertype based and 32 Flex filter based rules.
*/
-#define IGC_MAX_RXNFC_RULES 32
+#define IGC_MAX_RXNFC_RULES 64
+
+struct igc_flex_filter {
+ u8 index;
+ u8 data[128];
+ u8 mask[16];
+ u8 length;
+ u8 rx_queue;
+ u8 prio;
+ u8 immediate_irq;
+ u8 drop;
+};
/* igc_desc_unused - calculate if we have unused descriptors */
static inline u16 igc_desc_unused(const struct igc_ring *ring)
diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c
index d0700d48ecf9..84f142f5e472 100644
--- a/drivers/net/ethernet/intel/igc/igc_base.c
+++ b/drivers/net/ethernet/intel/igc/igc_base.c
@@ -187,15 +187,7 @@ static s32 igc_init_phy_params_base(struct igc_hw *hw)
igc_check_for_copper_link(hw);
- /* Verify phy id and set remaining function pointers */
- switch (phy->id) {
- case I225_I_PHY_ID:
- phy->type = igc_phy_i225;
- break;
- default:
- ret_val = -IGC_ERR_PHY;
- goto out;
- }
+ phy->type = igc_phy_i225;
out:
return ret_val;
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index c3a5a5518790..c6315690e20f 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -17,11 +17,22 @@
#define IGC_WUC_PME_EN 0x00000002 /* PME Enable */
/* Wake Up Filter Control */
-#define IGC_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
-#define IGC_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
-#define IGC_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
-#define IGC_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */
-#define IGC_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
+#define IGC_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
+#define IGC_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
+#define IGC_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
+#define IGC_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */
+#define IGC_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
+#define IGC_WUFC_FLEX_HQ BIT(14) /* Flex Filters Host Queuing */
+#define IGC_WUFC_FLX0 BIT(16) /* Flexible Filter 0 Enable */
+#define IGC_WUFC_FLX1 BIT(17) /* Flexible Filter 1 Enable */
+#define IGC_WUFC_FLX2 BIT(18) /* Flexible Filter 2 Enable */
+#define IGC_WUFC_FLX3 BIT(19) /* Flexible Filter 3 Enable */
+#define IGC_WUFC_FLX4 BIT(20) /* Flexible Filter 4 Enable */
+#define IGC_WUFC_FLX5 BIT(21) /* Flexible Filter 5 Enable */
+#define IGC_WUFC_FLX6 BIT(22) /* Flexible Filter 6 Enable */
+#define IGC_WUFC_FLX7 BIT(23) /* Flexible Filter 7 Enable */
+
+#define IGC_WUFC_FILTER_MASK GENMASK(23, 14)
#define IGC_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */
@@ -46,6 +57,37 @@
/* Wake Up Packet Memory stores the first 128 bytes of the wake up packet */
#define IGC_WUPM_BYTES 128
+/* Wakeup Filter Control Extended */
+#define IGC_WUFC_EXT_FLX8 BIT(8) /* Flexible Filter 8 Enable */
+#define IGC_WUFC_EXT_FLX9 BIT(9) /* Flexible Filter 9 Enable */
+#define IGC_WUFC_EXT_FLX10 BIT(10) /* Flexible Filter 10 Enable */
+#define IGC_WUFC_EXT_FLX11 BIT(11) /* Flexible Filter 11 Enable */
+#define IGC_WUFC_EXT_FLX12 BIT(12) /* Flexible Filter 12 Enable */
+#define IGC_WUFC_EXT_FLX13 BIT(13) /* Flexible Filter 13 Enable */
+#define IGC_WUFC_EXT_FLX14 BIT(14) /* Flexible Filter 14 Enable */
+#define IGC_WUFC_EXT_FLX15 BIT(15) /* Flexible Filter 15 Enable */
+#define IGC_WUFC_EXT_FLX16 BIT(16) /* Flexible Filter 16 Enable */
+#define IGC_WUFC_EXT_FLX17 BIT(17) /* Flexible Filter 17 Enable */
+#define IGC_WUFC_EXT_FLX18 BIT(18) /* Flexible Filter 18 Enable */
+#define IGC_WUFC_EXT_FLX19 BIT(19) /* Flexible Filter 19 Enable */
+#define IGC_WUFC_EXT_FLX20 BIT(20) /* Flexible Filter 20 Enable */
+#define IGC_WUFC_EXT_FLX21 BIT(21) /* Flexible Filter 21 Enable */
+#define IGC_WUFC_EXT_FLX22 BIT(22) /* Flexible Filter 22 Enable */
+#define IGC_WUFC_EXT_FLX23 BIT(23) /* Flexible Filter 23 Enable */
+#define IGC_WUFC_EXT_FLX24 BIT(24) /* Flexible Filter 24 Enable */
+#define IGC_WUFC_EXT_FLX25 BIT(25) /* Flexible Filter 25 Enable */
+#define IGC_WUFC_EXT_FLX26 BIT(26) /* Flexible Filter 26 Enable */
+#define IGC_WUFC_EXT_FLX27 BIT(27) /* Flexible Filter 27 Enable */
+#define IGC_WUFC_EXT_FLX28 BIT(28) /* Flexible Filter 28 Enable */
+#define IGC_WUFC_EXT_FLX29 BIT(29) /* Flexible Filter 29 Enable */
+#define IGC_WUFC_EXT_FLX30 BIT(30) /* Flexible Filter 30 Enable */
+#define IGC_WUFC_EXT_FLX31 BIT(31) /* Flexible Filter 31 Enable */
+
+#define IGC_WUFC_EXT_FILTER_MASK GENMASK(31, 8)
+
+/* Physical Func Reset Done Indication */
+#define IGC_CTRL_EXT_LINK_MODE_MASK 0x00C00000
+
/* Loop limit on how long we wait for auto-negotiation to complete */
#define COPPER_LINK_UP_LIMIT 10
#define PHY_AUTO_NEG_LIMIT 45
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index fa4171860623..d3e84416248e 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -979,6 +979,12 @@ static int igc_ethtool_get_nfc_rule(struct igc_adapter *adapter,
eth_broadcast_addr(fsp->m_u.ether_spec.h_source);
}
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_USER_DATA) {
+ fsp->flow_type |= FLOW_EXT;
+ memcpy(fsp->h_ext.data, rule->filter.user_data, sizeof(fsp->h_ext.data));
+ memcpy(fsp->m_ext.data, rule->filter.user_mask, sizeof(fsp->m_ext.data));
+ }
+
mutex_unlock(&adapter->nfc_rule_lock);
return 0;
@@ -1215,6 +1221,30 @@ static void igc_ethtool_init_nfc_rule(struct igc_nfc_rule *rule,
ether_addr_copy(rule->filter.dst_addr,
fsp->h_u.ether_spec.h_dest);
}
+
+ /* VLAN etype matching */
+ if ((fsp->flow_type & FLOW_EXT) && fsp->h_ext.vlan_etype) {
+ rule->filter.vlan_etype = fsp->h_ext.vlan_etype;
+ rule->filter.match_flags |= IGC_FILTER_FLAG_VLAN_ETYPE;
+ }
+
+ /* Check for user defined data */
+ if ((fsp->flow_type & FLOW_EXT) &&
+ (fsp->h_ext.data[0] || fsp->h_ext.data[1])) {
+ rule->filter.match_flags |= IGC_FILTER_FLAG_USER_DATA;
+ memcpy(rule->filter.user_data, fsp->h_ext.data, sizeof(fsp->h_ext.data));
+ memcpy(rule->filter.user_mask, fsp->m_ext.data, sizeof(fsp->m_ext.data));
+ }
+
+ /* When multiple filter options, user data or a VLAN etype are set,
+ * fall back to a flex filter: match_flags & (match_flags - 1) is
+ * nonzero exactly when more than one match flag bit is set.
+ */
+ if ((rule->filter.match_flags & IGC_FILTER_FLAG_USER_DATA) ||
+ (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE) ||
+ (rule->filter.match_flags & (rule->filter.match_flags - 1)))
+ rule->flex = true;
+ else
+ rule->flex = false;
}
/**
@@ -1244,11 +1274,6 @@ static int igc_ethtool_check_nfc_rule(struct igc_adapter *adapter,
return -EINVAL;
}
- if (flags & (flags - 1)) {
- netdev_dbg(dev, "Rule with multiple matches not supported\n");
- return -EOPNOTSUPP;
- }
-
list_for_each_entry(tmp, &adapter->nfc_rule_list, list) {
if (!memcmp(&rule->filter, &tmp->filter,
sizeof(rule->filter)) &&
@@ -1280,12 +1305,6 @@ static int igc_ethtool_add_nfc_rule(struct igc_adapter *adapter,
return -EOPNOTSUPP;
}
- if ((fsp->flow_type & FLOW_EXT) &&
- fsp->m_ext.vlan_tci != htons(VLAN_PRIO_MASK)) {
- netdev_dbg(netdev, "VLAN mask not supported\n");
- return -EOPNOTSUPP;
- }
-
if (fsp->ring_cookie >= adapter->num_rx_queues) {
netdev_dbg(netdev, "Invalid action\n");
return -EINVAL;
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index e29aadbc6744..b7aab35c1132 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -3075,11 +3075,320 @@ static void igc_del_etype_filter(struct igc_adapter *adapter, u16 etype)
etype);
}
+static int igc_flex_filter_select(struct igc_adapter *adapter,
+ struct igc_flex_filter *input,
+ u32 *fhft)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u8 fhft_index;
+ u32 fhftsl;
+
+ if (input->index >= MAX_FLEX_FILTER) {
+ dev_err(&adapter->pdev->dev, "Wrong Flex Filter index selected!\n");
+ return -EINVAL;
+ }
+
+ /* Indirect table select register */
+ fhftsl = rd32(IGC_FHFTSL);
+ fhftsl &= ~IGC_FHFTSL_FTSL_MASK;
+ switch (input->index) {
+ case 0 ... 7:
+ fhftsl |= 0x00;
+ break;
+ case 8 ... 15:
+ fhftsl |= 0x01;
+ break;
+ case 16 ... 23:
+ fhftsl |= 0x02;
+ break;
+ case 24 ... 31:
+ fhftsl |= 0x03;
+ break;
+ }
+ wr32(IGC_FHFTSL, fhftsl);
+
+ /* Normalize index down to host table register */
+ fhft_index = input->index % 8;
+
+ *fhft = (fhft_index < 4) ? IGC_FHFT(fhft_index) :
+ IGC_FHFT_EXT(fhft_index - 4);
+
+ return 0;
+}
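+
+/* Illustrative example: input->index = 19 selects indirect table 0x02
+ * (indices 16..23) and normalizes to fhft_index = 19 % 8 = 3; since
+ * 3 < 4 the base register is IGC_FHFT(3) rather than IGC_FHFT_EXT().
+ */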
+
+static int igc_write_flex_filter_ll(struct igc_adapter *adapter,
+ struct igc_flex_filter *input)
+{
+ struct device *dev = &adapter->pdev->dev;
+ struct igc_hw *hw = &adapter->hw;
+ u8 *data = input->data;
+ u8 *mask = input->mask;
+ u32 queuing;
+ u32 fhft;
+ u32 wufc;
+ int ret;
+ int i;
+
+ /* Length has to be aligned to 8. Otherwise the filter will fail. Bail
+ * out early to avoid surprises later.
+ */
+ if (input->length % 8 != 0) {
+ dev_err(dev, "The length of a flex filter has to be 8 byte aligned!\n");
+ return -EINVAL;
+ }
+
+ /* Select corresponding flex filter register and get base for host table. */
+ ret = igc_flex_filter_select(adapter, input, &fhft);
+ if (ret)
+ return ret;
+
+ /* When adding a filter, globally disable the flex filter feature
+ * first, as recommended by the datasheet.
+ */
+ wufc = rd32(IGC_WUFC);
+ wufc &= ~IGC_WUFC_FLEX_HQ;
+ wr32(IGC_WUFC, wufc);
+
+ /* Configure filter */
+ queuing = input->length & IGC_FHFT_LENGTH_MASK;
+ queuing |= (input->rx_queue << IGC_FHFT_QUEUE_SHIFT) & IGC_FHFT_QUEUE_MASK;
+ queuing |= (input->prio << IGC_FHFT_PRIO_SHIFT) & IGC_FHFT_PRIO_MASK;
+
+ if (input->immediate_irq)
+ queuing |= IGC_FHFT_IMM_INT;
+
+ if (input->drop)
+ queuing |= IGC_FHFT_DROP;
+
+ wr32(fhft + 0xFC, queuing);
+
+ /* Write data (128 byte) and mask (128 bit) */
+ for (i = 0; i < 16; ++i) {
+ const size_t data_idx = i * 8;
+ const size_t row_idx = i * 16;
+ u32 dw0 =
+ (data[data_idx + 0] << 0) |
+ (data[data_idx + 1] << 8) |
+ (data[data_idx + 2] << 16) |
+ (data[data_idx + 3] << 24);
+ u32 dw1 =
+ (data[data_idx + 4] << 0) |
+ (data[data_idx + 5] << 8) |
+ (data[data_idx + 6] << 16) |
+ (data[data_idx + 7] << 24);
+ u32 tmp;
+
+ /* Write row: dw0, dw1 and mask */
+ wr32(fhft + row_idx, dw0);
+ wr32(fhft + row_idx + 4, dw1);
+
+ /* mask is only valid for MASK(7, 0) */
+ tmp = rd32(fhft + row_idx + 8);
+ tmp &= ~GENMASK(7, 0);
+ tmp |= mask[i];
+ wr32(fhft + row_idx + 8, tmp);
+ }
+
+ /* Enable filter. */
+ wufc |= IGC_WUFC_FLEX_HQ;
+ if (input->index > 8) {
+ /* Filter 0-7 are enabled via WUFC. The other 24 filters are not. */
+ u32 wufc_ext = rd32(IGC_WUFC_EXT);
+
+ wufc_ext |= (IGC_WUFC_EXT_FLX8 << (input->index - 8));
+
+ wr32(IGC_WUFC_EXT, wufc_ext);
+ } else {
+ wufc |= (IGC_WUFC_FLX0 << input->index);
+ }
+ wr32(IGC_WUFC, wufc);
+
+ dev_dbg(&adapter->pdev->dev, "Added flex filter %u to HW.\n",
+ input->index);
+
+ return 0;
+}
+
+static void igc_flex_filter_add_field(struct igc_flex_filter *flex,
+ const void *src, unsigned int offset,
+ size_t len, const void *mask)
+{
+ int i;
+
+ /* data */
+ memcpy(&flex->data[offset], src, len);
+
+ /* mask */
+ for (i = 0; i < len; ++i) {
+ const unsigned int idx = i + offset;
+ const u8 *ptr = mask;
+
+ if (mask) {
+ if (ptr[i])
+ flex->mask[idx / 8] |= BIT(idx % 8);
+
+ continue;
+ }
+
+ flex->mask[idx / 8] |= BIT(idx % 8);
+ }
+}
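+
+/* Illustrative example: adding a 6-byte MAC address at offset 0 with
+ * mask == NULL copies it into data[0..5] and sets bits 0-5 of mask[0]
+ * (0x3f) - one mask bit per filter byte, the layout consumed by
+ * igc_write_flex_filter_ll() above.
+ */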
+
+static int igc_find_avail_flex_filter_slot(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u32 wufc, wufc_ext;
+ int i;
+
+ wufc = rd32(IGC_WUFC);
+ wufc_ext = rd32(IGC_WUFC_EXT);
+
+ for (i = 0; i < MAX_FLEX_FILTER; i++) {
+ if (i < 8) {
+ if (!(wufc & (IGC_WUFC_FLX0 << i)))
+ return i;
+ } else {
+ if (!(wufc_ext & (IGC_WUFC_EXT_FLX8 << (i - 8))))
+ return i;
+ }
+ }
+
+ return -ENOSPC;
+}
+
+static bool igc_flex_filter_in_use(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u32 wufc, wufc_ext;
+
+ wufc = rd32(IGC_WUFC);
+ wufc_ext = rd32(IGC_WUFC_EXT);
+
+ if (wufc & IGC_WUFC_FILTER_MASK)
+ return true;
+
+ if (wufc_ext & IGC_WUFC_EXT_FILTER_MASK)
+ return true;
+
+ return false;
+}
+
+static int igc_add_flex_filter(struct igc_adapter *adapter,
+ struct igc_nfc_rule *rule)
+{
+ struct igc_flex_filter flex = { };
+ struct igc_nfc_filter *filter = &rule->filter;
+ unsigned int eth_offset, user_offset;
+ int ret, index;
+ bool vlan;
+
+ index = igc_find_avail_flex_filter_slot(adapter);
+ if (index < 0)
+ return -ENOSPC;
+
+ /* Construct the flex filter:
+ * -> dest_mac [6]
+ * -> src_mac [6]
+ * -> tpid [2]
+ * -> vlan tci [2]
+ * -> ether type [2]
+ * -> user data [8]
+ * -> = 26 bytes total => length rounded up to 32
+ */
+ flex.index = index;
+ flex.length = 32;
+ flex.rx_queue = rule->action;
+
+ vlan = rule->filter.vlan_tci || rule->filter.vlan_etype;
+ eth_offset = vlan ? 16 : 12;
+ user_offset = vlan ? 18 : 14;
+
+ /* Add destination MAC */
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR)
+ igc_flex_filter_add_field(&flex, &filter->dst_addr, 0,
+ ETH_ALEN, NULL);
+
+ /* Add source MAC */
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR)
+ igc_flex_filter_add_field(&flex, &filter->src_addr, 6,
+ ETH_ALEN, NULL);
+
+ /* Add VLAN etype */
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE)
+ igc_flex_filter_add_field(&flex, &filter->vlan_etype, 12,
+ sizeof(filter->vlan_etype),
+ NULL);
+
+ /* Add VLAN TCI */
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI)
+ igc_flex_filter_add_field(&flex, &filter->vlan_tci, 14,
+ sizeof(filter->vlan_tci), NULL);
+
+ /* Add Ether type */
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) {
+ __be16 etype = cpu_to_be16(filter->etype);
+
+ igc_flex_filter_add_field(&flex, &etype, eth_offset,
+ sizeof(etype), NULL);
+ }
+
+ /* Add user data */
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_USER_DATA)
+ igc_flex_filter_add_field(&flex, &filter->user_data,
+ user_offset,
+ sizeof(filter->user_data),
+ filter->user_mask);
+
+ /* Add it down to the hardware and enable it. */
+ ret = igc_write_flex_filter_ll(adapter, &flex);
+ if (ret)
+ return ret;
+
+ filter->flex_index = index;
+
+ return 0;
+}
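For reference, the byte offsets the construction above produces (derived from eth_offset/user_offset for the two cases):

/*                 untagged      VLAN-tagged
 * dst MAC          0..5          0..5
 * src MAC          6..11         6..11
 * VLAN etype         -          12..13
 * VLAN TCI           -          14..15
 * EtherType        12..13       16..17
 * user data        14..21       18..25
 */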
+
+static void igc_del_flex_filter(struct igc_adapter *adapter,
+ u16 reg_index)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u32 wufc;
+
+ /* Just disable the filter; the filter table itself is kept intact.
+ * A subsequent flex filter add will simply overwrite the old data.
+ */
+ if (reg_index >= 8) {
+ u32 wufc_ext = rd32(IGC_WUFC_EXT);
+
+ wufc_ext &= ~(IGC_WUFC_EXT_FLX8 << (reg_index - 8));
+ wr32(IGC_WUFC_EXT, wufc_ext);
+ } else {
+ wufc = rd32(IGC_WUFC);
+
+ wufc &= ~(IGC_WUFC_FLX0 << reg_index);
+ wr32(IGC_WUFC, wufc);
+ }
+
+ if (igc_flex_filter_in_use(adapter))
+ return;
+
+ /* No filters are in use, we may disable flex filters */
+ wufc = rd32(IGC_WUFC);
+ wufc &= ~IGC_WUFC_FLEX_HQ;
+ wr32(IGC_WUFC, wufc);
+}
+
static int igc_enable_nfc_rule(struct igc_adapter *adapter,
- const struct igc_nfc_rule *rule)
+ struct igc_nfc_rule *rule)
{
int err;
+ if (rule->flex)
+ return igc_add_flex_filter(adapter, rule);
+
if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) {
err = igc_add_etype_filter(adapter, rule->filter.etype,
rule->action);
@@ -3116,6 +3425,11 @@ static int igc_enable_nfc_rule(struct igc_adapter *adapter,
static void igc_disable_nfc_rule(struct igc_adapter *adapter,
const struct igc_nfc_rule *rule)
{
+ if (rule->flex) {
+ igc_del_flex_filter(adapter, rule->filter.flex_index);
+ return;
+ }
+
if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE)
igc_del_etype_filter(adapter, rule->filter.etype);
@@ -4811,6 +5125,7 @@ static irqreturn_t igc_msix_ring(int irq, void *data)
*/
static int igc_request_msix(struct igc_adapter *adapter)
{
+ unsigned int num_q_vectors = adapter->num_q_vectors;
int i = 0, err = 0, vector = 0, free_vector = 0;
struct net_device *netdev = adapter->netdev;
@@ -4819,7 +5134,13 @@ static int igc_request_msix(struct igc_adapter *adapter)
if (err)
goto err_out;
- for (i = 0; i < adapter->num_q_vectors; i++) {
+ if (num_q_vectors > MAX_Q_VECTORS) {
+ num_q_vectors = MAX_Q_VECTORS;
+ dev_warn(&adapter->pdev->dev,
+ "The number of queue vectors (%d) is higher than max allowed (%d)\n",
+ adapter->num_q_vectors, MAX_Q_VECTORS);
+ }
+ for (i = 0; i < num_q_vectors; i++) {
struct igc_q_vector *q_vector = adapter->q_vector[i];
vector++;
@@ -4898,20 +5219,12 @@ bool igc_has_link(struct igc_adapter *adapter)
* false until the igc_check_for_link establishes link
* for copper adapters ONLY
*/
- switch (hw->phy.media_type) {
- case igc_media_type_copper:
- if (!hw->mac.get_link_status)
- return true;
- hw->mac.ops.check_for_link(hw);
- link_active = !hw->mac.get_link_status;
- break;
- default:
- case igc_media_type_unknown:
- break;
- }
+ if (!hw->mac.get_link_status)
+ return true;
+ hw->mac.ops.check_for_link(hw);
+ link_active = !hw->mac.get_link_status;
- if (hw->mac.type == igc_i225 &&
- hw->phy.id == I225_I_PHY_ID) {
+ if (hw->mac.type == igc_i225) {
if (!netif_carrier_ok(adapter->netdev)) {
adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
} else if (!(adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)) {
@@ -4999,7 +5312,9 @@ static void igc_watchdog_task(struct work_struct *work)
adapter->tx_timeout_factor = 14;
break;
case SPEED_100:
- /* maybe add some timeout factor ? */
+ case SPEED_1000:
+ case SPEED_2500:
+ adapter->tx_timeout_factor = 7;
break;
}
@@ -5698,7 +6013,7 @@ static const struct net_device_ops igc_netdev_ops = {
.ndo_fix_features = igc_fix_features,
.ndo_set_features = igc_set_features,
.ndo_features_check = igc_features_check,
- .ndo_do_ioctl = igc_ioctl,
+ .ndo_eth_ioctl = igc_ioctl,
.ndo_setup_tc = igc_setup_tc,
.ndo_bpf = igc_bpf,
.ndo_xdp_xmit = igc_xdp_xmit,
diff --git a/drivers/net/ethernet/intel/igc/igc_phy.c b/drivers/net/ethernet/intel/igc/igc_phy.c
index 83aeb5e7076f..5cad31c3c7b0 100644
--- a/drivers/net/ethernet/intel/igc/igc_phy.c
+++ b/drivers/net/ethernet/intel/igc/igc_phy.c
@@ -249,8 +249,7 @@ static s32 igc_phy_setup_autoneg(struct igc_hw *hw)
return ret_val;
}
- if ((phy->autoneg_mask & ADVERTISE_2500_FULL) &&
- hw->phy.id == I225_I_PHY_ID) {
+ if (phy->autoneg_mask & ADVERTISE_2500_FULL) {
/* Read the MULTI GBT AN Control Register - reg 7.32 */
ret_val = phy->ops.read_reg(hw, (STANDARD_AN_REG_MASK <<
MMD_DEVADDR_SHIFT) |
@@ -390,8 +389,7 @@ static s32 igc_phy_setup_autoneg(struct igc_hw *hw)
ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL,
mii_1000t_ctrl_reg);
- if ((phy->autoneg_mask & ADVERTISE_2500_FULL) &&
- hw->phy.id == I225_I_PHY_ID)
+ if (phy->autoneg_mask & ADVERTISE_2500_FULL)
ret_val = phy->ops.write_reg(hw,
(STANDARD_AN_REG_MASK <<
MMD_DEVADDR_SHIFT) |
diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h
index 0f82990567d9..828c3501c448 100644
--- a/drivers/net/ethernet/intel/igc/igc_regs.h
+++ b/drivers/net/ethernet/intel/igc/igc_regs.h
@@ -67,6 +67,9 @@
/* Filtering Registers */
#define IGC_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */
+#define IGC_FHFT(_n) (0x09000 + (256 * (_n))) /* Flexible Host Filter */
+#define IGC_FHFT_EXT(_n) (0x09A00 + (256 * (_n))) /* Flexible Host Filter Extended */
+#define IGC_FHFTSL 0x05804 /* Flex Filter indirect table select */
/* ETQF register bit definitions */
#define IGC_ETQF_FILTER_ENABLE BIT(26)
@@ -75,6 +78,19 @@
#define IGC_ETQF_QUEUE_MASK 0x00070000
#define IGC_ETQF_ETYPE_MASK 0x0000FFFF
+/* FHFT register bit definitions */
+#define IGC_FHFT_LENGTH_MASK GENMASK(7, 0)
+#define IGC_FHFT_QUEUE_SHIFT 8
+#define IGC_FHFT_QUEUE_MASK GENMASK(10, 8)
+#define IGC_FHFT_PRIO_SHIFT 16
+#define IGC_FHFT_PRIO_MASK GENMASK(18, 16)
+#define IGC_FHFT_IMM_INT BIT(24)
+#define IGC_FHFT_DROP BIT(25)
+
+/* FHFTSL register bit definitions */
+#define IGC_FHFTSL_FTSL_SHIFT 0
+#define IGC_FHFTSL_FTSL_MASK GENMASK(1, 0)
+
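As a worked example of how the queuing word in igc_write_flex_filter_ll() composes these fields (values illustrative): a 32-byte filter steered to RX queue 1 at priority 0 encodes as

/*   queuing = (32 & IGC_FHFT_LENGTH_MASK)
 *           | ((1 << IGC_FHFT_QUEUE_SHIFT) & IGC_FHFT_QUEUE_MASK)
 *           = 0x20 | 0x100 = 0x00000120
 */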
/* Redirection Table - RW Array */
#define IGC_RETA(_i) (0x05C00 + ((_i) * 4))
/* RSS Random Key - RW Array */
@@ -240,6 +256,7 @@
#define IGC_WUFC 0x05808 /* Wakeup Filter Control - RW */
#define IGC_WUS 0x05810 /* Wakeup Status - R/W1C */
#define IGC_WUPL 0x05900 /* Wakeup Packet Length - RW */
+#define IGC_WUFC_EXT 0x0580C /* Wakeup Filter Control Register Extended - RW */
/* Wake Up packet memory */
#define IGC_WUPM_REG(_i) (0x05A00 + ((_i) * 4))
diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.c b/drivers/net/ethernet/intel/igc/igc_tsn.c
index 174103c4bea6..4dbbb8a32ce9 100644
--- a/drivers/net/ethernet/intel/igc/igc_tsn.c
+++ b/drivers/net/ethernet/intel/igc/igc_tsn.c
@@ -52,7 +52,7 @@ static int igc_tsn_disable_offload(struct igc_adapter *adapter)
wr32(IGC_ENDQT(i), NSEC_PER_SEC);
}
- wr32(IGC_QBVCYCLET_S, NSEC_PER_SEC);
+ wr32(IGC_QBVCYCLET_S, 0);
wr32(IGC_QBVCYCLET, NSEC_PER_SEC);
adapter->flags &= ~IGC_FLAG_TSN_QBV_ENABLED;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 14aea40da50f..24e06ba6f5e9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -10247,7 +10247,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_set_tx_maxrate = ixgbe_tx_maxrate,
.ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid,
- .ndo_do_ioctl = ixgbe_ioctl,
+ .ndo_eth_ioctl = ixgbe_ioctl,
.ndo_set_vf_mac = ixgbe_ndo_set_vf_mac,
.ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan,
.ndo_set_vf_rate = ixgbe_ndo_set_vf_bw,
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index f1b9284e0bea..1251b74fe0e2 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -2901,7 +2901,7 @@ static const struct net_device_ops jme_netdev_ops = {
.ndo_open = jme_open,
.ndo_stop = jme_close,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = jme_ioctl,
+ .ndo_eth_ioctl = jme_ioctl,
.ndo_start_xmit = jme_start_xmit,
.ndo_set_mac_address = jme_set_macaddr,
.ndo_set_rx_mode = jme_set_multi,
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index b30a45725374..3e9f324f1061 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -1272,7 +1272,7 @@ static const struct net_device_ops korina_netdev_ops = {
.ndo_start_xmit = korina_send_packet,
.ndo_set_rx_mode = korina_multicast_list,
.ndo_tx_timeout = korina_tx_timeout,
- .ndo_do_ioctl = korina_ioctl,
+ .ndo_eth_ioctl = korina_ioctl,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 2d0c52f7106b..62f8c5212182 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -609,7 +609,7 @@ static const struct net_device_ops ltq_eth_netdev_ops = {
.ndo_stop = ltq_etop_stop,
.ndo_start_xmit = ltq_etop_tx,
.ndo_change_mtu = ltq_etop_change_mtu,
- .ndo_do_ioctl = phy_do_ioctl,
+ .ndo_eth_ioctl = phy_do_ioctl,
.ndo_set_mac_address = ltq_etop_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = ltq_etop_set_multicast_list,
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index d207bfcaf31d..6502c5c2ebca 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -3060,7 +3060,7 @@ static const struct net_device_ops mv643xx_eth_netdev_ops = {
.ndo_set_rx_mode = mv643xx_eth_set_rx_mode,
.ndo_set_mac_address = mv643xx_eth_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = mv643xx_eth_ioctl,
+ .ndo_eth_ioctl = mv643xx_eth_ioctl,
.ndo_change_mtu = mv643xx_eth_change_mtu,
.ndo_set_features = mv643xx_eth_set_features,
.ndo_tx_timeout = mv643xx_eth_tx_timeout,
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 76a7777c746d..5d1007e1b5c9 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2327,7 +2327,7 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool,
if (!skb)
return ERR_PTR(-ENOMEM);
- skb_mark_for_recycle(skb, virt_to_page(xdp->data), pool);
+ skb_mark_for_recycle(skb);
skb_reserve(skb, xdp->data - xdp->data_hard_start);
skb_put(skb, xdp->data_end - xdp->data);
@@ -2339,10 +2339,6 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool,
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
skb_frag_page(frag), skb_frag_off(frag),
skb_frag_size(frag), PAGE_SIZE);
- /* We don't need to reset pp_recycle here. It's already set, so
- * just mark fragments for recycling.
- */
- page_pool_store_mem_info(skb_frag_page(frag), pool);
}
return skb;
@@ -2666,7 +2662,7 @@ static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
return 0;
if (skb_headlen(skb) < (skb_transport_offset(skb) + tcp_hdrlen(skb))) {
- pr_info("*** Is this even possible???!?!?\n");
+ pr_info("*** Is this even possible?\n");
return 0;
}
@@ -3832,12 +3828,20 @@ static void mvneta_validate(struct phylink_config *config,
struct mvneta_port *pp = netdev_priv(ndev);
__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
- /* We only support QSGMII, SGMII, 802.3z and RGMII modes */
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- state->interface != PHY_INTERFACE_MODE_QSGMII &&
- state->interface != PHY_INTERFACE_MODE_SGMII &&
- !phy_interface_mode_is_8023z(state->interface) &&
- !phy_interface_mode_is_rgmii(state->interface)) {
+ /* We only support QSGMII, SGMII, 802.3z and RGMII modes.
+ * When in 802.3z mode, we must have AN enabled:
+ * "Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ...
+ * When <PortType> = 1 (1000BASE-X) this field must be set to 1."
+ */
+ if (phy_interface_mode_is_8023z(state->interface)) {
+ if (!phylink_test(state->advertising, Autoneg)) {
+ bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ return;
+ }
+ } else if (state->interface != PHY_INTERFACE_MODE_NA &&
+ state->interface != PHY_INTERFACE_MODE_QSGMII &&
+ state->interface != PHY_INTERFACE_MODE_SGMII &&
+ !phy_interface_mode_is_rgmii(state->interface)) {
bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
return;
}
@@ -4986,7 +4990,7 @@ static const struct net_device_ops mvneta_netdev_ops = {
.ndo_change_mtu = mvneta_change_mtu,
.ndo_fix_features = mvneta_fix_features,
.ndo_get_stats64 = mvneta_get_stats64,
- .ndo_do_ioctl = mvneta_ioctl,
+ .ndo_eth_ioctl = mvneta_ioctl,
.ndo_bpf = mvneta_xdp,
.ndo_xdp_xmit = mvneta_xdp_xmit,
.ndo_setup_tc = mvneta_setup_tc,
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 3229bafa2a2c..744f58f41ecc 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -3995,7 +3995,7 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
}
if (pp)
- skb_mark_for_recycle(skb, page, pp);
+ skb_mark_for_recycle(skb);
else
dma_unmap_single_attrs(dev->dev.parent, dma_addr,
bm_pool->buf_size, DMA_FROM_DEVICE,
@@ -5702,7 +5702,7 @@ static const struct net_device_ops mvpp2_netdev_ops = {
.ndo_set_mac_address = mvpp2_set_mac_address,
.ndo_change_mtu = mvpp2_change_mtu,
.ndo_get_stats64 = mvpp2_get_stats64,
- .ndo_do_ioctl = mvpp2_ioctl,
+ .ndo_eth_ioctl = mvpp2_ioctl,
.ndo_vlan_rx_add_vid = mvpp2_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = mvpp2_vlan_rx_kill_vid,
.ndo_set_features = mvpp2_set_features,
@@ -6269,6 +6269,15 @@ static void mvpp2_phylink_validate(struct phylink_config *config,
if (!mvpp2_port_supports_rgmii(port))
goto empty_set;
break;
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ /* When in 802.3z mode, we must have AN enabled:
+ * Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ...
+ * When <PortType> = 1 (1000BASE-X) this field must be set to 1.
+ */
+ if (!phylink_test(state->advertising, Autoneg))
+ goto empty_set;
+ break;
default:
break;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/Kconfig b/drivers/net/ethernet/marvell/octeontx2/Kconfig
index 16caa02095fe..2aa0ae8abfbb 100644
--- a/drivers/net/ethernet/marvell/octeontx2/Kconfig
+++ b/drivers/net/ethernet/marvell/octeontx2/Kconfig
@@ -12,6 +12,7 @@ config OCTEONTX2_AF
select NET_DEVLINK
depends on (64BIT && COMPILE_TEST) || ARM64
depends on PCI
+ depends on PTP_1588_CLOCK_OPTIONAL
help
This driver supports Marvell's OcteonTX2 Resource Virtualization
Unit's admin function manager which manages all RVU HW resources
@@ -32,6 +33,7 @@ config OCTEONTX2_PF
select OCTEONTX2_MBOX
depends on (64BIT && COMPILE_TEST) || ARM64
depends on PCI
+ depends on PTP_1588_CLOCK_OPTIONAL
help
This driver supports Marvell's OcteonTX2 NIC physical function.
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h
index 47f5ed006a93..752ba6b4b919 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h
@@ -146,10 +146,7 @@ enum nix_scheduler {
#define TXSCH_RR_QTM_MAX ((1 << 24) - 1)
#define TXSCH_TL1_DFLT_RR_QTM TXSCH_RR_QTM_MAX
#define TXSCH_TL1_DFLT_RR_PRIO (0x1ull)
-#define MAX_SCHED_WEIGHT 0xFF
-#define DFLT_RR_WEIGHT 71
-#define DFLT_RR_QTM ((DFLT_RR_WEIGHT * TXSCH_RR_QTM_MAX) \
- / MAX_SCHED_WEIGHT)
+#define CN10K_MAX_DWRR_WEIGHT 16384 /* Weight is 14-bit on CN10K */
/* Min/Max packet sizes, excluding FCS */
#define NIC_HW_MIN_FRS 40
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index f5ec39de026a..add4a39edced 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -1032,8 +1032,12 @@ struct nix_bp_cfg_rsp {
struct nix_hw_info {
struct mbox_msghdr hdr;
+ u16 rsvs16;
u16 max_mtu;
u16 min_mtu;
+ u32 rpm_dwrr_mtu;
+ u32 sdp_dwrr_mtu;
+ u64 rsvd[16]; /* Reserved fields for future expansion */
};
struct nix_bandprof_alloc_req {
@@ -1074,6 +1078,13 @@ enum npc_af_status {
NPC_MCAM_ALLOC_DENIED = -702,
NPC_MCAM_ALLOC_FAILED = -703,
NPC_MCAM_PERM_DENIED = -704,
+ NPC_FLOW_INTF_INVALID = -707,
+ NPC_FLOW_CHAN_INVALID = -708,
+ NPC_FLOW_NO_NIXLF = -709,
+ NPC_FLOW_NOT_SUPPORTED = -710,
+ NPC_FLOW_VF_PERM_DENIED = -711,
+ NPC_FLOW_VF_NOT_INIT = -712,
+ NPC_FLOW_VF_OVERLAP = -713,
};
struct npc_mcam_alloc_entry_req {
@@ -1422,4 +1433,13 @@ struct cpt_rxc_time_cfg_req {
u16 active_limit;
};
+/* CGX mailbox error codes
+ * Range 1101 - 1200.
+ */
+enum cgx_af_status {
+ LMAC_AF_ERR_INVALID_PARAM = -1101,
+ LMAC_AF_ERR_PF_NOT_MAPPED = -1102,
+ LMAC_AF_ERR_PERM_DENIED = -1103,
+};
+
#endif /* MBOX_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index 5fe277e354f7..c2438ba5e2ec 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -924,16 +924,26 @@ static int rvu_setup_hw_resources(struct rvu *rvu)
block->lfreset_reg = NPA_AF_LF_RST;
sprintf(block->name, "NPA");
err = rvu_alloc_bitmap(&block->lf);
- if (err)
+ if (err) {
+ dev_err(rvu->dev,
+ "%s: Failed to allocate NPA LF bitmap\n", __func__);
return err;
+ }
nix:
err = rvu_setup_nix_hw_resource(rvu, BLKADDR_NIX0);
- if (err)
+ if (err) {
+ dev_err(rvu->dev,
+ "%s: Failed to allocate NIX0 LFs bitmap\n", __func__);
return err;
+ }
+
err = rvu_setup_nix_hw_resource(rvu, BLKADDR_NIX1);
- if (err)
+ if (err) {
+ dev_err(rvu->dev,
+ "%s: Failed to allocate NIX1 LFs bitmap\n", __func__);
return err;
+ }
/* Init SSO group's bitmap */
block = &hw->block[BLKADDR_SSO];
@@ -953,8 +963,11 @@ nix:
block->lfreset_reg = SSO_AF_LF_HWGRP_RST;
sprintf(block->name, "SSO GROUP");
err = rvu_alloc_bitmap(&block->lf);
- if (err)
+ if (err) {
+ dev_err(rvu->dev,
+ "%s: Failed to allocate SSO LF bitmap\n", __func__);
return err;
+ }
ssow:
/* Init SSO workslot's bitmap */
@@ -974,8 +987,11 @@ ssow:
block->lfreset_reg = SSOW_AF_LF_HWS_RST;
sprintf(block->name, "SSOWS");
err = rvu_alloc_bitmap(&block->lf);
- if (err)
+ if (err) {
+ dev_err(rvu->dev,
+ "%s: Failed to allocate SSOW LF bitmap\n", __func__);
return err;
+ }
tim:
/* Init TIM LF's bitmap */
@@ -996,35 +1012,53 @@ tim:
block->lfreset_reg = TIM_AF_LF_RST;
sprintf(block->name, "TIM");
err = rvu_alloc_bitmap(&block->lf);
- if (err)
+ if (err) {
+ dev_err(rvu->dev,
+ "%s: Failed to allocate TIM LF bitmap\n", __func__);
return err;
+ }
cpt:
err = rvu_setup_cpt_hw_resource(rvu, BLKADDR_CPT0);
- if (err)
+ if (err) {
+ dev_err(rvu->dev,
+ "%s: Failed to allocate CPT0 LF bitmap\n", __func__);
return err;
+ }
err = rvu_setup_cpt_hw_resource(rvu, BLKADDR_CPT1);
- if (err)
+ if (err) {
+ dev_err(rvu->dev,
+ "%s: Failed to allocate CPT1 LF bitmap\n", __func__);
return err;
+ }
/* Allocate memory for PFVF data */
rvu->pf = devm_kcalloc(rvu->dev, hw->total_pfs,
sizeof(struct rvu_pfvf), GFP_KERNEL);
- if (!rvu->pf)
+ if (!rvu->pf) {
+ dev_err(rvu->dev,
+ "%s: Failed to allocate memory for PF's rvu_pfvf struct\n", __func__);
return -ENOMEM;
+ }
rvu->hwvf = devm_kcalloc(rvu->dev, hw->total_vfs,
sizeof(struct rvu_pfvf), GFP_KERNEL);
- if (!rvu->hwvf)
+ if (!rvu->hwvf) {
+ dev_err(rvu->dev,
+ "%s: Failed to allocate memory for VF's rvu_pfvf struct\n", __func__);
return -ENOMEM;
+ }
mutex_init(&rvu->rsrc_lock);
rvu_fwdata_init(rvu);
err = rvu_setup_msix_resources(rvu);
- if (err)
+ if (err) {
+ dev_err(rvu->dev,
+ "%s: Failed to setup MSIX resources\n", __func__);
return err;
+ }
for (blkid = 0; blkid < BLK_COUNT; blkid++) {
block = &hw->block[blkid];
@@ -1050,25 +1084,33 @@ cpt:
goto msix_err;
err = rvu_npc_init(rvu);
- if (err)
+ if (err) {
+ dev_err(rvu->dev, "%s: Failed to initialize npc\n", __func__);
goto npc_err;
+ }
err = rvu_cgx_init(rvu);
- if (err)
+ if (err) {
+ dev_err(rvu->dev, "%s: Failed to initialize cgx\n", __func__);
goto cgx_err;
+ }
/* Assign MACs for CGX mapped functions */
rvu_setup_pfvf_macaddress(rvu);
err = rvu_npa_init(rvu);
- if (err)
+ if (err) {
+ dev_err(rvu->dev, "%s: Failed to initialize npa\n", __func__);
goto npa_err;
+ }
rvu_get_lbk_bufsize(rvu);
err = rvu_nix_init(rvu);
- if (err)
+ if (err) {
+ dev_err(rvu->dev, "%s: Failed to initialize nix\n", __func__);
goto nix_err;
+ }
rvu_program_channels(rvu);
@@ -2984,27 +3026,37 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = rvu_mbox_init(rvu, &rvu->afpf_wq_info, TYPE_AFPF,
rvu->hw->total_pfs, rvu_afpf_mbox_handler,
rvu_afpf_mbox_up_handler);
- if (err)
+ if (err) {
+ dev_err(dev, "%s: Failed to initialize mbox\n", __func__);
goto err_hwsetup;
+ }
err = rvu_flr_init(rvu);
- if (err)
+ if (err) {
+ dev_err(dev, "%s: Failed to initialize flr\n", __func__);
goto err_mbox;
+ }
err = rvu_register_interrupts(rvu);
- if (err)
+ if (err) {
+ dev_err(dev, "%s: Failed to register interrupts\n", __func__);
goto err_flr;
+ }
err = rvu_register_dl(rvu);
- if (err)
+ if (err) {
+ dev_err(dev, "%s: Failed to register devlink\n", __func__);
goto err_irq;
+ }
rvu_setup_rvum_blk_revid(rvu);
/* Enable AF's VFs (if any) */
err = rvu_enable_sriov(rvu);
- if (err)
+ if (err) {
+ dev_err(dev, "%s: Failed to enable sriov\n", __func__);
goto err_dl;
+ }
/* Initialize debugfs */
rvu_dbg_init(rvu);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 91503fb2762c..d88f595e63b0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -329,6 +329,7 @@ struct hw_cap {
bool nix_shaping; /* Is shaping and coloring supported */
bool nix_tx_link_bp; /* Can link backpressure TL queues ? */
bool nix_rx_multicast; /* Rx packet replication support */
+ bool nix_common_dwrr_mtu; /* Common DWRR MTU for quantum config */
bool per_pf_mbox_regs; /* PF mbox specified in per PF registers ? */
bool programmable_chans; /* Channels programmable ? */
bool ipolicer;
@@ -355,6 +356,7 @@ struct rvu_hwinfo {
u16 npc_counters; /* No of match stats counters */
u32 lbk_bufsize; /* FIFO size supported by LBK */
bool npc_ext_set; /* Extended register set */
+ u64 npc_stat_ena; /* Match stats enable bit */
struct hw_cap cap;
struct rvu_block block[BLK_COUNT]; /* Block info */
@@ -706,6 +708,8 @@ int nix_aq_context_read(struct rvu *rvu, struct nix_hw *nix_hw,
struct nix_cn10k_aq_enq_rsp *aq_rsp,
u16 pcifunc, u8 ctype, u32 qidx);
int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc);
+u32 convert_dwrr_mtu_to_bytes(u8 dwrr_mtu);
+u32 convert_bytes_to_dwrr_mtu(u32 bytes);
/* NPC APIs */
int rvu_npc_init(struct rvu *rvu);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index fe99ac4a4dd8..d34e59525a09 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -448,7 +448,7 @@ int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
u8 cgx_id, lmac_id;
if (!is_cgx_config_permitted(rvu, pcifunc))
- return -EPERM;
+ return LMAC_AF_ERR_PERM_DENIED;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
@@ -507,7 +507,7 @@ static int rvu_lmac_get_stats(struct rvu *rvu, struct msg_req *req,
void *cgxd;
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
- return -ENODEV;
+ return LMAC_AF_ERR_PERM_DENIED;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
cgxd = rvu_cgx_pdata(cgx_idx, rvu);
@@ -561,7 +561,7 @@ int rvu_mbox_handler_cgx_fec_stats(struct rvu *rvu,
void *cgxd;
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
- return -EPERM;
+ return LMAC_AF_ERR_PERM_DENIED;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
cgxd = rvu_cgx_pdata(cgx_idx, rvu);
@@ -888,7 +888,7 @@ int rvu_mbox_handler_cgx_get_phy_fec_stats(struct rvu *rvu, struct msg_req *req,
u8 cgx_id, lmac_id;
if (!is_pf_cgxmapped(rvu, pf))
- return -EPERM;
+ return LMAC_AF_ERR_PF_NOT_MAPPED;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
return cgx_get_phy_fec_stats(rvu_cgx_pdata(cgx_id, rvu), lmac_id);
@@ -1046,7 +1046,7 @@ int rvu_mbox_handler_cgx_mac_addr_reset(struct rvu *rvu, struct msg_req *req,
u8 cgx_id, lmac_id;
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
- return -EPERM;
+ return LMAC_AF_ERR_PERM_DENIED;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
return cgx_lmac_addr_reset(cgx_id, lmac_id);
@@ -1060,7 +1060,7 @@ int rvu_mbox_handler_cgx_mac_addr_update(struct rvu *rvu,
u8 cgx_id, lmac_id;
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
- return -EPERM;
+ return LMAC_AF_ERR_PERM_DENIED;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
return cgx_lmac_addr_update(cgx_id, lmac_id, req->mac_addr, req->index);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
index 2688186066d9..a55b46ad162d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
@@ -1364,6 +1364,89 @@ static void rvu_health_reporters_destroy(struct rvu *rvu)
rvu_nix_health_reporters_destroy(rvu_dl);
}
+/* Devlink Params APIs */
+static int rvu_af_dl_dwrr_mtu_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+ int dwrr_mtu = val.vu32;
+ struct nix_txsch *txsch;
+ struct nix_hw *nix_hw;
+
+ if (!rvu->hw->cap.nix_common_dwrr_mtu) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Setting DWRR_MTU is not supported on this silicon");
+ return -EOPNOTSUPP;
+ }
+
+ if ((dwrr_mtu > 65536 || !is_power_of_2(dwrr_mtu)) &&
+ (dwrr_mtu != 9728 && dwrr_mtu != 10240)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Invalid, supported MTUs are powers of 2 up to 64K, and 9728 or 10240");
+ return -EINVAL;
+ }
+
+ nix_hw = get_nix_hw(rvu->hw, BLKADDR_NIX0);
+ if (!nix_hw)
+ return -ENODEV;
+
+ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
+ if (rvu_rsrc_free_count(&txsch->schq) != txsch->schq.max) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Changing DWRR MTU is not supported when there are active NIXLFs");
+ NL_SET_ERR_MSG_MOD(extack,
+ "Make sure none of the PF/VF interfaces are initialized and retry");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int rvu_af_dl_dwrr_mtu_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+ u64 dwrr_mtu;
+
+ dwrr_mtu = convert_bytes_to_dwrr_mtu(ctx->val.vu32);
+ rvu_write64(rvu, BLKADDR_NIX0, NIX_AF_DWRR_RPM_MTU, dwrr_mtu);
+
+ return 0;
+}
+
+static int rvu_af_dl_dwrr_mtu_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+ u64 dwrr_mtu;
+
+ if (!rvu->hw->cap.nix_common_dwrr_mtu)
+ return -EOPNOTSUPP;
+
+ dwrr_mtu = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_DWRR_RPM_MTU);
+ ctx->val.vu32 = convert_dwrr_mtu_to_bytes(dwrr_mtu);
+
+ return 0;
+}
+
+enum rvu_af_dl_param_id {
+ RVU_AF_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+ RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU,
+};
+
+static const struct devlink_param rvu_af_dl_params[] = {
+ DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU,
+ "dwrr_mtu", DEVLINK_PARAM_TYPE_U32,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ rvu_af_dl_dwrr_mtu_get, rvu_af_dl_dwrr_mtu_set,
+ rvu_af_dl_dwrr_mtu_validate),
+};
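Once registered and published, the parameter can be driven from userspace with the standard devlink CLI; a hypothetical invocation (device address made up):

/*   devlink dev param set pci/0002:01:00.0 \
 *           name dwrr_mtu value 9728 cmode runtime
 */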
+
+/* Devlink switch mode */
static int rvu_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
struct rvu_devlink *rvu_dl = devlink_priv(devlink);
@@ -1420,13 +1503,14 @@ int rvu_register_dl(struct rvu *rvu)
struct devlink *dl;
int err;
- dl = devlink_alloc(&rvu_devlink_ops, sizeof(struct rvu_devlink));
+ dl = devlink_alloc(&rvu_devlink_ops, sizeof(struct rvu_devlink),
+ rvu->dev);
if (!dl) {
dev_warn(rvu->dev, "devlink_alloc failed\n");
return -ENOMEM;
}
- err = devlink_register(dl, rvu->dev);
+ err = devlink_register(dl);
if (err) {
dev_err(rvu->dev, "devlink register failed with error %d\n", err);
devlink_free(dl);
@@ -1438,7 +1522,30 @@ int rvu_register_dl(struct rvu *rvu)
rvu_dl->rvu = rvu;
rvu->rvu_dl = rvu_dl;
- return rvu_health_reporters_create(rvu);
+ err = rvu_health_reporters_create(rvu);
+ if (err) {
+ dev_err(rvu->dev,
+ "devlink health reporter creation failed with error %d\n", err);
+ goto err_dl_health;
+ }
+
+ err = devlink_params_register(dl, rvu_af_dl_params,
+ ARRAY_SIZE(rvu_af_dl_params));
+ if (err) {
+ dev_err(rvu->dev,
+ "devlink params register failed with error %d", err);
+ goto err_dl_health;
+ }
+
+ devlink_params_publish(dl);
+
+ return 0;
+
+err_dl_health:
+ rvu_health_reporters_destroy(rvu);
+ devlink_unregister(dl);
+ devlink_free(dl);
+ return err;
}
void rvu_unregister_dl(struct rvu *rvu)
@@ -1449,6 +1556,8 @@ void rvu_unregister_dl(struct rvu *rvu)
if (!dl)
return;
+ devlink_params_unregister(dl, rvu_af_dl_params,
+ ARRAY_SIZE(rvu_af_dl_params));
rvu_health_reporters_destroy(rvu);
devlink_unregister(dl);
devlink_free(dl);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 4bfbbdf38770..22039d9ce70a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -192,6 +192,47 @@ struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
return NULL;
}
+u32 convert_dwrr_mtu_to_bytes(u8 dwrr_mtu)
+{
+ dwrr_mtu &= 0x1FULL;
+
+ /* The MTU used for DWRR calculation is a power of 2 up to 64K bytes.
+ * Value of 4 is reserved for MTU value of 9728 bytes.
+ * Value of 5 is reserved for MTU value of 10240 bytes.
+ */
+ switch (dwrr_mtu) {
+ case 4:
+ return 9728;
+ case 5:
+ return 10240;
+ default:
+ return BIT_ULL(dwrr_mtu);
+ }
+}
+
+u32 convert_bytes_to_dwrr_mtu(u32 bytes)
+{
+ /* The MTU used for DWRR calculation is a power of 2 up to 64K bytes.
+ * Value of 4 is reserved for MTU value of 9728 bytes.
+ * Value of 5 is reserved for MTU value of 10240 bytes.
+ */
+ if (bytes > BIT_ULL(16))
+ return 0;
+
+ switch (bytes) {
+ case 9728:
+ return 4;
+ case 10240:
+ return 5;
+ default:
+ return ilog2(bytes);
+ }
+}
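A quick sanity sketch of the round-trip implied by the two converters above (not part of the patch):

	/* powers of two up to 64K round-trip via ilog2()/BIT_ULL() */
	WARN_ON(convert_dwrr_mtu_to_bytes(convert_bytes_to_dwrr_mtu(8192)) != 8192);
	/* the reserved encodings map to the two jumbo sizes */
	WARN_ON(convert_bytes_to_dwrr_mtu(9728) != 4);
	WARN_ON(convert_dwrr_mtu_to_bytes(5) != 10240);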
+
static void nix_rx_sync(struct rvu *rvu, int blkaddr)
{
int err;
@@ -943,7 +984,7 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
- return -EINVAL;
+ return NIX_AF_ERR_INVALID_NIXBLK;
return rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp);
}
@@ -1364,7 +1405,7 @@ int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu,
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
- return -EINVAL;
+ return NIX_AF_ERR_INVALID_NIXBLK;
cfg = (((u32)req->offset & 0x7) << 16) |
(((u32)req->y_mask & 0xF) << 12) |
@@ -1632,7 +1673,7 @@ int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
- return -EINVAL;
+ return NIX_AF_ERR_INVALID_NIXBLK;
mutex_lock(&rvu->rsrc_lock);
@@ -1754,7 +1795,7 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
- return -EINVAL;
+ return NIX_AF_ERR_INVALID_NIXBLK;
nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
if (nixlf < 0)
@@ -1825,7 +1866,7 @@ static int nix_txschq_free_one(struct rvu *rvu,
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
- return -EINVAL;
+ return NIX_AF_ERR_INVALID_NIXBLK;
nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
if (nixlf < 0)
@@ -1958,8 +1999,17 @@ static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw,
return;
rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq),
(TXSCH_TL1_DFLT_RR_PRIO << 1));
- rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
- TXSCH_TL1_DFLT_RR_QTM);
+
+ /* On OcteonTx2 the config was in bytes; on newer silicons it is
+ * expressed as a weight.
+ */
+ if (!rvu->hw->cap.nix_common_dwrr_mtu)
+ rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
+ TXSCH_TL1_DFLT_RR_QTM);
+ else
+ rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
+ CN10K_MAX_DWRR_WEIGHT);
+
rvu_write64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq), 0x00);
pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE);
}
@@ -2016,7 +2066,7 @@ int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
- return -EINVAL;
+ return NIX_AF_ERR_INVALID_NIXBLK;
txsch = &nix_hw->txsch[req->lvl];
pfvf_map = txsch->pfvf_map;
@@ -2114,8 +2164,12 @@ static int nix_tx_vtag_free(struct rvu *rvu, int blkaddr,
u16 pcifunc, int index)
{
struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
- struct nix_txvlan *vlan = &nix_hw->txvlan;
+ struct nix_txvlan *vlan;
+
+ if (!nix_hw)
+ return NIX_AF_ERR_INVALID_NIXBLK;
+ vlan = &nix_hw->txvlan;
if (vlan->entry2pfvf_map[index] != pcifunc)
return NIX_AF_ERR_PARAM;
@@ -2156,10 +2210,15 @@ static int nix_tx_vtag_alloc(struct rvu *rvu, int blkaddr,
u64 vtag, u8 size)
{
struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
- struct nix_txvlan *vlan = &nix_hw->txvlan;
+ struct nix_txvlan *vlan;
u64 regval;
int index;
+ if (!nix_hw)
+ return NIX_AF_ERR_INVALID_NIXBLK;
+
+ vlan = &nix_hw->txvlan;
+
mutex_lock(&vlan->rsrc_lock);
index = rvu_alloc_rsrc(&vlan->rsrc);
@@ -2184,12 +2243,16 @@ static int nix_tx_vtag_decfg(struct rvu *rvu, int blkaddr,
struct nix_vtag_config *req)
{
struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
- struct nix_txvlan *vlan = &nix_hw->txvlan;
u16 pcifunc = req->hdr.pcifunc;
int idx0 = req->tx.vtag0_idx;
int idx1 = req->tx.vtag1_idx;
+ struct nix_txvlan *vlan;
int err = 0;
+ if (!nix_hw)
+ return NIX_AF_ERR_INVALID_NIXBLK;
+
+ vlan = &nix_hw->txvlan;
if (req->tx.free_vtag0 && req->tx.free_vtag1)
if (vlan->entry2pfvf_map[idx0] != pcifunc ||
vlan->entry2pfvf_map[idx1] != pcifunc)
@@ -2216,9 +2279,13 @@ static int nix_tx_vtag_cfg(struct rvu *rvu, int blkaddr,
struct nix_vtag_config_rsp *rsp)
{
struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
- struct nix_txvlan *vlan = &nix_hw->txvlan;
+ struct nix_txvlan *vlan;
u16 pcifunc = req->hdr.pcifunc;
+ if (!nix_hw)
+ return NIX_AF_ERR_INVALID_NIXBLK;
+
+ vlan = &nix_hw->txvlan;
if (req->tx.cfg_vtag0) {
rsp->vtag0_idx =
nix_tx_vtag_alloc(rvu, blkaddr,
@@ -2667,6 +2734,15 @@ static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
for (schq = 0; schq < txsch->schq.max; schq++)
txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
}
+
+ /* Setup a default value of 8192 as DWRR MTU */
+ if (rvu->hw->cap.nix_common_dwrr_mtu) {
+ rvu_write64(rvu, blkaddr, NIX_AF_DWRR_RPM_MTU,
+ convert_bytes_to_dwrr_mtu(8192));
+ rvu_write64(rvu, blkaddr, NIX_AF_DWRR_SDP_MTU,
+ convert_bytes_to_dwrr_mtu(8192));
+ }
+
return 0;
}
@@ -2743,6 +2819,7 @@ int rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req,
struct nix_hw_info *rsp)
{
u16 pcifunc = req->hdr.pcifunc;
+ u64 dwrr_mtu;
int blkaddr;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
@@ -2755,6 +2832,20 @@ int rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req,
rvu_get_lmac_link_max_frs(rvu, &rsp->max_mtu);
rsp->min_mtu = NIC_HW_MIN_FRS;
+
+ if (!rvu->hw->cap.nix_common_dwrr_mtu) {
+ /* Return '1' on OTx2 */
+ rsp->rpm_dwrr_mtu = 1;
+ rsp->sdp_dwrr_mtu = 1;
+ return 0;
+ }
+
+ dwrr_mtu = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_DWRR_RPM_MTU);
+ rsp->rpm_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu);
+
+ dwrr_mtu = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_DWRR_SDP_MTU);
+ rsp->sdp_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu);
+
return 0;
}
@@ -3068,7 +3159,7 @@ static int reserve_flowkey_alg_idx(struct rvu *rvu, int blkaddr, u32 flow_cfg)
hw = get_nix_hw(rvu->hw, blkaddr);
if (!hw)
- return -EINVAL;
+ return NIX_AF_ERR_INVALID_NIXBLK;
/* No room to add a new flow hash algorithm */
if (hw->flowkey.in_use >= NIX_FLOW_KEY_ALG_MAX)
@@ -3108,7 +3199,7 @@ int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
- return -EINVAL;
+ return NIX_AF_ERR_INVALID_NIXBLK;
alg_idx = get_flowkey_alg_idx(nix_hw, req->flowkey_cfg);
/* Failed to get algo index from the existing list, reserve a new one */
@@ -3385,7 +3476,7 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
- return -EINVAL;
+ return NIX_AF_ERR_INVALID_NIXBLK;
if (is_afvf(pcifunc))
rvu_get_lbk_link_max_frs(rvu, &max_mtu);
@@ -3647,6 +3738,28 @@ static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
return 0;
}
+static void rvu_nix_setup_capabilities(struct rvu *rvu, int blkaddr)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u64 hw_const;
+
+ hw_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
+
+ /* On OcteonTx2 the DWRR quantum is configured directly into each of
+ * the transmit scheduler queues, and PF/VF drivers were free to
+ * configure any value up to 2^24.
+ * On CN10K the HW is modified: the quantum configured at the scheduler
+ * queues is a weight, and SW needs to set up a base DWRR MTU via
+ * NIX_AF_DWRR_RPM_MTU / NIX_AF_DWRR_SDP_MTU. HW computes
+ * 'DWRR MTU * weight' to derive the quantum.
+ *
+ * Check if HW uses a common MTU for all DWRR quantum configs.
+ * On OcteonTx2 this register field is '0'.
+ */
+ if (((hw_const >> 56) & 0x10) == 0x10)
+ hw->cap.nix_common_dwrr_mtu = true;
+}
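To illustrate the CN10K scheme described in the comment (numbers are examples only):

/*   quantum = DWRR MTU * weight, e.g. 8192 * 2 = 16384 bytes,
 * where OcteonTx2 would instead have had 16384 written directly
 * into the scheduler queue's quantum field.
 */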
+
static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
{
const struct npc_lt_def_cfg *ltdefs;
@@ -3684,6 +3797,9 @@ static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
if (err)
return err;
+ /* Setup capabilities of the NIX block */
+ rvu_nix_setup_capabilities(rvu, blkaddr);
+
/* Initialize admin queue */
err = nix_aq_init(rvu, block);
if (err)
@@ -4027,7 +4143,7 @@ int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
- return -EINVAL;
+ return NIX_AF_ERR_INVALID_NIXBLK;
/* Find existing matching LSO format, if any */
for (idx = 0; idx < nix_hw->lso.in_use; idx++) {
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index 52b255426c22..6f231008c8a4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -724,7 +724,17 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
action.index = pfvf->promisc_mce_idx;
}
- req.chan_mask = 0xFFFU;
+ /* For CN10K the upper two bits of the channel number carry the
+ * CPT channel number. By masking out these bits in the MCAM
+ * entry, the same entry used for NIX will also match packets
+ * received from CPT for parsing.
+ */
+ if (!is_rvu_otx2(rvu))
+ req.chan_mask = NIX_CHAN_CPT_X2P_MASK;
+ else
+ req.chan_mask = 0xFFFU;
+
if (chan_cnt > 1) {
if (!is_power_of_2(chan_cnt)) {
dev_err(rvu->dev,
@@ -1898,9 +1908,22 @@ static void rvu_npc_hw_init(struct rvu *rvu, int blkaddr)
mcam->banks = (npc_const >> 44) & 0xFULL;
mcam->banksize = (npc_const >> 28) & 0xFFFFULL;
+ hw->npc_stat_ena = BIT_ULL(9);
/* Extended set */
if (npc_const2) {
hw->npc_ext_set = true;
+ /* 96xx supports only match_stats, with npc_counters
+ * reflected in the NPC_AF_CONST reg:
+ * STAT_SEL occupies bits [8:0] and ENA is bit 9.
+ * 98xx has both match_stat and the extended set, with
+ * npc_counter reflected in NPC_AF_CONST2 and
+ * STAT_SEL_EXT added at bits [14:12].
+ * CN10K supports only the extended set, so npc_counters in
+ * NPC_AF_CONST is 0 and the counters are reflected in NPC_AF_CONST2;
+ * STAT_SEL widens from [8:0] to [11:0] and the ENA bit moves to bit 63.
+ */
+ if (!hw->npc_counters)
+ hw->npc_stat_ena = BIT_ULL(63);
hw->npc_counters = (npc_const2 >> 16) & 0xFFFFULL;
mcam->banksize = npc_const2 & 0xFFFFULL;
}
@@ -1955,7 +1978,7 @@ static void rvu_npc_setup_interfaces(struct rvu *rvu, int blkaddr)
rvu_write64(rvu, blkaddr,
NPC_AF_INTFX_MISS_STAT_ACT(intf),
((mcam->rx_miss_act_cntr >> 9) << 12) |
- BIT_ULL(9) | mcam->rx_miss_act_cntr);
+ hw->npc_stat_ena | mcam->rx_miss_act_cntr);
}
/* Configure TX interfaces */
@@ -2147,18 +2170,16 @@ static void npc_map_mcam_entry_and_cntr(struct rvu *rvu, struct npc_mcam *mcam,
int blkaddr, u16 entry, u16 cntr)
{
u16 index = entry & (mcam->banksize - 1);
- u16 bank = npc_get_bank(mcam, entry);
+ u32 bank = npc_get_bank(mcam, entry);
+ struct rvu_hwinfo *hw = rvu->hw;
/* Set mapping and increment counter's refcnt */
mcam->entry2cntr_map[entry] = cntr;
mcam->cntr_refcnt[cntr]++;
- /* Enable stats
- * NPC_AF_MCAMEX_BANKX_STAT_ACT[14:12] - counter[11:9]
- * NPC_AF_MCAMEX_BANKX_STAT_ACT[8:0] - counter[8:0]
- */
+ /* Enable stats */
rvu_write64(rvu, blkaddr,
NPC_AF_MCAMEX_BANKX_STAT_ACT(index, bank),
- ((cntr >> 9) << 12) | BIT_ULL(9) | cntr);
+ ((cntr >> 9) << 12) | hw->npc_stat_ena | cntr);
}
static void npc_unmap_mcam_entry_and_cntr(struct rvu *rvu,
@@ -2414,6 +2435,17 @@ static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc,
goto alloc;
}
+ /* For a VF, the base MCAM match rule is set by its PF, and all
+ * further MCAM rules installed by the VF on its own are
+ * concatenated with that base rule. Hence PF entries should be at
+ * lower priority than VF entries; otherwise the base rule is always
+ * hit and the rules installed by the VF are of no use. So if the
+ * request is from a PF and NOT a priority allocation request,
+ * allocate low priority entries.
+ */
+ if (!(pcifunc & RVU_PFVF_FUNC_MASK))
+ goto lprio_alloc;
+
/* Find out the search range for non-priority allocation request
*
* Get MCAM free entry count in middle zone.
@@ -2439,6 +2471,7 @@ static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc,
/* Not enough free entries, search all entries in reverse,
* so that low priority ones will get used up.
*/
+lprio_alloc:
reverse = true;
start = 0;
end = mcam->bmap_entries;
@@ -3252,7 +3285,7 @@ int rvu_mbox_handler_npc_mcam_entry_stats(struct rvu *rvu,
/* read MCAM entry STAT_ACT register */
regval = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_STAT_ACT(index, bank));
- if (!(regval & BIT_ULL(9))) {
+ if (!(regval & rvu->hw->npc_stat_ena)) {
rsp->stat_ena = 0;
mutex_unlock(&mcam->lock);
return 0;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
index 5c01cf4a9c5b..9bde1bb7e148 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
@@ -600,7 +600,7 @@ static int npc_check_unsupported_flows(struct rvu *rvu, u64 features, u8 intf)
dev_info(rvu->dev, "Unsupported flow(s):\n");
for_each_set_bit(bit, (unsigned long *)&unsupported, 64)
dev_info(rvu->dev, "%s ", npc_get_field_name(bit));
- return NIX_AF_ERR_NPC_KEY_NOT_SUPP;
+ return -EOPNOTSUPP;
}
return 0;
@@ -995,13 +995,11 @@ static int npc_install_flow(struct rvu *rvu, int blkaddr, u16 target,
struct npc_mcam *mcam = &rvu->hw->mcam;
struct rvu_npc_mcam_rule dummy = { 0 };
struct rvu_npc_mcam_rule *rule;
- bool new = false, msg_from_vf;
u16 owner = req->hdr.pcifunc;
struct msg_rsp write_rsp;
struct mcam_entry *entry;
int entry_index, err;
-
- msg_from_vf = !!(owner & RVU_PFVF_FUNC_MASK);
+ bool new = false;
installed_features = req->features;
features = req->features;
@@ -1027,7 +1025,7 @@ static int npc_install_flow(struct rvu *rvu, int blkaddr, u16 target,
}
/* update mcam entry with default unicast rule attributes */
- if (def_ucast_rule && (msg_from_vf || (req->default_rule && req->append))) {
+ if (def_ucast_rule && (req->default_rule && req->append)) {
missing_features = (def_ucast_rule->features ^ features) &
def_ucast_rule->features;
if (missing_features)
@@ -1130,6 +1128,7 @@ int rvu_mbox_handler_npc_install_flow(struct rvu *rvu,
struct npc_install_flow_rsp *rsp)
{
bool from_vf = !!(req->hdr.pcifunc & RVU_PFVF_FUNC_MASK);
+ struct rvu_switch *rswitch = &rvu->rswitch;
int blkaddr, nixlf, err;
struct rvu_pfvf *pfvf;
bool pf_set_vfs_mac = false;
@@ -1139,14 +1138,14 @@ int rvu_mbox_handler_npc_install_flow(struct rvu *rvu,
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
if (blkaddr < 0) {
dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
- return -ENODEV;
+ return NPC_MCAM_INVALID_REQ;
}
if (!is_npc_interface_valid(rvu, req->intf))
- return -EINVAL;
+ return NPC_FLOW_INTF_INVALID;
if (from_vf && req->default_rule)
- return NPC_MCAM_PERM_DENIED;
+ return NPC_FLOW_VF_PERM_DENIED;
/* Each PF/VF info is maintained in struct rvu_pfvf.
* rvu_pfvf for the target PF/VF needs to be retrieved
@@ -1172,12 +1171,12 @@ int rvu_mbox_handler_npc_install_flow(struct rvu *rvu,
err = npc_check_unsupported_flows(rvu, req->features, req->intf);
if (err)
- return err;
+ return NPC_FLOW_NOT_SUPPORTED;
/* Skip channel validation if AF is installing */
if (!is_pffunc_af(req->hdr.pcifunc) &&
npc_mcam_verify_channel(rvu, target, req->intf, req->channel))
- return -EINVAL;
+ return NPC_FLOW_CHAN_INVALID;
pfvf = rvu_get_pfvf(rvu, target);
@@ -1195,7 +1194,7 @@ int rvu_mbox_handler_npc_install_flow(struct rvu *rvu,
/* Proceed if NIXLF is attached or not for TX rules */
err = nix_get_nixlf(rvu, target, &nixlf, NULL);
if (err && is_npc_intf_rx(req->intf) && !pf_set_vfs_mac)
- return -EINVAL;
+ return NPC_FLOW_NO_NIXLF;
/* don't enable rule when nixlf not attached or initialized */
if (!(is_nixlf_attached(rvu, target) &&
@@ -1211,7 +1210,7 @@ int rvu_mbox_handler_npc_install_flow(struct rvu *rvu,
/* Do not allow requests from uninitialized VFs */
if (from_vf && !enable)
- return -EINVAL;
+ return NPC_FLOW_VF_NOT_INIT;
/* PF sets VF mac & VF NIXLF is not attached, update the mac addr */
if (pf_set_vfs_mac && !enable) {
@@ -1221,15 +1220,12 @@ int rvu_mbox_handler_npc_install_flow(struct rvu *rvu,
return 0;
}
- /* If message is from VF then its flow should not overlap with
- * reserved unicast flow.
- */
- if (from_vf && pfvf->def_ucast_rule && is_npc_intf_rx(req->intf) &&
- pfvf->def_ucast_rule->features & req->features)
- return -EINVAL;
+ mutex_lock(&rswitch->switch_lock);
+ err = npc_install_flow(rvu, blkaddr, target, nixlf, pfvf,
+ req, rsp, enable, pf_set_vfs_mac);
+ mutex_unlock(&rswitch->switch_lock);
- return npc_install_flow(rvu, blkaddr, target, nixlf, pfvf, req, rsp,
- enable, pf_set_vfs_mac);
+ return err;
}
static int npc_delete_flow(struct rvu *rvu, struct rvu_npc_mcam_rule *rule,
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
index 8b01ef6e2c99..6efcf3afff40 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
@@ -269,6 +269,8 @@
#define NIX_AF_DEBUG_NPC_RESP_DATAX(a) (0x680 | (a) << 3)
#define NIX_AF_SMQX_CFG(a) (0x700 | (a) << 16)
#define NIX_AF_SQM_DBG_CTL_STATUS (0x750)
+#define NIX_AF_DWRR_SDP_MTU (0x790)
+#define NIX_AF_DWRR_RPM_MTU (0x7A0)
#define NIX_AF_PSE_CHANNEL_LEVEL (0x800)
#define NIX_AF_PSE_SHAPER_CFG (0x810)
#define NIX_AF_TX_EXPR_CREDIT (0x830)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h
index 64aa7d350df1..6af97ce69443 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h
@@ -14,6 +14,8 @@
#include <linux/tracepoint.h>
#include <linux/pci.h>
+#include "mbox.h"
+
TRACE_EVENT(otx2_msg_alloc,
TP_PROTO(const struct pci_dev *pdev, u16 id, u64 size),
TP_ARGS(pdev, id, size),
@@ -25,8 +27,8 @@ TRACE_EVENT(otx2_msg_alloc,
__entry->id = id;
__entry->size = size;
),
- TP_printk("[%s] msg:(0x%x) size:%lld\n", __get_str(dev),
- __entry->id, __entry->size)
+ TP_printk("[%s] msg:(%s) size:%lld\n", __get_str(dev),
+ otx2_mbox_id2name(__entry->id), __entry->size)
);
TRACE_EVENT(otx2_msg_send,
@@ -88,8 +90,8 @@ TRACE_EVENT(otx2_msg_process,
__entry->id = id;
__entry->err = err;
),
- TP_printk("[%s] msg:(0x%x) error:%d\n", __get_str(dev),
- __entry->id, __entry->err)
+ TP_printk("[%s] msg:(%s) error:%d\n", __get_str(dev),
+ otx2_mbox_id2name(__entry->id), __entry->err)
);
#endif /* __RVU_TRACE_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
index 3254b02205ca..fcaa7df404f3 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
@@ -7,7 +7,8 @@ obj-$(CONFIG_OCTEONTX2_PF) += rvu_nicpf.o
obj-$(CONFIG_OCTEONTX2_VF) += rvu_nicvf.o
rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \
- otx2_ptp.o otx2_flows.o otx2_tc.o cn10k.o otx2_dmac_flt.o
-rvu_nicvf-y := otx2_vf.o
+ otx2_ptp.o otx2_flows.o otx2_tc.o cn10k.o otx2_dmac_flt.o \
+ otx2_devlink.o
+rvu_nicvf-y := otx2_vf.o otx2_devlink.o
ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
index 184de9466286..ccffddad1233 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
@@ -92,8 +92,7 @@ int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
aq->sq.ena = 1;
/* Only one SMQ is allocated, map all SQ's to that SMQ */
aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
- /* FIXME: set based on NIX_AF_DWRR_RPM_MTU*/
- aq->sq.smq_rr_weight = pfvf->netdev->mtu;
+ aq->sq.smq_rr_weight = mtu_to_dwrr_weight(pfvf, pfvf->max_frs);
aq->sq.default_chan = pfvf->hw.tx_chan_base;
aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */
aq->sq.sqb_aura = sqb_aura;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h
index 1a1ae334477d..e07723d71a26 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h
@@ -9,6 +9,20 @@
#include "otx2_common.h"
+static inline int mtu_to_dwrr_weight(struct otx2_nic *pfvf, int mtu)
+{
+ u32 weight;
+
+ /* On OTx2, since AF returns DWRR_MTU as '1', this logic
+ * will work on those silicons as well.
+ */
+ weight = mtu / pfvf->hw.dwrr_mtu;
+ if (mtu % pfvf->hw.dwrr_mtu)
+ weight += 1;
+
+ return weight;
+}
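The helper rounds up, so with the default base of 8192 bytes (a sketch; values illustrative):

	/* assuming pfvf->hw.dwrr_mtu == 8192 */
	u32 w1 = mtu_to_dwrr_weight(pfvf, 1500);	/* 0 + 1 -> 1 */
	u32 w2 = mtu_to_dwrr_weight(pfvf, 9600);	/* 9600/8192 = 1, rem != 0 -> 2 */

On OTx2, where the AF reports dwrr_mtu as 1, the weight degenerates to the MTU in bytes, preserving the legacy quantum semantics.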
+
void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx);
int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index 70fcc1fd962f..ce799b7a8449 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -596,6 +596,9 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
struct otx2_hw *hw = &pfvf->hw;
struct nix_txschq_config *req;
u64 schq, parent;
+ u64 dwrr_val;
+
+ dwrr_val = mtu_to_dwrr_weight(pfvf, pfvf->max_frs);
req = otx2_mbox_alloc_msg_nix_txschq_cfg(&pfvf->mbox);
if (!req)
@@ -621,21 +624,21 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
req->num_regs++;
/* Set DWRR quantum */
req->reg[2] = NIX_AF_MDQX_SCHEDULE(schq);
- req->regval[2] = DFLT_RR_QTM;
+ req->regval[2] = dwrr_val;
} else if (lvl == NIX_TXSCH_LVL_TL4) {
parent = hw->txschq_list[NIX_TXSCH_LVL_TL3][0];
req->reg[0] = NIX_AF_TL4X_PARENT(schq);
req->regval[0] = parent << 16;
req->num_regs++;
req->reg[1] = NIX_AF_TL4X_SCHEDULE(schq);
- req->regval[1] = DFLT_RR_QTM;
+ req->regval[1] = dwrr_val;
} else if (lvl == NIX_TXSCH_LVL_TL3) {
parent = hw->txschq_list[NIX_TXSCH_LVL_TL2][0];
req->reg[0] = NIX_AF_TL3X_PARENT(schq);
req->regval[0] = parent << 16;
req->num_regs++;
req->reg[1] = NIX_AF_TL3X_SCHEDULE(schq);
- req->regval[1] = DFLT_RR_QTM;
+ req->regval[1] = dwrr_val;
} else if (lvl == NIX_TXSCH_LVL_TL2) {
parent = hw->txschq_list[NIX_TXSCH_LVL_TL1][0];
req->reg[0] = NIX_AF_TL2X_PARENT(schq);
@@ -643,7 +646,7 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
req->num_regs++;
req->reg[1] = NIX_AF_TL2X_SCHEDULE(schq);
- req->regval[1] = TXSCH_TL1_DFLT_RR_PRIO << 24 | DFLT_RR_QTM;
+ req->regval[1] = TXSCH_TL1_DFLT_RR_PRIO << 24 | dwrr_val;
req->num_regs++;
req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq,
@@ -656,7 +659,10 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
* For VF this is always ignored.
*/
- /* Set DWRR quantum */
+ /* On CN10K, if RR_WEIGHT is greater than 16384, HW will
+ * clip it to 16384, so configuring a 24-bit max value
+ * will work on both OTx2 and CN10K.
+ */
req->reg[0] = NIX_AF_TL1X_SCHEDULE(schq);
req->regval[0] = TXSCH_TL1_DFLT_RR_QTM;
@@ -803,7 +809,7 @@ int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
aq->sq.ena = 1;
/* Only one SMQ is allocated, map all SQ's to that SMQ */
aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
- aq->sq.smq_rr_quantum = DFLT_RR_QTM;
+ aq->sq.smq_rr_quantum = mtu_to_dwrr_weight(pfvf, pfvf->max_frs);
aq->sq.default_chan = pfvf->hw.tx_chan_base;
aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */
aq->sq.sqb_aura = sqb_aura;
@@ -1668,6 +1674,11 @@ u16 otx2_get_max_mtu(struct otx2_nic *pfvf)
* SMQ errors
*/
max_mtu = rsp->max_mtu - 8 - OTX2_ETH_HLEN;
+
+ /* Also save DWRR MTU, needed for DWRR weight calculation */
+ pfvf->hw.dwrr_mtu = rsp->rpm_dwrr_mtu;
+ if (!pfvf->hw.dwrr_mtu)
+ pfvf->hw.dwrr_mtu = 1;
}
out:
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 8fd58cd07f50..c4147b64e059 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -19,11 +19,13 @@
#include <linux/timecounter.h>
#include <linux/soc/marvell/octeontx2/asm.h>
#include <net/pkt_cls.h>
+#include <net/devlink.h>
#include <mbox.h>
#include <npc.h>
#include "otx2_reg.h"
#include "otx2_txrx.h"
+#include "otx2_devlink.h"
#include <rvu_trace.h>
/* PCI device IDs */
@@ -181,6 +183,7 @@ struct otx2_hw {
/* NIX */
u16 txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
u16 matchall_ipolicer;
+ u32 dwrr_mtu;
/* HW settings, coalescing etc */
u16 rx_chan_base;
@@ -267,7 +270,6 @@ struct otx2_mac_table {
};
struct otx2_flow_config {
- u16 entry[NPC_MAX_NONCONTIG_ENTRIES];
u16 *flow_ent;
u16 *def_ent;
u16 nr_flows;
@@ -278,16 +280,13 @@ struct otx2_flow_config {
#define OTX2_MCAM_COUNT (OTX2_DEFAULT_FLOWCOUNT + \
OTX2_MAX_UNICAST_FLOWS + \
OTX2_MAX_VLAN_FLOWS)
- u16 ntuple_offset;
u16 unicast_offset;
u16 rx_vlan_offset;
u16 vf_vlan_offset;
#define OTX2_PER_VF_VLAN_FLOWS 2 /* Rx + Tx per VF */
#define OTX2_VF_VLAN_RX_INDEX 0
#define OTX2_VF_VLAN_TX_INDEX 1
- u16 tc_flower_offset;
- u16 ntuple_max_flows;
- u16 tc_max_flows;
+ u16 max_flows;
u8 dmacflt_max_flows;
u8 *bmap_to_dmacindex;
unsigned long dmacflt_bmap;
@@ -298,8 +297,7 @@ struct otx2_tc_info {
/* hash table to store TC offloaded flows */
struct rhashtable flow_table;
struct rhashtable_params flow_ht_params;
- DECLARE_BITMAP(tc_entries_bitmap, OTX2_MAX_TC_FLOWS);
- unsigned long num_entries;
+ unsigned long *tc_entries_bitmap;
};
struct dev_hw_ops {
@@ -352,6 +350,11 @@ struct otx2_nic {
struct otx2_vf_config *vf_configs;
struct cgx_link_user_info linfo;
+ /* NPC MCAM */
+ struct otx2_flow_config *flow_cfg;
+ struct otx2_mac_table *mac_table;
+ struct otx2_tc_info tc_info;
+
u64 reset_count;
struct work_struct reset_task;
struct workqueue_struct *flr_wq;
@@ -359,7 +362,6 @@ struct otx2_nic {
struct refill_work *refill_wrk;
struct workqueue_struct *otx2_wq;
struct work_struct rx_mode_work;
- struct otx2_mac_table *mac_table;
/* Ethtool stuff */
u32 msg_enable;
@@ -375,9 +377,10 @@ struct otx2_nic {
struct otx2_ptp *ptp;
struct hwtstamp_config tstamp;
- struct otx2_flow_config *flow_cfg;
- struct otx2_tc_info tc_info;
unsigned long rq_bmap;
+
+ /* Devlink */
+ struct otx2_devlink *dl;
};
static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
@@ -709,6 +712,11 @@ MBOX_UP_CGX_MESSAGES
#define RVU_PFVF_FUNC_SHIFT 0
#define RVU_PFVF_FUNC_MASK 0x3FF
+static inline bool is_otx2_vf(u16 pcifunc)
+{
+ return !!(pcifunc & RVU_PFVF_FUNC_MASK);
+}
+
static inline int rvu_get_pf(u16 pcifunc)
{
return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
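
For context, pcifunc packs the PF and function numbers into one 16-bit value:
bits [9:0] carry the function (RVU_PFVF_FUNC_MASK above), and function 0 is
the PF itself, so any non-zero FUNC field identifies a VF. A hypothetical
decode, reusing the helpers above (the PF field layout comes from
rvu_get_pf(); the sample value assumes the 10-bit function field shown):

	/* Hypothetical: pcifunc for VF1 under PF1 would decode as below. */
	static void print_pcifunc(u16 pcifunc)
	{
		if (is_otx2_vf(pcifunc))
			pr_info("VF%d of PF%d\n",
				(pcifunc & RVU_PFVF_FUNC_MASK) - 1,
				rvu_get_pf(pcifunc));
		else
			pr_info("PF%d\n", rvu_get_pf(pcifunc));
	}
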
@@ -814,7 +822,8 @@ int otx2_set_real_num_queues(struct net_device *netdev,
int tx_queues, int rx_queues);
/* MCAM filter related APIs */
int otx2_mcam_flow_init(struct otx2_nic *pf);
-int otx2_alloc_mcam_entries(struct otx2_nic *pfvf);
+int otx2vf_mcam_flow_init(struct otx2_nic *pfvf);
+int otx2_alloc_mcam_entries(struct otx2_nic *pfvf, u16 count);
void otx2_mcam_flow_del(struct otx2_nic *pf);
int otx2_destroy_ntuple_flows(struct otx2_nic *pf);
int otx2_destroy_mcam_flows(struct otx2_nic *pfvf);
@@ -827,6 +836,7 @@ int otx2_add_flow(struct otx2_nic *pfvf,
int otx2_remove_flow(struct otx2_nic *pfvf, u32 location);
int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
struct npc_install_flow_req *req);
+int otx2_get_maxflows(struct otx2_flow_config *flow_cfg);
void otx2_rss_ctx_flow_del(struct otx2_nic *pfvf, int ctx_id);
int otx2_del_macfilter(struct net_device *netdev, const u8 *mac);
int otx2_add_macfilter(struct net_device *netdev, const u8 *mac);
@@ -838,6 +848,7 @@ int otx2_init_tc(struct otx2_nic *nic);
void otx2_shutdown_tc(struct otx2_nic *nic);
int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
void *type_data);
+int otx2_tc_alloc_ent_bitmap(struct otx2_nic *nic);
/* CGX/RPM DMAC filters support */
int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf);
int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u8 bit_pos);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
new file mode 100644
index 000000000000..7ac3ef2fa06a
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU PF/VF Netdev Devlink
+ *
+ * Copyright (C) 2021 Marvell.
+ */
+
+#include "otx2_common.h"
+
+/* Devlink Params APIs */
+static int otx2_dl_mcam_count_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct otx2_devlink *otx2_dl = devlink_priv(devlink);
+ struct otx2_nic *pfvf = otx2_dl->pfvf;
+ struct otx2_flow_config *flow_cfg;
+
+ if (!pfvf->flow_cfg) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "pfvf->flow_cfg not initialized");
+ return -EINVAL;
+ }
+
+ flow_cfg = pfvf->flow_cfg;
+ if (flow_cfg && flow_cfg->nr_flows) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot modify count when there are active rules");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int otx2_dl_mcam_count_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct otx2_devlink *otx2_dl = devlink_priv(devlink);
+ struct otx2_nic *pfvf = otx2_dl->pfvf;
+
+ if (!pfvf->flow_cfg)
+ return 0;
+
+ otx2_alloc_mcam_entries(pfvf, ctx->val.vu16);
+ otx2_tc_alloc_ent_bitmap(pfvf);
+
+ return 0;
+}
+
+static int otx2_dl_mcam_count_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct otx2_devlink *otx2_dl = devlink_priv(devlink);
+ struct otx2_nic *pfvf = otx2_dl->pfvf;
+ struct otx2_flow_config *flow_cfg;
+
+ if (!pfvf->flow_cfg) {
+ ctx->val.vu16 = 0;
+ return 0;
+ }
+
+ flow_cfg = pfvf->flow_cfg;
+ ctx->val.vu16 = flow_cfg->max_flows;
+
+ return 0;
+}
+
+enum otx2_dl_param_id {
+ OTX2_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+ OTX2_DEVLINK_PARAM_ID_MCAM_COUNT,
+};
+
+static const struct devlink_param otx2_dl_params[] = {
+ DEVLINK_PARAM_DRIVER(OTX2_DEVLINK_PARAM_ID_MCAM_COUNT,
+ "mcam_count", DEVLINK_PARAM_TYPE_U16,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ otx2_dl_mcam_count_get, otx2_dl_mcam_count_set,
+ otx2_dl_mcam_count_validate),
+};
+
+/* Devlink OPs */
+static int otx2_devlink_info_get(struct devlink *devlink,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct otx2_devlink *otx2_dl = devlink_priv(devlink);
+ struct otx2_nic *pfvf = otx2_dl->pfvf;
+
+ if (is_otx2_vf(pfvf->pcifunc))
+ return devlink_info_driver_name_put(req, "rvu_nicvf");
+
+ return devlink_info_driver_name_put(req, "rvu_nicpf");
+}
+
+static const struct devlink_ops otx2_devlink_ops = {
+ .info_get = otx2_devlink_info_get,
+};
+
+int otx2_register_dl(struct otx2_nic *pfvf)
+{
+ struct otx2_devlink *otx2_dl;
+ struct devlink *dl;
+ int err;
+
+ dl = devlink_alloc(&otx2_devlink_ops,
+ sizeof(struct otx2_devlink), pfvf->dev);
+ if (!dl) {
+ dev_warn(pfvf->dev, "devlink_alloc failed\n");
+ return -ENOMEM;
+ }
+
+ err = devlink_register(dl);
+ if (err) {
+ dev_err(pfvf->dev, "devlink register failed with error %d\n", err);
+ devlink_free(dl);
+ return err;
+ }
+
+ otx2_dl = devlink_priv(dl);
+ otx2_dl->dl = dl;
+ otx2_dl->pfvf = pfvf;
+ pfvf->dl = otx2_dl;
+
+ err = devlink_params_register(dl, otx2_dl_params,
+ ARRAY_SIZE(otx2_dl_params));
+ if (err) {
+ dev_err(pfvf->dev,
+ "devlink params register failed with error %d", err);
+ goto err_dl;
+ }
+
+ devlink_params_publish(dl);
+
+ return 0;
+
+err_dl:
+ devlink_unregister(dl);
+ devlink_free(dl);
+ return err;
+}
+
+void otx2_unregister_dl(struct otx2_nic *pfvf)
+{
+ struct otx2_devlink *otx2_dl = pfvf->dl;
+ struct devlink *dl;
+
+ if (!otx2_dl || !otx2_dl->dl)
+ return;
+
+ dl = otx2_dl->dl;
+
+ devlink_params_unregister(dl, otx2_dl_params,
+ ARRAY_SIZE(otx2_dl_params));
+
+ devlink_unregister(dl);
+ devlink_free(dl);
+}
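
Once wired up via otx2_register_dl(), the MCAM entry budget becomes a
runtime-tunable from userspace with the standard devlink tool, for example
(device address hypothetical):

	devlink dev param set pci/0002:02:00.0 name mcam_count value 256 cmode runtime

otx2_dl_mcam_count_validate() above rejects the change while any ntuple/TC
rules are installed, so active rules have to be removed first.
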
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.h
new file mode 100644
index 000000000000..c7bd4f3c6c6b
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU PF/VF Netdev Devlink
+ *
+ * Copyright (C) 2021 Marvell.
+ *
+ */
+
+#ifndef OTX2_DEVLINK_H
+#define OTX2_DEVLINK_H
+
+struct otx2_devlink {
+ struct devlink *dl;
+ struct otx2_nic *pfvf;
+};
+
+/* Devlink APIs */
+int otx2_register_dl(struct otx2_nic *pfvf);
+void otx2_unregister_dl(struct otx2_nic *pfvf);
+
+#endif /* OTX2_DEVLINK_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index b906a0eb6e0d..620da08db317 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -645,6 +645,7 @@ static int otx2_set_rss_hash_opts(struct otx2_nic *pfvf,
static int otx2_get_rxnfc(struct net_device *dev,
struct ethtool_rxnfc *nfc, u32 *rules)
{
+ bool ntuple = !!(dev->features & NETIF_F_NTUPLE);
struct otx2_nic *pfvf = netdev_priv(dev);
int ret = -EOPNOTSUPP;
@@ -654,14 +655,18 @@ static int otx2_get_rxnfc(struct net_device *dev,
ret = 0;
break;
case ETHTOOL_GRXCLSRLCNT:
- nfc->rule_cnt = pfvf->flow_cfg->nr_flows;
- ret = 0;
+ if (netif_running(dev) && ntuple) {
+ nfc->rule_cnt = pfvf->flow_cfg->nr_flows;
+ ret = 0;
+ }
break;
case ETHTOOL_GRXCLSRULE:
- ret = otx2_get_flow(pfvf, nfc, nfc->fs.location);
+ if (netif_running(dev) && ntuple)
+ ret = otx2_get_flow(pfvf, nfc, nfc->fs.location);
break;
case ETHTOOL_GRXCLSRLALL:
- ret = otx2_get_all_flows(pfvf, nfc, rules);
+ if (netif_running(dev) && ntuple)
+ ret = otx2_get_all_flows(pfvf, nfc, rules);
break;
case ETHTOOL_GRXFH:
return otx2_get_rss_hash_opts(pfvf, nfc);
@@ -696,41 +701,6 @@ static int otx2_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *nfc)
return ret;
}
-static int otx2vf_get_rxnfc(struct net_device *dev,
- struct ethtool_rxnfc *nfc, u32 *rules)
-{
- struct otx2_nic *pfvf = netdev_priv(dev);
- int ret = -EOPNOTSUPP;
-
- switch (nfc->cmd) {
- case ETHTOOL_GRXRINGS:
- nfc->data = pfvf->hw.rx_queues;
- ret = 0;
- break;
- case ETHTOOL_GRXFH:
- return otx2_get_rss_hash_opts(pfvf, nfc);
- default:
- break;
- }
- return ret;
-}
-
-static int otx2vf_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *nfc)
-{
- struct otx2_nic *pfvf = netdev_priv(dev);
- int ret = -EOPNOTSUPP;
-
- switch (nfc->cmd) {
- case ETHTOOL_SRXFH:
- ret = otx2_set_rss_hash_opts(pfvf, nfc);
- break;
- default:
- break;
- }
-
- return ret;
-}
-
static u32 otx2_get_rxfh_key_size(struct net_device *netdev)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
@@ -1357,8 +1327,8 @@ static const struct ethtool_ops otx2vf_ethtool_ops = {
.get_sset_count = otx2vf_get_sset_count,
.set_channels = otx2_set_channels,
.get_channels = otx2_get_channels,
- .get_rxnfc = otx2vf_get_rxnfc,
- .set_rxnfc = otx2vf_set_rxnfc,
+ .get_rxnfc = otx2_get_rxnfc,
+ .set_rxnfc = otx2_set_rxnfc,
.get_rxfh_key_size = otx2_get_rxfh_key_size,
.get_rxfh_indir_size = otx2_get_rxfh_indir_size,
.get_rxfh = otx2_get_rxfh,
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
index 4d9de525802d..2a25588a01ed 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
@@ -5,11 +5,14 @@
*/
#include <net/ipv6.h>
+#include <linux/sort.h>
#include "otx2_common.h"
#define OTX2_DEFAULT_ACTION 0x1
+static int otx2_mcam_entry_init(struct otx2_nic *pfvf);
+
struct otx2_flow {
struct ethtool_rx_flow_spec flow_spec;
struct list_head list;
@@ -30,8 +33,7 @@ static void otx2_clear_ntuple_flow_info(struct otx2_nic *pfvf, struct otx2_flow_
{
devm_kfree(pfvf->dev, flow_cfg->flow_ent);
flow_cfg->flow_ent = NULL;
- flow_cfg->ntuple_max_flows = 0;
- flow_cfg->tc_max_flows = 0;
+ flow_cfg->max_flows = 0;
}
static int otx2_free_ntuple_mcam_entries(struct otx2_nic *pfvf)
@@ -40,11 +42,11 @@ static int otx2_free_ntuple_mcam_entries(struct otx2_nic *pfvf)
struct npc_mcam_free_entry_req *req;
int ent, err;
- if (!flow_cfg->ntuple_max_flows)
+ if (!flow_cfg->max_flows)
return 0;
mutex_lock(&pfvf->mbox.lock);
- for (ent = 0; ent < flow_cfg->ntuple_max_flows; ent++) {
+ for (ent = 0; ent < flow_cfg->max_flows; ent++) {
req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&pfvf->mbox);
if (!req)
break;
@@ -61,7 +63,12 @@ static int otx2_free_ntuple_mcam_entries(struct otx2_nic *pfvf)
return 0;
}
-static int otx2_alloc_ntuple_mcam_entries(struct otx2_nic *pfvf, u16 count)
+static int mcam_entry_cmp(const void *a, const void *b)
+{
+ return *(u16 *)a - *(u16 *)b;
+}
+
+int otx2_alloc_mcam_entries(struct otx2_nic *pfvf, u16 count)
{
struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
struct npc_mcam_alloc_entry_req *req;
@@ -76,8 +83,12 @@ static int otx2_alloc_ntuple_mcam_entries(struct otx2_nic *pfvf, u16 count)
flow_cfg->flow_ent = devm_kmalloc_array(pfvf->dev, count,
sizeof(u16), GFP_KERNEL);
- if (!flow_cfg->flow_ent)
+ if (!flow_cfg->flow_ent) {
+ netdev_err(pfvf->netdev,
+ "%s: Unable to allocate memory for flow entries\n",
+ __func__);
return -ENOMEM;
+ }
mutex_lock(&pfvf->mbox.lock);
@@ -92,8 +103,14 @@ static int otx2_alloc_ntuple_mcam_entries(struct otx2_nic *pfvf, u16 count)
req->contig = false;
req->count = (count - allocated) > NPC_MAX_NONCONTIG_ENTRIES ?
NPC_MAX_NONCONTIG_ENTRIES : count - allocated;
- req->priority = NPC_MCAM_HIGHER_PRIO;
- req->ref_entry = flow_cfg->def_ent[0];
+
+ /* Allocate higher priority entries for PFs, so that VF entries
+ * end up above the PF's entries.
+ */
+ if (!is_otx2_vf(pfvf->pcifunc)) {
+ req->priority = NPC_MCAM_HIGHER_PRIO;
+ req->ref_entry = flow_cfg->def_ent[0];
+ }
/* Send message to AF */
if (otx2_sync_mbox_msg(&pfvf->mbox))
@@ -114,22 +131,34 @@ static int otx2_alloc_ntuple_mcam_entries(struct otx2_nic *pfvf, u16 count)
break;
}
+ /* Multiple MCAM entry alloc requests could result in non-sequential
+ * MCAM entries in the flow_ent[] array. Sort them in ascending order,
+ * otherwise the user-installed ntuple filter index and the MCAM entry
+ * index will not be in sync.
+ */
+ if (allocated)
+ sort(&flow_cfg->flow_ent[0], allocated,
+ sizeof(flow_cfg->flow_ent[0]), mcam_entry_cmp, NULL);
+
exit:
mutex_unlock(&pfvf->mbox.lock);
- flow_cfg->ntuple_offset = 0;
- flow_cfg->ntuple_max_flows = allocated;
- flow_cfg->tc_max_flows = allocated;
+ flow_cfg->max_flows = allocated;
+
+ if (allocated) {
+ pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC;
+ pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT;
+ }
if (allocated != count)
netdev_info(pfvf->netdev,
- "Unable to allocate %d MCAM entries for ntuple, got %d\n",
+ "Unable to allocate %d MCAM entries, got only %d\n",
count, allocated);
-
return allocated;
}
+EXPORT_SYMBOL(otx2_alloc_mcam_entries);
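
To make the sorting comment above concrete, here is a hypothetical allocation
outcome and how sort() with mcam_entry_cmp() normalizes it (entry numbers
invented for illustration):

	static void sort_example(void)
	{
		/* Two mailbox allocations returned out-of-order,
		 * non-contiguous MCAM indices (hypothetical values).
		 */
		u16 flow_ent[5] = { 220, 221, 222, 150, 151 };

		sort(flow_ent, 5, sizeof(flow_ent[0]), mcam_entry_cmp, NULL);
		/* flow_ent == { 150, 151, 220, 221, 222 }: ethtool filter
		 * location N now always maps to the N-th lowest MCAM index.
		 */
	}
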
-int otx2_alloc_mcam_entries(struct otx2_nic *pfvf)
+static int otx2_mcam_entry_init(struct otx2_nic *pfvf)
{
struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
struct npc_mcam_alloc_entry_req *req;
@@ -189,18 +218,35 @@ int otx2_alloc_mcam_entries(struct otx2_nic *pfvf)
mutex_unlock(&pfvf->mbox.lock);
/* Allocate entries for Ntuple filters */
- count = otx2_alloc_ntuple_mcam_entries(pfvf, OTX2_DEFAULT_FLOWCOUNT);
+ count = otx2_alloc_mcam_entries(pfvf, OTX2_DEFAULT_FLOWCOUNT);
if (count <= 0) {
otx2_clear_ntuple_flow_info(pfvf, flow_cfg);
return 0;
}
- pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT;
pfvf->flags |= OTX2_FLAG_TC_FLOWER_SUPPORT;
return 0;
}
+int otx2vf_mcam_flow_init(struct otx2_nic *pfvf)
+{
+ struct otx2_flow_config *flow_cfg;
+
+ pfvf->flow_cfg = devm_kzalloc(pfvf->dev,
+ sizeof(struct otx2_flow_config),
+ GFP_KERNEL);
+ if (!pfvf->flow_cfg)
+ return -ENOMEM;
+
+ flow_cfg = pfvf->flow_cfg;
+ INIT_LIST_HEAD(&flow_cfg->flow_list);
+ flow_cfg->max_flows = 0;
+
+ return 0;
+}
+EXPORT_SYMBOL(otx2vf_mcam_flow_init);
+
int otx2_mcam_flow_init(struct otx2_nic *pf)
{
int err;
@@ -212,7 +258,10 @@ int otx2_mcam_flow_init(struct otx2_nic *pf)
INIT_LIST_HEAD(&pf->flow_cfg->flow_list);
- err = otx2_alloc_mcam_entries(pf);
+ /* Allocate the bare minimum number of MCAM entries needed for
+ * unicast and ntuple filters.
+ */
+ err = otx2_mcam_entry_init(pf);
if (err)
return err;
@@ -248,6 +297,7 @@ void otx2_mcam_flow_del(struct otx2_nic *pf)
{
otx2_destroy_mcam_flows(pf);
}
+EXPORT_SYMBOL(otx2_mcam_flow_del);
/* On success adds mcam entry
 * On failure enable promiscuous mode
@@ -379,15 +429,19 @@ static void otx2_add_flow_to_list(struct otx2_nic *pfvf, struct otx2_flow *flow)
list_add(&flow->list, head);
}
-static int otx2_get_maxflows(struct otx2_flow_config *flow_cfg)
+int otx2_get_maxflows(struct otx2_flow_config *flow_cfg)
{
- if (flow_cfg->nr_flows == flow_cfg->ntuple_max_flows ||
+ if (!flow_cfg)
+ return 0;
+
+ if (flow_cfg->nr_flows == flow_cfg->max_flows ||
bitmap_weight(&flow_cfg->dmacflt_bmap,
flow_cfg->dmacflt_max_flows))
- return flow_cfg->ntuple_max_flows + flow_cfg->dmacflt_max_flows;
+ return flow_cfg->max_flows + flow_cfg->dmacflt_max_flows;
else
- return flow_cfg->ntuple_max_flows;
+ return flow_cfg->max_flows;
}
+EXPORT_SYMBOL(otx2_get_maxflows);
int otx2_get_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
u32 location)
@@ -732,7 +786,7 @@ int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
ether_addr_copy(pmask->dmac, eth_mask->h_dest);
req->features |= BIT_ULL(NPC_DMAC);
}
- if (eth_mask->h_proto) {
+ if (eth_hdr->h_proto) {
memcpy(&pkt->etype, &eth_hdr->h_proto,
sizeof(pkt->etype));
memcpy(&pmask->etype, &eth_mask->h_proto,
@@ -767,11 +821,6 @@ int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
if (fsp->m_ext.vlan_etype)
return -EINVAL;
if (fsp->m_ext.vlan_tci) {
- if (fsp->m_ext.vlan_tci != cpu_to_be16(VLAN_VID_MASK))
- return -EINVAL;
- if (be16_to_cpu(fsp->h_ext.vlan_tci) >= VLAN_N_VID)
- return -EINVAL;
-
memcpy(&pkt->vlan_tci, &fsp->h_ext.vlan_tci,
sizeof(pkt->vlan_tci));
memcpy(&pmask->vlan_tci, &fsp->m_ext.vlan_tci,
@@ -894,7 +943,7 @@ static int otx2_add_flow_with_pfmac(struct otx2_nic *pfvf,
pf_mac->entry = 0;
pf_mac->dmac_filter = true;
- pf_mac->location = pfvf->flow_cfg->ntuple_max_flows;
+ pf_mac->location = pfvf->flow_cfg->max_flows;
memcpy(&pf_mac->flow_spec, &flow->flow_spec,
sizeof(struct ethtool_rx_flow_spec));
pf_mac->flow_spec.location = pf_mac->location;
@@ -975,7 +1024,7 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
flow->dmac_filter = true;
flow->entry = find_first_zero_bit(&flow_cfg->dmacflt_bmap,
flow_cfg->dmacflt_max_flows);
- fsp->location = flow_cfg->ntuple_max_flows + flow->entry;
+ fsp->location = flow_cfg->max_flows + flow->entry;
flow->flow_spec.location = fsp->location;
flow->location = fsp->location;
@@ -983,11 +1032,11 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
otx2_dmacflt_add(pfvf, eth_hdr->h_dest, flow->entry);
} else {
- if (flow->location >= pfvf->flow_cfg->ntuple_max_flows) {
+ if (flow->location >= pfvf->flow_cfg->max_flows) {
netdev_warn(pfvf->netdev,
"Can't insert non dmac ntuple rule at %d, allowed range %d-0\n",
flow->location,
- flow_cfg->ntuple_max_flows - 1);
+ flow_cfg->max_flows - 1);
err = -EINVAL;
} else {
flow->entry = flow_cfg->flow_ent[flow->location];
@@ -996,6 +1045,8 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
}
if (err) {
+ if (err == MBOX_MSG_INVALID)
+ err = -EINVAL;
if (new)
kfree(flow);
return err;
@@ -1140,7 +1191,7 @@ int otx2_destroy_ntuple_flows(struct otx2_nic *pfvf)
}
req->start = flow_cfg->flow_ent[0];
- req->end = flow_cfg->flow_ent[flow_cfg->ntuple_max_flows - 1];
+ req->end = flow_cfg->flow_ent[flow_cfg->max_flows - 1];
err = otx2_sync_mbox_msg(&pfvf->mbox);
mutex_unlock(&pfvf->mbox.lock);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 2c24944a4dba..7dd56c9392ab 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -1787,17 +1787,10 @@ static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
static netdev_features_t otx2_fix_features(struct net_device *dev,
netdev_features_t features)
{
- /* check if n-tuple filters are ON */
- if ((features & NETIF_F_HW_TC) && (dev->features & NETIF_F_NTUPLE)) {
- netdev_info(dev, "Disabling n-tuple filters\n");
- features &= ~NETIF_F_NTUPLE;
- }
-
- /* check if tc hw offload is ON */
- if ((features & NETIF_F_NTUPLE) && (dev->features & NETIF_F_HW_TC)) {
- netdev_info(dev, "Disabling TC hardware offload\n");
- features &= ~NETIF_F_HW_TC;
- }
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ features |= NETIF_F_HW_VLAN_STAG_RX;
+ else
+ features &= ~NETIF_F_HW_VLAN_STAG_RX;
return features;
}
@@ -1854,6 +1847,7 @@ static int otx2_set_features(struct net_device *netdev,
netdev_features_t changed = features ^ netdev->features;
bool ntuple = !!(features & NETIF_F_NTUPLE);
struct otx2_nic *pf = netdev_priv(netdev);
+ bool tc = !!(features & NETIF_F_HW_TC);
if ((changed & NETIF_F_LOOPBACK) && netif_running(netdev))
return otx2_cgx_config_loopback(pf,
@@ -1866,12 +1860,26 @@ static int otx2_set_features(struct net_device *netdev,
if ((changed & NETIF_F_NTUPLE) && !ntuple)
otx2_destroy_ntuple_flows(pf);
- if ((netdev->features & NETIF_F_HW_TC) > (features & NETIF_F_HW_TC) &&
- pf->tc_info.num_entries) {
+ if ((changed & NETIF_F_HW_TC) && !tc &&
+ pf->flow_cfg && pf->flow_cfg->nr_flows) {
netdev_err(netdev, "Can't disable TC hardware offload while flows are active\n");
return -EBUSY;
}
+ if ((changed & NETIF_F_NTUPLE) && ntuple &&
+ (netdev->features & NETIF_F_HW_TC) && !(changed & NETIF_F_HW_TC)) {
+ netdev_err(netdev,
+ "Can't enable NTUPLE when TC is active, disable TC and retry\n");
+ return -EINVAL;
+ }
+
+ if ((changed & NETIF_F_HW_TC) && tc &&
+ (netdev->features & NETIF_F_NTUPLE) && !(changed & NETIF_F_NTUPLE)) {
+ netdev_err(netdev,
+ "Can't enable TC when NTUPLE is active, disable NTUPLE and retry\n");
+ return -EINVAL;
+ }
+
return 0;
}
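
The checks above all key off the XOR of the old and requested feature sets,
so a bit participates only when this ndo_set_features call actually flips it.
A worked example of the rejected combination (values hypothetical, a sketch
rather than driver code):

	static int set_features_example(void)
	{
		netdev_features_t old = NETIF_F_HW_TC;	/* TC already on */
		netdev_features_t req = old | NETIF_F_NTUPLE; /* add NTUPLE */
		netdev_features_t changed = req ^ old;	/* == NETIF_F_NTUPLE */

		/* NTUPLE flipped on while HW_TC stays set and unchanged,
		 * matching the second check above, so both offloads are
		 * never active at once.
		 */
		if ((changed & NETIF_F_NTUPLE) &&
		    (old & NETIF_F_HW_TC) && !(changed & NETIF_F_HW_TC))
			return -EINVAL;
		return 0;
	}
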
@@ -2331,7 +2339,7 @@ static const struct net_device_ops otx2_netdev_ops = {
.ndo_set_features = otx2_set_features,
.ndo_tx_timeout = otx2_tx_timeout,
.ndo_get_stats64 = otx2_get_stats64,
- .ndo_do_ioctl = otx2_ioctl,
+ .ndo_eth_ioctl = otx2_ioctl,
.ndo_set_vf_mac = otx2_set_vf_mac,
.ndo_set_vf_vlan = otx2_set_vf_vlan,
.ndo_get_vf_config = otx2_get_vf_config,
@@ -2569,8 +2577,6 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
NETIF_F_GSO_UDP_L4);
netdev->features |= netdev->hw_features;
- netdev->hw_features |= NETIF_F_LOOPBACK | NETIF_F_RXALL;
-
err = otx2_mcam_flow_init(pf);
if (err)
goto err_ptp_destroy;
@@ -2594,12 +2600,13 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (pf->flags & OTX2_FLAG_TC_FLOWER_SUPPORT)
netdev->hw_features |= NETIF_F_HW_TC;
+ netdev->hw_features |= NETIF_F_LOOPBACK | NETIF_F_RXALL;
+
netdev->gso_max_segs = OTX2_MAX_GSO_SEGS;
netdev->watchdog_timeo = OTX2_TX_TIMEOUT;
netdev->netdev_ops = &otx2_netdev_ops;
- /* MTU range: 64 - 9190 */
netdev->min_mtu = OTX2_MIN_MTU;
netdev->max_mtu = otx2_get_max_mtu(pf);
@@ -2619,6 +2626,10 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_mcam_flow_del;
+ err = otx2_register_dl(pf);
+ if (err)
+ goto err_mcam_flow_del;
+
/* Initialize SR-IOV resources */
err = otx2_sriov_vfcfg_init(pf);
if (err)
@@ -2776,6 +2787,7 @@ static void otx2_remove(struct pci_dev *pdev)
/* Disable link notifications */
otx2_cgx_config_linkevents(pf, false);
+ otx2_unregister_dl(pf);
unregister_netdev(netdev);
otx2_sriov_disable(pf->pdev);
otx2_sriov_vfcfg_cleanup(pf);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
index 972b202b9884..81840b625c68 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
@@ -52,6 +52,29 @@ struct otx2_tc_flow {
bool is_act_police;
};
+int otx2_tc_alloc_ent_bitmap(struct otx2_nic *nic)
+{
+ struct otx2_tc_info *tc = &nic->tc_info;
+
+ if (!nic->flow_cfg->max_flows || is_otx2_vf(nic->pcifunc))
+ return 0;
+
+ /* Max flows changed, free the existing bitmap */
+ kfree(tc->tc_entries_bitmap);
+
+ tc->tc_entries_bitmap =
+ kcalloc(BITS_TO_LONGS(nic->flow_cfg->max_flows),
+ sizeof(long), GFP_KERNEL);
+ if (!tc->tc_entries_bitmap) {
+ netdev_err(nic->netdev,
+ "Unable to alloc TC flow entries bitmap\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(otx2_tc_alloc_ent_bitmap);
+
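
The bitmap above is sized in whole longs; BITS_TO_LONGS() rounds the bit
count up, as in this sizing sketch (numbers illustrative):

	static unsigned long *alloc_flow_bitmap(u16 max_flows)
	{
		/* e.g. max_flows == 100 -> BITS_TO_LONGS(100) == 2 longs on
		 * a 64-bit kernel (128 bits); the surplus bits stay clear.
		 */
		return kcalloc(BITS_TO_LONGS(max_flows), sizeof(long),
			       GFP_KERNEL);
	}
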
static void otx2_get_egress_burst_cfg(u32 burst, u32 *burst_exp,
u32 *burst_mantissa)
{
@@ -596,6 +619,7 @@ static int otx2_del_mcam_flow_entry(struct otx2_nic *nic, u16 entry)
static int otx2_tc_del_flow(struct otx2_nic *nic,
struct flow_cls_offload *tc_flow_cmd)
{
+ struct otx2_flow_config *flow_cfg = nic->flow_cfg;
struct otx2_tc_info *tc_info = &nic->tc_info;
struct otx2_tc_flow *flow_node;
int err;
@@ -638,7 +662,7 @@ static int otx2_tc_del_flow(struct otx2_nic *nic,
kfree_rcu(flow_node, rcu);
clear_bit(flow_node->bitpos, tc_info->tc_entries_bitmap);
- tc_info->num_entries--;
+ flow_cfg->nr_flows--;
return 0;
}
@@ -647,6 +671,7 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
struct flow_cls_offload *tc_flow_cmd)
{
struct netlink_ext_ack *extack = tc_flow_cmd->common.extack;
+ struct otx2_flow_config *flow_cfg = nic->flow_cfg;
struct otx2_tc_info *tc_info = &nic->tc_info;
struct otx2_tc_flow *new_node, *old_node;
struct npc_install_flow_req *req, dummy;
@@ -655,9 +680,9 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
if (!(nic->flags & OTX2_FLAG_TC_FLOWER_SUPPORT))
return -ENOMEM;
- if (bitmap_full(tc_info->tc_entries_bitmap, nic->flow_cfg->tc_max_flows)) {
+ if (bitmap_full(tc_info->tc_entries_bitmap, flow_cfg->max_flows)) {
NL_SET_ERR_MSG_MOD(extack,
- "Not enough MCAM space to add the flow");
+ "Free MCAM entry not available to add the flow");
return -ENOMEM;
}
@@ -695,10 +720,9 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
memcpy(req, &dummy, sizeof(struct npc_install_flow_req));
new_node->bitpos = find_first_zero_bit(tc_info->tc_entries_bitmap,
- nic->flow_cfg->tc_max_flows);
+ flow_cfg->max_flows);
req->channel = nic->hw.rx_chan_base;
- req->entry = nic->flow_cfg->flow_ent[nic->flow_cfg->tc_flower_offset +
- nic->flow_cfg->tc_max_flows - new_node->bitpos];
+ req->entry = flow_cfg->flow_ent[flow_cfg->max_flows - new_node->bitpos - 1];
req->intf = NIX_INTF_RX;
req->set_cntr = 1;
new_node->entry = req->entry;
@@ -723,7 +747,7 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
}
set_bit(new_node->bitpos, tc_info->tc_entries_bitmap);
- tc_info->num_entries++;
+ flow_cfg->nr_flows++;
return 0;
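
The entry-index math above hands TC flows the tail of the shared flow_ent[]
pool, while ntuple filters (otx2_add_flow()) consume it from the head. A
hypothetical helper, not in the driver, just to spell out the arithmetic:

	/* With max_flows == 8: bitpos 0 -> flow_ent[7],
	 * bitpos 1 -> flow_ent[6], ... so TC and ntuple allocations only
	 * meet when the pool is exhausted.
	 */
	static u16 tc_flow_ent_idx(u16 max_flows, u16 bitpos)
	{
		return max_flows - bitpos - 1;
	}
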
@@ -1008,10 +1032,21 @@ static const struct rhashtable_params tc_flow_ht_params = {
int otx2_init_tc(struct otx2_nic *nic)
{
struct otx2_tc_info *tc = &nic->tc_info;
+ int err;
/* Exclude receive queue 0 being used for police action */
set_bit(0, &nic->rq_bmap);
+ if (!nic->flow_cfg) {
+ netdev_err(nic->netdev,
+ "Can't init TC, nic->flow_cfg is not setup\n");
+ return -EINVAL;
+ }
+
+ err = otx2_tc_alloc_ent_bitmap(nic);
+ if (err)
+ return err;
+
tc->flow_ht_params = tc_flow_ht_params;
return rhashtable_init(&tc->flow_table, &tc->flow_ht_params);
}
@@ -1020,5 +1055,6 @@ void otx2_shutdown_tc(struct otx2_nic *nic)
{
struct otx2_tc_info *tc = &nic->tc_info;
+ kfree(tc->tc_entries_bitmap);
rhashtable_destroy(&tc->flow_table);
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index a8bee5aefec1..58b912653ac2 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -464,6 +464,28 @@ static void otx2vf_reset_task(struct work_struct *work)
rtnl_unlock();
}
+static int otx2vf_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ netdev_features_t changed = features ^ netdev->features;
+ bool ntuple_enabled = !!(features & NETIF_F_NTUPLE);
+ struct otx2_nic *vf = netdev_priv(netdev);
+
+ if (changed & NETIF_F_NTUPLE) {
+ if (!ntuple_enabled) {
+ otx2_mcam_flow_del(vf);
+ return 0;
+ }
+
+ if (!otx2_get_maxflows(vf->flow_cfg)) {
+ netdev_err(netdev,
+ "Can't enable NTUPLE, MCAM entries not allocated\n");
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
static const struct net_device_ops otx2vf_netdev_ops = {
.ndo_open = otx2vf_open,
.ndo_stop = otx2vf_stop,
@@ -471,6 +493,7 @@ static const struct net_device_ops otx2vf_netdev_ops = {
.ndo_set_rx_mode = otx2vf_set_rx_mode,
.ndo_set_mac_address = otx2_set_mac_address,
.ndo_change_mtu = otx2vf_change_mtu,
+ .ndo_set_features = otx2vf_set_features,
.ndo_get_stats64 = otx2_get_stats64,
.ndo_tx_timeout = otx2_tx_timeout,
};
@@ -627,12 +650,14 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
NETIF_F_HW_VLAN_STAG_TX;
netdev->features |= netdev->hw_features;
+ netdev->hw_features |= NETIF_F_NTUPLE;
+ netdev->hw_features |= NETIF_F_RXALL;
+
netdev->gso_max_segs = OTX2_MAX_GSO_SEGS;
netdev->watchdog_timeo = OTX2_TX_TIMEOUT;
netdev->netdev_ops = &otx2vf_netdev_ops;
- /* MTU range: 68 - 9190 */
netdev->min_mtu = OTX2_MIN_MTU;
netdev->max_mtu = otx2_get_max_mtu(vf);
@@ -658,6 +683,14 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
otx2vf_set_ethtool_ops(netdev);
+ err = otx2vf_mcam_flow_init(vf);
+ if (err)
+ goto err_unreg_netdev;
+
+ err = otx2_register_dl(vf);
+ if (err)
+ goto err_unreg_netdev;
+
/* Enable pause frames by default */
vf->flags |= OTX2_FLAG_RX_PAUSE_ENABLED;
vf->flags |= OTX2_FLAG_TX_PAUSE_ENABLED;
@@ -695,6 +728,7 @@ static void otx2vf_remove(struct pci_dev *pdev)
vf = netdev_priv(netdev);
cancel_work_sync(&vf->reset_task);
+ otx2_unregister_dl(vf);
unregister_netdev(netdev);
if (vf->otx2_wq)
destroy_workqueue(vf->otx2_wq);
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_devlink.c b/drivers/net/ethernet/marvell/prestera/prestera_devlink.c
index fa7a0682ad1e..68b442eb6d69 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_devlink.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_devlink.c
@@ -390,11 +390,12 @@ static const struct devlink_ops prestera_dl_ops = {
.trap_drop_counter_get = prestera_drop_counter_get,
};
-struct prestera_switch *prestera_devlink_alloc(void)
+struct prestera_switch *prestera_devlink_alloc(struct prestera_device *dev)
{
struct devlink *dl;
- dl = devlink_alloc(&prestera_dl_ops, sizeof(struct prestera_switch));
+ dl = devlink_alloc(&prestera_dl_ops, sizeof(struct prestera_switch),
+ dev->dev);
return devlink_priv(dl);
}
@@ -411,7 +412,7 @@ int prestera_devlink_register(struct prestera_switch *sw)
struct devlink *dl = priv_to_devlink(sw);
int err;
- err = devlink_register(dl, sw->dev->dev);
+ err = devlink_register(dl);
if (err) {
dev_err(prestera_dev(sw), "devlink_register failed: %d\n", err);
return err;
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_devlink.h b/drivers/net/ethernet/marvell/prestera/prestera_devlink.h
index 5d73aa9db897..cc34c3db13a2 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_devlink.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_devlink.h
@@ -6,7 +6,7 @@
#include "prestera.h"
-struct prestera_switch *prestera_devlink_alloc(void);
+struct prestera_switch *prestera_devlink_alloc(struct prestera_device *dev);
void prestera_devlink_free(struct prestera_switch *sw);
int prestera_devlink_register(struct prestera_switch *sw);
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c
index 226f4ff29f6e..44c670807fb3 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_main.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c
@@ -746,7 +746,8 @@ static int prestera_netdev_port_event(struct net_device *lower,
case NETDEV_CHANGEUPPER:
if (netif_is_bridge_master(upper)) {
if (info->linking)
- return prestera_bridge_port_join(upper, port);
+ return prestera_bridge_port_join(upper, port,
+ extack);
else
prestera_bridge_port_leave(upper, port);
} else if (netif_is_lag_master(upper)) {
@@ -904,7 +905,7 @@ int prestera_device_register(struct prestera_device *dev)
struct prestera_switch *sw;
int err;
- sw = prestera_devlink_alloc();
+ sw = prestera_devlink_alloc(dev);
if (!sw)
return -ENOMEM;
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
index 9a309169dbae..3ce6ccd0f539 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
@@ -480,7 +480,8 @@ err_port_flood_set:
}
int prestera_bridge_port_join(struct net_device *br_dev,
- struct prestera_port *port)
+ struct prestera_port *port,
+ struct netlink_ext_ack *extack)
{
struct prestera_switchdev *swdev = port->sw->swdev;
struct prestera_bridge_port *br_port;
@@ -500,6 +501,11 @@ int prestera_bridge_port_join(struct net_device *br_dev,
goto err_brport_create;
}
+ err = switchdev_bridge_port_offload(br_port->dev, port->dev, NULL,
+ NULL, NULL, false, extack);
+ if (err)
+ goto err_switchdev_offload;
+
if (bridge->vlan_enabled)
return 0;
@@ -510,6 +516,8 @@ int prestera_bridge_port_join(struct net_device *br_dev,
return 0;
err_port_join:
+ switchdev_bridge_port_unoffload(br_port->dev, NULL, NULL, NULL);
+err_switchdev_offload:
prestera_bridge_port_put(br_port);
err_brport_create:
prestera_bridge_put(bridge);
@@ -584,6 +592,8 @@ void prestera_bridge_port_leave(struct net_device *br_dev,
else
prestera_bridge_1d_port_leave(br_port);
+ switchdev_bridge_port_unoffload(br_port->dev, NULL, NULL, NULL);
+
prestera_hw_port_learning_set(port, false);
prestera_hw_port_flood_set(port, BR_FLOOD | BR_MCAST_FLOOD, 0);
prestera_port_vid_stp_set(port, PRESTERA_VID_ALL, BR_STATE_FORWARDING);
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.h b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.h
index a91bc35d235f..0e93fda3d9a5 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.h
@@ -8,7 +8,8 @@ int prestera_switchdev_init(struct prestera_switch *sw);
void prestera_switchdev_fini(struct prestera_switch *sw);
int prestera_bridge_port_join(struct net_device *br_dev,
- struct prestera_port *port);
+ struct prestera_port *port,
+ struct netlink_ext_ack *extack);
void prestera_bridge_port_leave(struct net_device *br_dev,
struct prestera_port *port);
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 9b48ae4bac39..fab53c9b8380 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1377,7 +1377,7 @@ static const struct net_device_ops pxa168_eth_netdev_ops = {
.ndo_set_rx_mode = pxa168_eth_set_rx_mode,
.ndo_set_mac_address = pxa168_eth_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = phy_do_ioctl,
+ .ndo_eth_ioctl = phy_do_ioctl,
.ndo_change_mtu = pxa168_eth_change_mtu,
.ndo_tx_timeout = pxa168_eth_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index d4bb27ba1419..150c06ee3627 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -3787,7 +3787,7 @@ static const struct net_device_ops skge_netdev_ops = {
.ndo_open = skge_up,
.ndo_stop = skge_down,
.ndo_start_xmit = skge_xmit_frame,
- .ndo_do_ioctl = skge_ioctl,
+ .ndo_eth_ioctl = skge_ioctl,
.ndo_get_stats = skge_get_stats,
.ndo_tx_timeout = skge_tx_timeout,
.ndo_change_mtu = skge_change_mtu,
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 8b8bff59c8fe..dc9dd77d1ea0 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -4693,7 +4693,7 @@ static const struct net_device_ops sky2_netdev_ops[2] = {
.ndo_open = sky2_open,
.ndo_stop = sky2_close,
.ndo_start_xmit = sky2_xmit_frame,
- .ndo_do_ioctl = sky2_ioctl,
+ .ndo_eth_ioctl = sky2_ioctl,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = sky2_set_mac_address,
.ndo_set_rx_mode = sky2_set_multicast,
@@ -4710,7 +4710,7 @@ static const struct net_device_ops sky2_netdev_ops[2] = {
.ndo_open = sky2_open,
.ndo_stop = sky2_close,
.ndo_start_xmit = sky2_xmit_frame,
- .ndo_do_ioctl = sky2_ioctl,
+ .ndo_eth_ioctl = sky2_ioctl,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = sky2_set_mac_address,
.ndo_set_rx_mode = sky2_set_multicast,
@@ -4884,7 +4884,7 @@ static int sky2_test_msi(struct sky2_hw *hw)
/* This driver supports yukon2 chipset only */
static const char *sky2_name(u8 chipid, char *buf, int sz)
{
- const char *name[] = {
+ static const char *const name[] = {
"XL", /* 0xb3 */
"EC Ultra", /* 0xb4 */
"Extreme", /* 0xb5 */
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 64adfd24e134..398c23cec815 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -2933,7 +2933,7 @@ static const struct net_device_ops mtk_netdev_ops = {
.ndo_start_xmit = mtk_start_xmit,
.ndo_set_mac_address = mtk_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = mtk_do_ioctl,
+ .ndo_eth_ioctl = mtk_do_ioctl,
.ndo_change_mtu = mtk_change_mtu,
.ndo_tx_timeout = mtk_tx_timeout,
.ndo_get_stats64 = mtk_get_stats64,
diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c
index 96d2891f1675..1d5dd2015453 100644
--- a/drivers/net/ethernet/mediatek/mtk_star_emac.c
+++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c
@@ -1162,7 +1162,7 @@ static const struct net_device_ops mtk_star_netdev_ops = {
.ndo_start_xmit = mtk_star_netdev_start_xmit,
.ndo_get_stats64 = mtk_star_netdev_get_stats64,
.ndo_set_rx_mode = mtk_star_set_rx_mode,
- .ndo_do_ioctl = mtk_star_netdev_ioctl,
+ .ndo_eth_ioctl = mtk_star_netdev_ioctl,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/ethernet/mellanox/mlx4/Kconfig b/drivers/net/ethernet/mellanox/mlx4/Kconfig
index 400e611ba041..1b4b1f642317 100644
--- a/drivers/net/ethernet/mellanox/mlx4/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx4/Kconfig
@@ -6,8 +6,8 @@
config MLX4_EN
tristate "Mellanox Technologies 1/10/40Gbit Ethernet support"
depends on PCI && NETDEVICES && ETHERNET && INET
+ depends on PTP_1588_CLOCK_OPTIONAL
select MLX4_CORE
- imply PTP_1588_CLOCK
help
This driver supports Mellanox Technologies ConnectX Ethernet
devices.
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 5d0c9c62382d..a2f61a87cef8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2828,7 +2828,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
.ndo_set_mac_address = mlx4_en_set_mac,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = mlx4_en_change_mtu,
- .ndo_do_ioctl = mlx4_en_ioctl,
+ .ndo_eth_ioctl = mlx4_en_ioctl,
.ndo_tx_timeout = mlx4_en_tx_timeout,
.ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid,
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 28ac4693da3c..7267c6c6d2e2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -4005,7 +4005,7 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
printk_once(KERN_INFO "%s", mlx4_version);
- devlink = devlink_alloc(&mlx4_devlink_ops, sizeof(*priv));
+ devlink = devlink_alloc(&mlx4_devlink_ops, sizeof(*priv), &pdev->dev);
if (!devlink)
return -ENOMEM;
priv = devlink_priv(devlink);
@@ -4024,7 +4024,7 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
mutex_init(&dev->persist->interface_state_mutex);
mutex_init(&dev->persist->pci_status_mutex);
- ret = devlink_register(devlink, &pdev->dev);
+ ret = devlink_register(devlink);
if (ret)
goto err_persist_free;
ret = devlink_params_register(devlink, mlx4_devlink_params,
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 427e7a31862c..b149e601f673 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -739,7 +739,7 @@ static void mlx4_cleanup_qp_zones(struct mlx4_dev *dev)
int i;
for (i = 0;
- i < sizeof(qp_table->zones_uids)/sizeof(qp_table->zones_uids[0]);
+ i < ARRAY_SIZE(qp_table->zones_uids);
i++) {
struct mlx4_bitmap *bitmap =
mlx4_zone_get_bitmap(qp_table->zones,
@@ -917,7 +917,7 @@ int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
{
int err;
int i;
- enum mlx4_qp_state states[] = {
+ static const enum mlx4_qp_state states[] = {
MLX4_QP_STATE_RST,
MLX4_QP_STATE_INIT,
MLX4_QP_STATE_RTR,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index e1a5a79e27c7..92056452a9e3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -10,7 +10,7 @@ config MLX5_CORE
select NET_DEVLINK
depends on VXLAN || !VXLAN
depends on MLXFW || !MLXFW
- depends on PTP_1588_CLOCK || !PTP_1588_CLOCK
+ depends on PTP_1588_CLOCK_OPTIONAL
depends on PCI_HYPERV_INTERFACE || !PCI_HYPERV_INTERFACE
help
Core driver for low level functionality of the ConnectX-4 and
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index b5072a3a2585..4fccc9bc0328 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -15,14 +15,15 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
health.o mcg.o cq.o alloc.o port.o mr.o pd.o \
transobj.o vport.o sriov.o fs_cmd.o fs_core.o pci_irq.o \
fs_counters.o fs_ft_pool.o rl.o lag.o dev.o events.o wq.o lib/gid.o \
- lib/devcom.o lib/pci_vsc.o lib/dm.o diag/fs_tracepoint.o \
+ lib/devcom.o lib/pci_vsc.o lib/dm.o lib/fs_ttc.o diag/fs_tracepoint.o \
diag/fw_tracer.o diag/crdump.o devlink.o diag/rsc_dump.o \
fw_reset.o qos.o
#
# Netdev basic
#
-mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \
+mlx5_core-$(CONFIG_MLX5_CORE_EN) += en/rqt.o en/tir.o en/rss.o en/rx_res.o \
+ en/channels.o en_main.o en_common.o en_fs.o en_ethtool.o \
en_tx.o en_rx.o en_dim.o en_txrx.o en/xdp.o en_stats.o \
en_selftest.o en/port.o en/monitor_stats.o en/health.o \
en/reporter_tx.o en/reporter_rx.o en/params.o en/xsk/pool.o \
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 9d79c5ec31e9..db5dfff585c9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -877,7 +877,7 @@ static void cb_timeout_handler(struct work_struct *work)
ent->ret = -ETIMEDOUT;
mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) Async, timeout. Will cause a leak of a command resource\n",
ent->idx, mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));
- mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
+ mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true);
out:
cmd_ent_put(ent); /* for the cmd_ent_get() took on schedule delayed work */
@@ -994,7 +994,7 @@ static void cmd_work_handler(struct work_struct *work)
MLX5_SET(mbox_out, ent->out, status, status);
MLX5_SET(mbox_out, ent->out, syndrome, drv_synd);
- mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
+ mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true);
return;
}
@@ -1008,7 +1008,7 @@ static void cmd_work_handler(struct work_struct *work)
poll_timeout(ent);
/* make sure we read the descriptor after ownership is SW */
rmb();
- mlx5_cmd_comp_handler(dev, 1UL << ent->idx, (ent->ret == -ETIMEDOUT));
+ mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, (ent->ret == -ETIMEDOUT));
}
}
@@ -1068,7 +1068,7 @@ static void wait_func_handle_exec_timeout(struct mlx5_core_dev *dev,
mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));
ent->ret = -ETIMEDOUT;
- mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
+ mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true);
}
static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
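
The 1UL -> 1ULL changes above matter because mlx5_cmd_comp_handler() takes
the completion vector as a u64, while unsigned long is only 32 bits wide on
32-bit architectures. A minimal illustration (bit position exaggerated for
clarity; ULL keeps the shift's type matching the u64 parameter everywhere):

	u64 vec_bad  = 1UL  << 40;	/* undefined when BITS_PER_LONG == 32 */
	u64 vec_good = 1ULL << 40;	/* always bit 40, on any arch */
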
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
index 360e093874d4..cf97985628ab 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
@@ -89,7 +89,8 @@ static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq,
int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
u32 *in, int inlen, u32 *out, int outlen)
{
- int eqn = MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context), c_eqn);
+ int eqn = MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context),
+ c_eqn_or_apu_element);
u32 din[MLX5_ST_SZ_DW(destroy_cq_in)] = {};
struct mlx5_eq_comp *eq;
int err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index def2156e50ee..ff6b03dc7e32 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -53,7 +53,7 @@ static bool is_eth_rep_supported(struct mlx5_core_dev *dev)
return true;
}
-static bool is_eth_supported(struct mlx5_core_dev *dev)
+bool mlx5_eth_supported(struct mlx5_core_dev *dev)
{
if (!IS_ENABLED(CONFIG_MLX5_CORE_EN))
return false;
@@ -105,7 +105,18 @@ static bool is_eth_supported(struct mlx5_core_dev *dev)
return true;
}
-static bool is_vnet_supported(struct mlx5_core_dev *dev)
+static bool is_eth_enabled(struct mlx5_core_dev *dev)
+{
+ union devlink_param_value val;
+ int err;
+
+ err = devlink_param_driverinit_value_get(priv_to_devlink(dev),
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_ETH,
+ &val);
+ return err ? false : val.vbool;
+}
+
+bool mlx5_vnet_supported(struct mlx5_core_dev *dev)
{
if (!IS_ENABLED(CONFIG_MLX5_VDPA_NET))
return false;
@@ -127,6 +138,17 @@ static bool is_vnet_supported(struct mlx5_core_dev *dev)
return true;
}
+static bool is_vnet_enabled(struct mlx5_core_dev *dev)
+{
+ union devlink_param_value val;
+ int err;
+
+ err = devlink_param_driverinit_value_get(priv_to_devlink(dev),
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_VNET,
+ &val);
+ return err ? false : val.vbool;
+}
+
static bool is_ib_rep_supported(struct mlx5_core_dev *dev)
{
if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
@@ -170,7 +192,7 @@ static bool is_mp_supported(struct mlx5_core_dev *dev)
return true;
}
-static bool is_ib_supported(struct mlx5_core_dev *dev)
+bool mlx5_rdma_supported(struct mlx5_core_dev *dev)
{
if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
return false;
@@ -187,6 +209,17 @@ static bool is_ib_supported(struct mlx5_core_dev *dev)
return true;
}
+static bool is_ib_enabled(struct mlx5_core_dev *dev)
+{
+ union devlink_param_value val;
+ int err;
+
+ err = devlink_param_driverinit_value_get(priv_to_devlink(dev),
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA,
+ &val);
+ return err ? false : val.vbool;
+}
+
enum {
MLX5_INTERFACE_PROTOCOL_ETH,
MLX5_INTERFACE_PROTOCOL_ETH_REP,
@@ -201,13 +234,17 @@ enum {
static const struct mlx5_adev_device {
const char *suffix;
bool (*is_supported)(struct mlx5_core_dev *dev);
+ bool (*is_enabled)(struct mlx5_core_dev *dev);
} mlx5_adev_devices[] = {
[MLX5_INTERFACE_PROTOCOL_VNET] = { .suffix = "vnet",
- .is_supported = &is_vnet_supported },
+ .is_supported = &mlx5_vnet_supported,
+ .is_enabled = &is_vnet_enabled },
[MLX5_INTERFACE_PROTOCOL_IB] = { .suffix = "rdma",
- .is_supported = &is_ib_supported },
+ .is_supported = &mlx5_rdma_supported,
+ .is_enabled = &is_ib_enabled },
[MLX5_INTERFACE_PROTOCOL_ETH] = { .suffix = "eth",
- .is_supported = &is_eth_supported },
+ .is_supported = &mlx5_eth_supported,
+ .is_enabled = &is_eth_enabled },
[MLX5_INTERFACE_PROTOCOL_ETH_REP] = { .suffix = "eth-rep",
.is_supported = &is_eth_rep_supported },
[MLX5_INTERFACE_PROTOCOL_IB_REP] = { .suffix = "rdma-rep",
@@ -308,6 +345,14 @@ int mlx5_attach_device(struct mlx5_core_dev *dev)
if (!priv->adev[i]) {
bool is_supported = false;
+ if (mlx5_adev_devices[i].is_enabled) {
+ bool enabled;
+
+ enabled = mlx5_adev_devices[i].is_enabled(dev);
+ if (!enabled)
+ continue;
+ }
+
if (mlx5_adev_devices[i].is_supported)
is_supported = mlx5_adev_devices[i].is_supported(dev);
@@ -360,6 +405,14 @@ void mlx5_detach_device(struct mlx5_core_dev *dev)
if (!priv->adev[i])
continue;
+ if (mlx5_adev_devices[i].is_enabled) {
+ bool enabled;
+
+ enabled = mlx5_adev_devices[i].is_enabled(dev);
+ if (!enabled)
+ goto skip_suspend;
+ }
+
adev = &priv->adev[i]->adev;
/* Auxiliary driver was unbind manually through sysfs */
if (!adev->dev.driver)
@@ -447,12 +500,21 @@ static void delete_drivers(struct mlx5_core_dev *dev)
if (!priv->adev[i])
continue;
+ if (mlx5_adev_devices[i].is_enabled) {
+ bool enabled;
+
+ enabled = mlx5_adev_devices[i].is_enabled(dev);
+ if (!enabled)
+ goto del_adev;
+ }
+
if (mlx5_adev_devices[i].is_supported && !delete_all)
is_supported = mlx5_adev_devices[i].is_supported(dev);
if (is_supported)
continue;
+del_adev:
del_adev(&priv->adev[i]->adev);
priv->adev[i] = NULL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index d791d351b489..6f4d7c7f06e0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -359,9 +359,10 @@ int mlx5_devlink_traps_get_action(struct mlx5_core_dev *dev, int trap_id,
return 0;
}
-struct devlink *mlx5_devlink_alloc(void)
+struct devlink *mlx5_devlink_alloc(struct device *dev)
{
- return devlink_alloc(&mlx5_devlink_ops, sizeof(struct mlx5_core_dev));
+ return devlink_alloc(&mlx5_devlink_ops, sizeof(struct mlx5_core_dev),
+ dev);
}
void mlx5_devlink_free(struct devlink *devlink)
@@ -595,6 +596,157 @@ static void mlx5_devlink_set_params_init_values(struct devlink *devlink)
#endif
}
+static const struct devlink_param enable_eth_param =
+ DEVLINK_PARAM_GENERIC(ENABLE_ETH, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL, NULL, NULL);
+
+static int mlx5_devlink_eth_param_register(struct devlink *devlink)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ union devlink_param_value value;
+ int err;
+
+ if (!mlx5_eth_supported(dev))
+ return 0;
+
+ err = devlink_param_register(devlink, &enable_eth_param);
+ if (err)
+ return err;
+
+ value.vbool = true;
+ devlink_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_ETH,
+ value);
+ devlink_param_publish(devlink, &enable_eth_param);
+ return 0;
+}
+
+static void mlx5_devlink_eth_param_unregister(struct devlink *devlink)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+
+ if (!mlx5_eth_supported(dev))
+ return;
+
+ devlink_param_unpublish(devlink, &enable_eth_param);
+ devlink_param_unregister(devlink, &enable_eth_param);
+}
+
+static int mlx5_devlink_enable_rdma_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ bool new_state = val.vbool;
+
+ if (new_state && !mlx5_rdma_supported(dev))
+ return -EOPNOTSUPP;
+ return 0;
+}
+
+static const struct devlink_param enable_rdma_param =
+ DEVLINK_PARAM_GENERIC(ENABLE_RDMA, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL, NULL, mlx5_devlink_enable_rdma_validate);
+
+static int mlx5_devlink_rdma_param_register(struct devlink *devlink)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ union devlink_param_value value;
+ int err;
+
+ if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND) || MLX5_ESWITCH_MANAGER(dev))
+ return 0;
+
+ err = devlink_param_register(devlink, &enable_rdma_param);
+ if (err)
+ return err;
+
+ value.vbool = true;
+ devlink_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA,
+ value);
+ devlink_param_publish(devlink, &enable_rdma_param);
+ return 0;
+}
+
+static void mlx5_devlink_rdma_param_unregister(struct devlink *devlink)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+
+ if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND) || MLX5_ESWITCH_MANAGER(dev))
+ return;
+
+ devlink_param_unpublish(devlink, &enable_rdma_param);
+ devlink_param_unregister(devlink, &enable_rdma_param);
+}
+
+static const struct devlink_param enable_vnet_param =
+ DEVLINK_PARAM_GENERIC(ENABLE_VNET, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL, NULL, NULL);
+
+static int mlx5_devlink_vnet_param_register(struct devlink *devlink)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ union devlink_param_value value;
+ int err;
+
+ if (!mlx5_vnet_supported(dev))
+ return 0;
+
+ err = devlink_param_register(devlink, &enable_vnet_param);
+ if (err)
+ return err;
+
+ value.vbool = true;
+ devlink_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_VNET,
+ value);
+ devlink_param_publish(devlink, &enable_rdma_param);
+ return 0;
+}
+
+static void mlx5_devlink_vnet_param_unregister(struct devlink *devlink)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+
+ if (!mlx5_vnet_supported(dev))
+ return;
+
+ devlink_param_unpublish(devlink, &enable_vnet_param);
+ devlink_param_unregister(devlink, &enable_vnet_param);
+}
+
+static int mlx5_devlink_auxdev_params_register(struct devlink *devlink)
+{
+ int err;
+
+ err = mlx5_devlink_eth_param_register(devlink);
+ if (err)
+ return err;
+
+ err = mlx5_devlink_rdma_param_register(devlink);
+ if (err)
+ goto rdma_err;
+
+ err = mlx5_devlink_vnet_param_register(devlink);
+ if (err)
+ goto vnet_err;
+ return 0;
+
+vnet_err:
+ mlx5_devlink_rdma_param_unregister(devlink);
+rdma_err:
+ mlx5_devlink_eth_param_unregister(devlink);
+ return err;
+}
+
+static void mlx5_devlink_auxdev_params_unregister(struct devlink *devlink)
+{
+ mlx5_devlink_vnet_param_unregister(devlink);
+ mlx5_devlink_rdma_param_unregister(devlink);
+ mlx5_devlink_eth_param_unregister(devlink);
+}
+
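
These three parameters use DEVLINK_PARAM_CMODE_DRIVERINIT, so a new value
only takes effect across a devlink reload, for example (device address
hypothetical):

	devlink dev param set pci/0000:06:00.0 name enable_rdma value false cmode driverinit
	devlink dev reload pci/0000:06:00.0

mlx5_attach_device() then skips any auxiliary device whose is_enabled()
callback reads back false.
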
#define MLX5_TRAP_DROP(_id, _group_id) \
DEVLINK_TRAP_GENERIC(DROP, DROP, _id, \
DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
@@ -638,11 +790,11 @@ static void mlx5_devlink_traps_unregister(struct devlink *devlink)
ARRAY_SIZE(mlx5_trap_groups_arr));
}
-int mlx5_devlink_register(struct devlink *devlink, struct device *dev)
+int mlx5_devlink_register(struct devlink *devlink)
{
int err;
- err = devlink_register(devlink, dev);
+ err = devlink_register(devlink);
if (err)
return err;
@@ -653,6 +805,10 @@ int mlx5_devlink_register(struct devlink *devlink, struct device *dev)
mlx5_devlink_set_params_init_values(devlink);
devlink_params_publish(devlink);
+ err = mlx5_devlink_auxdev_params_register(devlink);
+ if (err)
+ goto auxdev_reg_err;
+
err = mlx5_devlink_traps_register(devlink);
if (err)
goto traps_reg_err;
@@ -660,6 +816,8 @@ int mlx5_devlink_register(struct devlink *devlink, struct device *dev)
return 0;
traps_reg_err:
+ mlx5_devlink_auxdev_params_unregister(devlink);
+auxdev_reg_err:
devlink_params_unregister(devlink, mlx5_devlink_params,
ARRAY_SIZE(mlx5_devlink_params));
params_reg_err:
@@ -670,6 +828,8 @@ params_reg_err:
void mlx5_devlink_unregister(struct devlink *devlink)
{
mlx5_devlink_traps_unregister(devlink);
+ mlx5_devlink_auxdev_params_unregister(devlink);
+ devlink_params_unpublish(devlink);
devlink_params_unregister(devlink, mlx5_devlink_params,
ARRAY_SIZE(mlx5_devlink_params));
devlink_unregister(devlink);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.h b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
index 7318d44b774b..30bf4882779b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
@@ -31,9 +31,9 @@ int mlx5_devlink_trap_get_num_active(struct mlx5_core_dev *dev);
int mlx5_devlink_traps_get_action(struct mlx5_core_dev *dev, int trap_id,
enum devlink_trap_action *action);
-struct devlink *mlx5_devlink_alloc(void);
+struct devlink *mlx5_devlink_alloc(struct device *dev);
void mlx5_devlink_free(struct devlink *devlink);
-int mlx5_devlink_register(struct devlink *devlink, struct device *dev);
+int mlx5_devlink_register(struct devlink *devlink);
void mlx5_devlink_unregister(struct devlink *devlink);
#endif /* __MLX5_DEVLINK_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index b1b51bbba054..669a75f3537a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -58,6 +58,7 @@
#include "en/qos.h"
#include "lib/hv_vhca.h"
#include "lib/clock.h"
+#include "en/rx_res.h"
extern const struct net_device_ops mlx5e_netdev_ops;
struct page_pool;
@@ -65,14 +66,13 @@ struct page_pool;
#define MLX5E_METADATA_ETHER_TYPE (0x8CE4)
#define MLX5E_METADATA_ETHER_LEN 8
-#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)
-
#define MLX5E_ETH_HARD_MTU (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
#define MLX5E_HW2SW_MTU(params, hwmtu) ((hwmtu) - ((params)->hard_mtu))
#define MLX5E_SW2HW_MTU(params, swmtu) ((swmtu) + ((params)->hard_mtu))
#define MLX5E_MAX_NUM_TC 8
+#define MLX5E_MAX_NUM_MQPRIO_CH_TC TC_QOPT_MAX_QUEUE
#define MLX5_RX_HEADROOM NET_SKB_PAD
#define MLX5_SKB_FRAG_SZ(len) (SKB_DATA_ALIGN(len) + \
@@ -126,7 +126,6 @@ struct page_pool;
#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW 0x2
-#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024)
#define MLX5E_DEFAULT_LRO_TIMEOUT 32
#define MLX5E_LRO_TIMEOUT_ARR_SIZE 4
@@ -139,8 +138,6 @@ struct page_pool;
#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES 0x80
#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW 0x2
-#define MLX5E_LOG_INDIR_RQT_SIZE 0x8
-#define MLX5E_INDIR_RQT_SIZE BIT(MLX5E_LOG_INDIR_RQT_SIZE)
#define MLX5E_MIN_NUM_CHANNELS 0x1
#define MLX5E_MAX_NUM_CHANNELS (MLX5E_INDIR_RQT_SIZE / 2)
#define MLX5E_MAX_NUM_SQS (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC)
@@ -252,7 +249,10 @@ struct mlx5e_params {
u8 rq_wq_type;
u8 log_rq_mtu_frames;
u16 num_channels;
- u8 num_tc;
+ struct {
+ u16 mode;
+ u8 num_tc;
+ } mqprio;
bool rx_cqe_compress_def;
bool tunneled_offload_en;
struct dim_cq_moder rx_cq_moderation;
@@ -272,6 +272,12 @@ struct mlx5e_params {
bool ptp_rx;
};
+static inline u8 mlx5e_get_dcb_num_tc(struct mlx5e_params *params)
+{
+ return params->mqprio.mode == TC_MQPRIO_MODE_DCB ?
+ params->mqprio.num_tc : 1;
+}
+
enum {
MLX5E_RQ_STATE_ENABLED,
MLX5E_RQ_STATE_RECOVERING,
@@ -745,29 +751,11 @@ enum {
MLX5E_STATE_XDP_ACTIVE,
};
-struct mlx5e_rqt {
- u32 rqtn;
- bool enabled;
-};
-
-struct mlx5e_tir {
- u32 tirn;
- struct mlx5e_rqt rqt;
- struct list_head list;
-};
-
enum {
MLX5E_TC_PRIO = 0,
MLX5E_NIC_PRIO
};
-struct mlx5e_rss_params {
- u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE];
- u32 rx_hash_fields[MLX5E_NUM_INDIR_TIRS];
- u8 toeplitz_hash_key[40];
- u8 hfunc;
-};
-
struct mlx5e_modify_sq_param {
int curr_state;
int next_state;
@@ -837,13 +825,7 @@ struct mlx5e_priv {
struct mlx5e_channels channels;
u32 tisn[MLX5_MAX_PORTS][MLX5E_MAX_NUM_TC];
- struct mlx5e_rqt indir_rqt;
- struct mlx5e_tir indir_tir[MLX5E_NUM_INDIR_TIRS];
- struct mlx5e_tir inner_indir_tir[MLX5E_NUM_INDIR_TIRS];
- struct mlx5e_tir direct_tir[MLX5E_MAX_NUM_CHANNELS];
- struct mlx5e_tir xsk_tir[MLX5E_MAX_NUM_CHANNELS];
- struct mlx5e_tir ptp_tir;
- struct mlx5e_rss_params rss_params;
+ struct mlx5e_rx_res *rx_res;
u32 tx_rates[MLX5E_MAX_NUM_SQS];
struct mlx5e_flow_steering fs;
@@ -948,25 +930,6 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
u16 vid);
void mlx5e_timestamp_init(struct mlx5e_priv *priv);
-struct mlx5e_redirect_rqt_param {
- bool is_rss;
- union {
- u32 rqn; /* Direct RQN (Non-RSS) */
- struct {
- u8 hfunc;
- struct mlx5e_channels *channels;
- } rss; /* RSS data */
- };
-};
-
-int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz,
- struct mlx5e_redirect_rqt_param rrp);
-void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_rss_params *rss_params,
- const struct mlx5e_tirc_config *ttconfig,
- void *tirc, bool inner);
-void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in);
-struct mlx5e_tirc_config mlx5e_tirc_get_default_config(enum mlx5e_traffic_types tt);
-
struct mlx5e_xsk_param;
struct mlx5e_rq_param;
@@ -1028,9 +991,6 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv);
void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv);
int mlx5e_ptp_rx_manage_fs_ctx(struct mlx5e_priv *priv, void *ctx);
-void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
- int num_channels);
-
int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state);
void mlx5e_activate_rq(struct mlx5e_rq *rq);
void mlx5e_deactivate_rq(struct mlx5e_rq *rq);
@@ -1065,10 +1025,6 @@ static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev)
extern const struct ethtool_ops mlx5e_ethtool_ops;
-int mlx5e_create_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir,
- u32 *in);
-void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
- struct mlx5e_tir *tir);
int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev);
void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev);
int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb,
@@ -1084,17 +1040,6 @@ void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq);
int mlx5e_init_di_list(struct mlx5e_rq *rq, int wq_sz, int node);
void mlx5e_free_di_list(struct mlx5e_rq *rq);
-int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv);
-
-int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc);
-void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv);
-
-int mlx5e_create_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs, int n);
-void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs, int n);
-int mlx5e_create_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs, int n);
-void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs, int n);
-void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt);
-
int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn);
void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn);
@@ -1106,7 +1051,6 @@ int mlx5e_close(struct net_device *netdev);
int mlx5e_open(struct net_device *netdev);
void mlx5e_queue_update_stats(struct mlx5e_priv *priv);
-int mlx5e_bits_invert(unsigned long a, int size);
int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv);
int mlx5e_set_dev_port_mtu_ctx(struct mlx5e_priv *priv, void *context);
@@ -1183,8 +1127,6 @@ int mlx5e_netdev_change_profile(struct mlx5e_priv *priv,
void mlx5e_netdev_attach_nic_profile(struct mlx5e_priv *priv);
void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv);
void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 mtu);
-void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params,
- u16 num_channels);
void mlx5e_rx_dim_work(struct work_struct *work);
void mlx5e_tx_dim_work(struct work_struct *work);
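A minimal sketch, not part of the patch, of what the num_tc -> mqprio conversion in en.h above implies for call sites. TC_MQPRIO_MODE_CHANNEL is assumed from <net/pkt_sched.h>; only TC_MQPRIO_MODE_DCB appears in the hunks above.

/* Sketch only: the helper honours mqprio.num_tc solely in DCB mode. */
static u8 example_num_tc(struct mlx5e_params *params)
{
	params->mqprio.mode = TC_MQPRIO_MODE_DCB;
	params->mqprio.num_tc = 4;
	/* mlx5e_get_dcb_num_tc(params) == 4 here */

	params->mqprio.mode = TC_MQPRIO_MODE_CHANNEL;
	return mlx5e_get_dcb_num_tc(params);	/* falls back to 1 */
}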
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c
new file mode 100644
index 000000000000..e7c14c0de0a7
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2021, Mellanox Technologies inc. All rights reserved. */
+
+#include "channels.h"
+#include "en.h"
+#include "en/ptp.h"
+
+unsigned int mlx5e_channels_get_num(struct mlx5e_channels *chs)
+{
+ return chs->num;
+}
+
+void mlx5e_channels_get_regular_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn)
+{
+ struct mlx5e_channel *c;
+
+ WARN_ON(ix >= mlx5e_channels_get_num(chs));
+ c = chs->c[ix];
+
+ *rqn = c->rq.rqn;
+}
+
+bool mlx5e_channels_get_xsk_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn)
+{
+ struct mlx5e_channel *c;
+
+ WARN_ON(ix >= mlx5e_channels_get_num(chs));
+ c = chs->c[ix];
+
+ if (!test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
+ return false;
+
+ *rqn = c->xskrq.rqn;
+ return true;
+}
+
+bool mlx5e_channels_get_ptp_rqn(struct mlx5e_channels *chs, u32 *rqn)
+{
+ struct mlx5e_ptp *c = chs->ptp;
+
+ if (!c || !test_bit(MLX5E_PTP_STATE_RX, c->state))
+ return false;
+
+ *rqn = c->rq.rqn;
+ return true;
+}
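A usage sketch for the getters just added; the consumer function is hypothetical, not in the patch, and error handling and channel lifetime are assumed to be the caller's responsibility.

/* Collect one regular RQN per channel; probe the optional PTP RQ. */
static void example_fill_rqns(struct mlx5e_channels *chs, u32 *rqns)
{
	unsigned int ix, num = mlx5e_channels_get_num(chs);
	u32 ptp_rqn;

	for (ix = 0; ix < num; ix++)
		mlx5e_channels_get_regular_rqn(chs, ix, &rqns[ix]);

	/* Returns false unless the PTP RX state bit is set. */
	if (mlx5e_channels_get_ptp_rqn(chs, &ptp_rqn))
		pr_debug("PTP RQ present: rqn=%u\n", ptp_rqn);
}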
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h
new file mode 100644
index 000000000000..ca00cbc827cb
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, Mellanox Technologies inc. All rights reserved. */
+
+#ifndef __MLX5_EN_CHANNELS_H__
+#define __MLX5_EN_CHANNELS_H__
+
+#include <linux/kernel.h>
+
+struct mlx5e_channels;
+
+unsigned int mlx5e_channels_get_num(struct mlx5e_channels *chs);
+void mlx5e_channels_get_regular_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn);
+bool mlx5e_channels_get_xsk_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn);
+bool mlx5e_channels_get_ptp_rqn(struct mlx5e_channels *chs, u32 *rqn);
+
+#endif /* __MLX5_EN_CHANNELS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
index bc33eaada3b9..86e079310ac3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
@@ -55,19 +55,15 @@ void mlx5e_devlink_port_unregister(struct mlx5e_priv *priv)
{
struct devlink_port *dl_port = mlx5e_devlink_get_dl_port(priv);
- if (dl_port->registered)
- devlink_port_unregister(dl_port);
+ devlink_port_unregister(dl_port);
}
struct devlink_port *mlx5e_get_devlink_port(struct net_device *dev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- struct devlink_port *port;
if (!netif_device_present(dev))
return NULL;
- port = mlx5e_devlink_get_dl_port(priv);
- if (port->registered)
- return port;
- return NULL;
+
+ return mlx5e_devlink_get_dl_port(priv);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
index 1d5ce07b83f4..e348c276eaa1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
@@ -5,6 +5,7 @@
#define __MLX5E_FLOW_STEER_H__
#include "mod_hdr.h"
+#include "lib/fs_ttc.h"
enum {
MLX5E_TC_FT_LEVEL = 0,
@@ -67,27 +68,7 @@ struct mlx5e_l2_table {
bool promisc_enabled;
};
-enum mlx5e_traffic_types {
- MLX5E_TT_IPV4_TCP,
- MLX5E_TT_IPV6_TCP,
- MLX5E_TT_IPV4_UDP,
- MLX5E_TT_IPV6_UDP,
- MLX5E_TT_IPV4_IPSEC_AH,
- MLX5E_TT_IPV6_IPSEC_AH,
- MLX5E_TT_IPV4_IPSEC_ESP,
- MLX5E_TT_IPV6_IPSEC_ESP,
- MLX5E_TT_IPV4,
- MLX5E_TT_IPV6,
- MLX5E_TT_ANY,
- MLX5E_NUM_TT,
- MLX5E_NUM_INDIR_TIRS = MLX5E_TT_ANY,
-};
-
-struct mlx5e_tirc_config {
- u8 l3_prot_type;
- u8 l4_prot_type;
- u32 rx_hash_fields;
-};
+#define MLX5E_NUM_INDIR_TIRS (MLX5_NUM_TT - 1)
#define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\
MLX5_HASH_FIELD_SEL_DST_IP)
@@ -99,30 +80,6 @@ struct mlx5e_tirc_config {
MLX5_HASH_FIELD_SEL_DST_IP |\
MLX5_HASH_FIELD_SEL_IPSEC_SPI)
-enum mlx5e_tunnel_types {
- MLX5E_TT_IPV4_GRE,
- MLX5E_TT_IPV6_GRE,
- MLX5E_TT_IPV4_IPIP,
- MLX5E_TT_IPV6_IPIP,
- MLX5E_TT_IPV4_IPV6,
- MLX5E_TT_IPV6_IPV6,
- MLX5E_NUM_TUNNEL_TT,
-};
-
-bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev);
-
-struct mlx5e_ttc_rule {
- struct mlx5_flow_handle *rule;
- struct mlx5_flow_destination default_dest;
-};
-
-/* L3/L4 traffic type classifier */
-struct mlx5e_ttc_table {
- struct mlx5e_flow_table ft;
- struct mlx5e_ttc_rule rules[MLX5E_NUM_TT];
- struct mlx5_flow_handle *tunnel_rules[MLX5E_NUM_TUNNEL_TT];
-};
-
/* NIC prio FTS */
enum {
MLX5E_PROMISC_FT_LEVEL,
@@ -144,21 +101,7 @@ enum {
#endif
};
-#define MLX5E_TTC_NUM_GROUPS 3
-#define MLX5E_TTC_GROUP1_SIZE (BIT(3) + MLX5E_NUM_TUNNEL_TT)
-#define MLX5E_TTC_GROUP2_SIZE BIT(1)
-#define MLX5E_TTC_GROUP3_SIZE BIT(0)
-#define MLX5E_TTC_TABLE_SIZE (MLX5E_TTC_GROUP1_SIZE +\
- MLX5E_TTC_GROUP2_SIZE +\
- MLX5E_TTC_GROUP3_SIZE)
-
-#define MLX5E_INNER_TTC_NUM_GROUPS 3
-#define MLX5E_INNER_TTC_GROUP1_SIZE BIT(3)
-#define MLX5E_INNER_TTC_GROUP2_SIZE BIT(1)
-#define MLX5E_INNER_TTC_GROUP3_SIZE BIT(0)
-#define MLX5E_INNER_TTC_TABLE_SIZE (MLX5E_INNER_TTC_GROUP1_SIZE +\
- MLX5E_INNER_TTC_GROUP2_SIZE +\
- MLX5E_INNER_TTC_GROUP3_SIZE)
+struct mlx5e_priv;
#ifdef CONFIG_MLX5_EN_RXNFC
@@ -226,8 +169,8 @@ struct mlx5e_flow_steering {
struct mlx5e_promisc_table promisc;
struct mlx5e_vlan_table *vlan;
struct mlx5e_l2_table l2;
- struct mlx5e_ttc_table ttc;
- struct mlx5e_ttc_table inner_ttc;
+ struct mlx5_ttc_table *ttc;
+ struct mlx5_ttc_table *inner_ttc;
#ifdef CONFIG_MLX5_EN_ARFS
struct mlx5e_arfs_tables *arfs;
#endif
@@ -239,33 +182,13 @@ struct mlx5e_flow_steering {
struct mlx5e_ptp_fs *ptp_fs;
};
-struct ttc_params {
- struct mlx5_flow_table_attr ft_attr;
- u32 any_tt_tirn;
- u32 indir_tirn[MLX5E_NUM_INDIR_TIRS];
- struct mlx5e_ttc_table *inner_ttc;
-};
-
-void mlx5e_set_ttc_basic_params(struct mlx5e_priv *priv, struct ttc_params *ttc_params);
-void mlx5e_set_ttc_ft_params(struct ttc_params *ttc_params);
-void mlx5e_set_inner_ttc_ft_params(struct ttc_params *ttc_params);
-
-int mlx5e_create_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
- struct mlx5e_ttc_table *ttc);
-void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv,
- struct mlx5e_ttc_table *ttc);
+void mlx5e_set_ttc_params(struct mlx5e_priv *priv,
+ struct ttc_params *ttc_params, bool tunnel);
-int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
- struct mlx5e_ttc_table *ttc);
-void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv,
- struct mlx5e_ttc_table *ttc);
+void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv);
+int mlx5e_create_ttc_table(struct mlx5e_priv *priv);
void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft);
-int mlx5e_ttc_fwd_dest(struct mlx5e_priv *priv, enum mlx5e_traffic_types type,
- struct mlx5_flow_destination *new_dest);
-struct mlx5_flow_destination
-mlx5e_ttc_get_default_dest(struct mlx5e_priv *priv, enum mlx5e_traffic_types type);
-int mlx5e_ttc_fwd_default_dest(struct mlx5e_priv *priv, enum mlx5e_traffic_types type);
void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv);
void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv);
@@ -273,7 +196,6 @@ void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv);
int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);
-u8 mlx5e_get_proto_by_tunnel_type(enum mlx5e_tunnel_types tt);
int mlx5e_add_vlan_trap(struct mlx5e_priv *priv, int trap_id, int tir_num);
void mlx5e_remove_vlan_trap(struct mlx5e_priv *priv);
int mlx5e_add_mac_trap(struct mlx5e_priv *priv, int trap_id, int tir_num);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
index 909faa6c89d7..7aa25a5e29d7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
@@ -33,22 +33,22 @@ static char *fs_udp_type2str(enum fs_udp_type i)
}
}
-static enum mlx5e_traffic_types fs_udp2tt(enum fs_udp_type i)
+static enum mlx5_traffic_types fs_udp2tt(enum fs_udp_type i)
{
switch (i) {
case FS_IPV4_UDP:
- return MLX5E_TT_IPV4_UDP;
+ return MLX5_TT_IPV4_UDP;
default: /* FS_IPV6_UDP */
- return MLX5E_TT_IPV6_UDP;
+ return MLX5_TT_IPV6_UDP;
}
}
-static enum fs_udp_type tt2fs_udp(enum mlx5e_traffic_types i)
+static enum fs_udp_type tt2fs_udp(enum mlx5_traffic_types i)
{
switch (i) {
- case MLX5E_TT_IPV4_UDP:
+ case MLX5_TT_IPV4_UDP:
return FS_IPV4_UDP;
- case MLX5E_TT_IPV6_UDP:
+ case MLX5_TT_IPV6_UDP:
return FS_IPV6_UDP;
default:
return FS_UDP_NUM_TYPES;
@@ -75,7 +75,7 @@ static void fs_udp_set_dport_flow(struct mlx5_flow_spec *spec, enum fs_udp_type
struct mlx5_flow_handle *
mlx5e_fs_tt_redirect_udp_add_rule(struct mlx5e_priv *priv,
- enum mlx5e_traffic_types ttc_type,
+ enum mlx5_traffic_types ttc_type,
u32 tir_num, u16 d_port)
{
enum fs_udp_type type = tt2fs_udp(ttc_type);
@@ -124,7 +124,7 @@ static int fs_udp_add_default_rule(struct mlx5e_priv *priv, enum fs_udp_type typ
fs_udp = priv->fs.udp;
fs_udp_t = &fs_udp->tables[type];
- dest = mlx5e_ttc_get_default_dest(priv, fs_udp2tt(type));
+ dest = mlx5_ttc_get_default_dest(priv->fs.ttc, fs_udp2tt(type));
rule = mlx5_add_flow_rules(fs_udp_t->t, NULL, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -259,7 +259,7 @@ static int fs_udp_disable(struct mlx5e_priv *priv)
for (i = 0; i < FS_UDP_NUM_TYPES; i++) {
/* Modify ttc rules destination to point back to the indir TIRs */
- err = mlx5e_ttc_fwd_default_dest(priv, fs_udp2tt(i));
+ err = mlx5_ttc_fwd_default_dest(priv->fs.ttc, fs_udp2tt(i));
if (err) {
netdev_err(priv->netdev,
"%s: modify ttc[%d] default destination failed, err(%d)\n",
@@ -281,7 +281,7 @@ static int fs_udp_enable(struct mlx5e_priv *priv)
dest.ft = priv->fs.udp->tables[i].t;
/* Modify ttc rules destination to point on the accel_fs FTs */
- err = mlx5e_ttc_fwd_dest(priv, fs_udp2tt(i), &dest);
+ err = mlx5_ttc_fwd_dest(priv->fs.ttc, fs_udp2tt(i), &dest);
if (err) {
netdev_err(priv->netdev,
"%s: modify ttc[%d] destination to accel failed, err(%d)\n",
@@ -401,7 +401,7 @@ static int fs_any_add_default_rule(struct mlx5e_priv *priv)
fs_any = priv->fs.any;
fs_any_t = &fs_any->table;
- dest = mlx5e_ttc_get_default_dest(priv, MLX5E_TT_ANY);
+ dest = mlx5_ttc_get_default_dest(priv->fs.ttc, MLX5_TT_ANY);
rule = mlx5_add_flow_rules(fs_any_t->t, NULL, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -514,11 +514,11 @@ static int fs_any_disable(struct mlx5e_priv *priv)
int err;
/* Modify ttc rules destination to point back to the indir TIRs */
- err = mlx5e_ttc_fwd_default_dest(priv, MLX5E_TT_ANY);
+ err = mlx5_ttc_fwd_default_dest(priv->fs.ttc, MLX5_TT_ANY);
if (err) {
netdev_err(priv->netdev,
"%s: modify ttc[%d] default destination failed, err(%d)\n",
- __func__, MLX5E_TT_ANY, err);
+ __func__, MLX5_TT_ANY, err);
return err;
}
return 0;
@@ -533,11 +533,11 @@ static int fs_any_enable(struct mlx5e_priv *priv)
dest.ft = priv->fs.any->table.t;
/* Modify ttc rules destination to point on the accel_fs FTs */
- err = mlx5e_ttc_fwd_dest(priv, MLX5E_TT_ANY, &dest);
+ err = mlx5_ttc_fwd_dest(priv->fs.ttc, MLX5_TT_ANY, &dest);
if (err) {
netdev_err(priv->netdev,
"%s: modify ttc[%d] destination to accel failed, err(%d)\n",
- __func__, MLX5E_TT_ANY, err);
+ __func__, MLX5_TT_ANY, err);
return err;
}
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.h
index 8385df24eb99..7a70c4f38fda 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.h
@@ -12,7 +12,7 @@ void mlx5e_fs_tt_redirect_del_rule(struct mlx5_flow_handle *rule);
/* UDP traffic type redirect */
struct mlx5_flow_handle *
mlx5e_fs_tt_redirect_udp_add_rule(struct mlx5e_priv *priv,
- enum mlx5e_traffic_types ttc_type,
+ enum mlx5_traffic_types ttc_type,
u32 tir_num, u16 d_port);
void mlx5e_fs_tt_redirect_udp_destroy(struct mlx5e_priv *priv);
int mlx5e_fs_tt_redirect_udp_create(struct mlx5e_priv *priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.c b/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.c
index ea321e528749..4e72ca8070e2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.c
@@ -5,11 +5,15 @@
#include <linux/slab.h>
#include <linux/xarray.h>
#include <linux/hashtable.h>
+#include <linux/refcount.h>
#include "mapping.h"
#define MAPPING_GRACE_PERIOD 2000
+static LIST_HEAD(shared_ctx_list);
+static DEFINE_MUTEX(shared_ctx_lock);
+
struct mapping_ctx {
struct xarray xarray;
DECLARE_HASHTABLE(ht, 8);
@@ -20,6 +24,10 @@ struct mapping_ctx {
struct delayed_work dwork;
struct list_head pending_list;
spinlock_t pending_list_lock; /* Guards pending list */
+ u64 id;
+ u8 type;
+ struct list_head list;
+ refcount_t refcount;
};
struct mapping_item {
@@ -205,11 +213,48 @@ mapping_create(size_t data_size, u32 max_id, bool delayed_removal)
mutex_init(&ctx->lock);
xa_init_flags(&ctx->xarray, XA_FLAGS_ALLOC1);
+ refcount_set(&ctx->refcount, 1);
+ INIT_LIST_HEAD(&ctx->list);
+
+ return ctx;
+}
+
+struct mapping_ctx *
+mapping_create_for_id(u64 id, u8 type, size_t data_size, u32 max_id, bool delayed_removal)
+{
+ struct mapping_ctx *ctx;
+
+ mutex_lock(&shared_ctx_lock);
+ list_for_each_entry(ctx, &shared_ctx_list, list) {
+ if (ctx->id == id && ctx->type == type) {
+ if (refcount_inc_not_zero(&ctx->refcount))
+ goto unlock;
+ break;
+ }
+ }
+
+ ctx = mapping_create(data_size, max_id, delayed_removal);
+ if (IS_ERR(ctx))
+ goto unlock;
+
+ ctx->id = id;
+ ctx->type = type;
+ list_add(&ctx->list, &shared_ctx_list);
+
+unlock:
+ mutex_unlock(&shared_ctx_lock);
return ctx;
}
void mapping_destroy(struct mapping_ctx *ctx)
{
+ if (!refcount_dec_and_test(&ctx->refcount))
+ return;
+
+ mutex_lock(&shared_ctx_lock);
+ list_del(&ctx->list);
+ mutex_unlock(&shared_ctx_lock);
+
mapping_flush_work(ctx);
xa_destroy(&ctx->xarray);
mutex_destroy(&ctx->lock);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.h b/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.h
index 285525cc5470..4e2119f0f4c1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.h
@@ -24,4 +24,9 @@ struct mapping_ctx *mapping_create(size_t data_size, u32 max_id,
bool delayed_removal);
void mapping_destroy(struct mapping_ctx *ctx);
+/* Create a mapping context for the given id and type, or take a reference
+ * to an existing one that matches.
+ */
+struct mapping_ctx *
+mapping_create_for_id(u64 id, u8 type, size_t data_size, u32 max_id, bool delayed_removal);
+
#endif /* __MLX5_MAPPING_H__ */
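The refcounted sharing that mapping_create_for_id() adds can be read as follows; a sketch with placeholder id/type values, IS_ERR() checks omitted:

struct mapping_ctx *a, *b;

a = mapping_create_for_id(0x1234, 1, sizeof(u32), 128, true);
b = mapping_create_for_id(0x1234, 1, sizeof(u32), 128, true);
/* b == a: the second call only took a reference on the shared context. */

mapping_destroy(b);	/* refcount 2 -> 1, context survives */
mapping_destroy(a);	/* refcount 1 -> 0, flushed and freed */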
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index 2cbf18c967f7..3cbb596821e8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -167,6 +167,18 @@ u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
return is_linear_skb ? mlx5e_get_linear_rq_headroom(params, xsk) : 0;
}
+struct mlx5e_lro_param mlx5e_get_lro_param(struct mlx5e_params *params)
+{
+ struct mlx5e_lro_param lro_param;
+
+ lro_param = (struct mlx5e_lro_param) {
+ .enabled = params->lro_en,
+ .timeout = params->lro_timeout,
+ };
+
+ return lro_param;
+}
+
u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
bool is_mpwqe = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
index e9593f5f0661..879ad46d754e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
@@ -11,6 +11,11 @@ struct mlx5e_xsk_param {
u16 chunk_size;
};
+struct mlx5e_lro_param {
+ bool enabled;
+ u32 timeout;
+};
+
struct mlx5e_cq_param {
u32 cqc[MLX5_ST_SZ_DW(cqc)];
struct mlx5_wq_param wq;
@@ -120,6 +125,7 @@ u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk);
+struct mlx5e_lro_param mlx5e_get_lro_param(struct mlx5e_params *params);
/* Build queue parameters */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index efef4adce086..ee688dec67a9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -326,13 +326,14 @@ static int mlx5e_ptp_open_txqsqs(struct mlx5e_ptp *c,
struct mlx5e_ptp_params *cparams)
{
struct mlx5e_params *params = &cparams->params;
+ u8 num_tc = mlx5e_get_dcb_num_tc(params);
int ix_base;
int err;
int tc;
- ix_base = params->num_tc * params->num_channels;
+ ix_base = num_tc * params->num_channels;
- for (tc = 0; tc < params->num_tc; tc++) {
+ for (tc = 0; tc < num_tc; tc++) {
int txq_ix = ix_base + tc;
err = mlx5e_ptp_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix,
@@ -365,9 +366,12 @@ static int mlx5e_ptp_open_tx_cqs(struct mlx5e_ptp *c,
struct mlx5e_create_cq_param ccp = {};
struct dim_cq_moder ptp_moder = {};
struct mlx5e_cq_param *cq_param;
+ u8 num_tc;
int err;
int tc;
+ num_tc = mlx5e_get_dcb_num_tc(params);
+
ccp.node = dev_to_node(mlx5_core_dma_dev(c->mdev));
ccp.ch_stats = c->stats;
ccp.napi = &c->napi;
@@ -375,7 +379,7 @@ static int mlx5e_ptp_open_tx_cqs(struct mlx5e_ptp *c,
cq_param = &cparams->txq_sq_param.cqp;
- for (tc = 0; tc < params->num_tc; tc++) {
+ for (tc = 0; tc < num_tc; tc++) {
struct mlx5e_cq *cq = &c->ptpsq[tc].txqsq.cq;
err = mlx5e_open_cq(c->priv, ptp_moder, cq_param, &ccp, cq);
@@ -383,7 +387,7 @@ static int mlx5e_ptp_open_tx_cqs(struct mlx5e_ptp *c,
goto out_err_txqsq_cq;
}
- for (tc = 0; tc < params->num_tc; tc++) {
+ for (tc = 0; tc < num_tc; tc++) {
struct mlx5e_cq *cq = &c->ptpsq[tc].ts_cq;
struct mlx5e_ptpsq *ptpsq = &c->ptpsq[tc];
@@ -399,7 +403,7 @@ static int mlx5e_ptp_open_tx_cqs(struct mlx5e_ptp *c,
out_err_ts_cq:
for (--tc; tc >= 0; tc--)
mlx5e_close_cq(&c->ptpsq[tc].ts_cq);
- tc = params->num_tc;
+ tc = num_tc;
out_err_txqsq_cq:
for (--tc; tc >= 0; tc--)
mlx5e_close_cq(&c->ptpsq[tc].txqsq.cq);
@@ -475,7 +479,7 @@ static void mlx5e_ptp_build_params(struct mlx5e_ptp *c,
params->num_channels = orig->num_channels;
params->hard_mtu = orig->hard_mtu;
params->sw_mtu = orig->sw_mtu;
- params->num_tc = orig->num_tc;
+ params->mqprio = orig->mqprio;
/* SQ */
if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
@@ -605,9 +609,9 @@ static void mlx5e_ptp_rx_unset_fs(struct mlx5e_priv *priv)
static int mlx5e_ptp_rx_set_fs(struct mlx5e_priv *priv)
{
+ u32 tirn = mlx5e_rx_res_get_tirn_ptp(priv->rx_res);
struct mlx5e_ptp_fs *ptp_fs = priv->fs.ptp_fs;
struct mlx5_flow_handle *rule;
- u32 tirn = priv->ptp_tir.tirn;
int err;
if (ptp_fs->valid)
@@ -617,7 +621,7 @@ static int mlx5e_ptp_rx_set_fs(struct mlx5e_priv *priv)
if (err)
goto out_free;
- rule = mlx5e_fs_tt_redirect_udp_add_rule(priv, MLX5E_TT_IPV4_UDP,
+ rule = mlx5e_fs_tt_redirect_udp_add_rule(priv, MLX5_TT_IPV4_UDP,
tirn, PTP_EV_PORT);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -625,7 +629,7 @@ static int mlx5e_ptp_rx_set_fs(struct mlx5e_priv *priv)
}
ptp_fs->udp_v4_rule = rule;
- rule = mlx5e_fs_tt_redirect_udp_add_rule(priv, MLX5E_TT_IPV6_UDP,
+ rule = mlx5e_fs_tt_redirect_udp_add_rule(priv, MLX5_TT_IPV6_UDP,
tirn, PTP_EV_PORT);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -680,7 +684,7 @@ int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
c->pdev = mlx5_core_dma_dev(priv->mdev);
c->netdev = priv->netdev;
c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey.key);
- c->num_tc = params->num_tc;
+ c->num_tc = mlx5e_get_dcb_num_tc(params);
c->stats = &priv->ptp_stats.ch;
c->lag_port = lag_port;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
index 5efe3278b0f6..c9ac69f62f21 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
@@ -132,7 +132,7 @@ static u16 mlx5e_qid_from_qos(struct mlx5e_channels *chs, u16 qid)
*/
bool is_ptp = MLX5E_GET_PFLAG(&chs->params, MLX5E_PFLAG_TX_PORT_TS);
- return (chs->params.num_channels + is_ptp) * chs->params.num_tc + qid;
+ return (chs->params.num_channels + is_ptp) * mlx5e_get_dcb_num_tc(&chs->params) + qid;
}
int mlx5e_get_txq_by_classid(struct mlx5e_priv *priv, u16 classid)
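Worked example for the updated qid mapping above, with illustrative numbers: given 8 channels, TX port timestamping enabled (is_ptp = 1) and 4 DCB TCs, QOS queue 3 maps to txq index (8 + 1) * 4 + 3 = 39, i.e. QOS SQs are laid out after all regular and PTP SQs.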
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
index 3c0032c9647c..0c38c2e319be 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
@@ -15,9 +15,116 @@ struct mlx5_bridge_switchdev_fdb_work {
struct work_struct work;
struct switchdev_notifier_fdb_info fdb_info;
struct net_device *dev;
+ struct mlx5_esw_bridge_offloads *br_offloads;
bool add;
};
+static bool mlx5_esw_bridge_dev_same_esw(struct net_device *dev, struct mlx5_eswitch *esw)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ return esw == priv->mdev->priv.eswitch;
+}
+
+static bool mlx5_esw_bridge_dev_same_hw(struct net_device *dev, struct mlx5_eswitch *esw)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5_core_dev *mdev, *esw_mdev;
+ u64 system_guid, esw_system_guid;
+
+ mdev = priv->mdev;
+ esw_mdev = esw->dev;
+
+ system_guid = mlx5_query_nic_system_image_guid(mdev);
+ esw_system_guid = mlx5_query_nic_system_image_guid(esw_mdev);
+
+ return system_guid == esw_system_guid;
+}
+
+static struct net_device *
+mlx5_esw_bridge_lag_rep_get(struct net_device *dev, struct mlx5_eswitch *esw)
+{
+ struct net_device *lower;
+ struct list_head *iter;
+
+ netdev_for_each_lower_dev(dev, lower, iter) {
+ struct mlx5_core_dev *mdev;
+ struct mlx5e_priv *priv;
+
+ if (!mlx5e_eswitch_rep(lower))
+ continue;
+
+ priv = netdev_priv(lower);
+ mdev = priv->mdev;
+ if (mlx5_lag_is_shared_fdb(mdev) && mlx5_esw_bridge_dev_same_esw(lower, esw))
+ return lower;
+ }
+
+ return NULL;
+}
+
+static struct net_device *
+mlx5_esw_bridge_rep_vport_num_vhca_id_get(struct net_device *dev, struct mlx5_eswitch *esw,
+ u16 *vport_num, u16 *esw_owner_vhca_id)
+{
+ struct mlx5e_rep_priv *rpriv;
+ struct mlx5e_priv *priv;
+
+ if (netif_is_lag_master(dev))
+ dev = mlx5_esw_bridge_lag_rep_get(dev, esw);
+
+ if (!dev || !mlx5e_eswitch_rep(dev) || !mlx5_esw_bridge_dev_same_hw(dev, esw))
+ return NULL;
+
+ priv = netdev_priv(dev);
+ rpriv = priv->ppriv;
+ *vport_num = rpriv->rep->vport;
+ *esw_owner_vhca_id = MLX5_CAP_GEN(priv->mdev, vhca_id);
+ return dev;
+}
+
+static struct net_device *
+mlx5_esw_bridge_lower_rep_vport_num_vhca_id_get(struct net_device *dev, struct mlx5_eswitch *esw,
+ u16 *vport_num, u16 *esw_owner_vhca_id)
+{
+ struct net_device *lower_dev;
+ struct list_head *iter;
+
+ if (netif_is_lag_master(dev) || mlx5e_eswitch_rep(dev))
+ return mlx5_esw_bridge_rep_vport_num_vhca_id_get(dev, esw, vport_num,
+ esw_owner_vhca_id);
+
+ netdev_for_each_lower_dev(dev, lower_dev, iter) {
+ struct net_device *rep;
+
+ if (netif_is_bridge_master(lower_dev))
+ continue;
+
+ rep = mlx5_esw_bridge_lower_rep_vport_num_vhca_id_get(lower_dev, esw, vport_num,
+ esw_owner_vhca_id);
+ if (rep)
+ return rep;
+ }
+
+ return NULL;
+}
+
+static bool mlx5_esw_bridge_is_local(struct net_device *dev, struct net_device *rep,
+ struct mlx5_eswitch *esw)
+{
+ struct mlx5_core_dev *mdev;
+ struct mlx5e_priv *priv;
+
+ if (!mlx5_esw_bridge_dev_same_esw(rep, esw))
+ return false;
+
+ priv = netdev_priv(rep);
+ mdev = priv->mdev;
+ if (netif_is_lag_master(dev))
+ return mlx5_lag_is_shared_fdb(mdev) && mlx5_lag_is_master(mdev);
+ return true;
+}
+
static int mlx5_esw_bridge_port_changeupper(struct notifier_block *nb, void *ptr)
{
struct mlx5_esw_bridge_offloads *br_offloads = container_of(nb,
@@ -25,37 +132,36 @@ static int mlx5_esw_bridge_port_changeupper(struct notifier_block *nb, void *ptr
netdev_nb);
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct netdev_notifier_changeupper_info *info = ptr;
+ struct net_device *upper = info->upper_dev, *rep;
+ struct mlx5_eswitch *esw = br_offloads->esw;
+ u16 vport_num, esw_owner_vhca_id;
struct netlink_ext_ack *extack;
- struct mlx5e_rep_priv *rpriv;
- struct mlx5_eswitch *esw;
- struct mlx5_vport *vport;
- struct net_device *upper;
- struct mlx5e_priv *priv;
- u16 vport_num;
-
- if (!mlx5e_eswitch_rep(dev))
- return 0;
+ int ifindex = upper->ifindex;
+ int err = 0;
- upper = info->upper_dev;
if (!netif_is_bridge_master(upper))
return 0;
- esw = br_offloads->esw;
- priv = netdev_priv(dev);
- if (esw != priv->mdev->priv.eswitch)
+ rep = mlx5_esw_bridge_rep_vport_num_vhca_id_get(dev, esw, &vport_num, &esw_owner_vhca_id);
+ if (!rep)
return 0;
- rpriv = priv->ppriv;
- vport_num = rpriv->rep->vport;
- vport = mlx5_eswitch_get_vport(esw, vport_num);
- if (IS_ERR(vport))
- return PTR_ERR(vport);
-
extack = netdev_notifier_info_to_extack(&info->info);
- return info->linking ?
- mlx5_esw_bridge_vport_link(upper->ifindex, br_offloads, vport, extack) :
- mlx5_esw_bridge_vport_unlink(upper->ifindex, br_offloads, vport, extack);
+ if (mlx5_esw_bridge_is_local(dev, rep, esw))
+ err = info->linking ?
+ mlx5_esw_bridge_vport_link(ifindex, vport_num, esw_owner_vhca_id,
+ br_offloads, extack) :
+ mlx5_esw_bridge_vport_unlink(ifindex, vport_num, esw_owner_vhca_id,
+ br_offloads, extack);
+ else if (mlx5_esw_bridge_dev_same_hw(rep, esw))
+ err = info->linking ?
+ mlx5_esw_bridge_vport_peer_link(ifindex, vport_num, esw_owner_vhca_id,
+ br_offloads, extack) :
+ mlx5_esw_bridge_vport_peer_unlink(ifindex, vport_num, esw_owner_vhca_id,
+ br_offloads, extack);
+
+ return err;
}
static int mlx5_esw_bridge_switchdev_port_event(struct notifier_block *nb,
@@ -75,31 +181,28 @@ static int mlx5_esw_bridge_switchdev_port_event(struct notifier_block *nb,
return notifier_from_errno(err);
}
-static int mlx5_esw_bridge_port_obj_add(struct net_device *dev,
- const void *ctx,
- const struct switchdev_obj *obj,
- struct netlink_ext_ack *extack)
+static int
+mlx5_esw_bridge_port_obj_add(struct net_device *dev,
+ struct switchdev_notifier_port_obj_info *port_obj_info,
+ struct mlx5_esw_bridge_offloads *br_offloads)
{
+ struct netlink_ext_ack *extack = switchdev_notifier_info_to_extack(&port_obj_info->info);
+ const struct switchdev_obj *obj = port_obj_info->obj;
const struct switchdev_obj_port_vlan *vlan;
- struct mlx5e_rep_priv *rpriv;
- struct mlx5_eswitch *esw;
- struct mlx5_vport *vport;
- struct mlx5e_priv *priv;
- u16 vport_num;
- int err = 0;
+ u16 vport_num, esw_owner_vhca_id;
+ int err;
- priv = netdev_priv(dev);
- rpriv = priv->ppriv;
- vport_num = rpriv->rep->vport;
- esw = priv->mdev->priv.eswitch;
- vport = mlx5_eswitch_get_vport(esw, vport_num);
- if (IS_ERR(vport))
- return PTR_ERR(vport);
+ if (!mlx5_esw_bridge_rep_vport_num_vhca_id_get(dev, br_offloads->esw, &vport_num,
+ &esw_owner_vhca_id))
+ return 0;
+
+ port_obj_info->handled = true;
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
- err = mlx5_esw_bridge_port_vlan_add(vlan->vid, vlan->flags, esw, vport, extack);
+ err = mlx5_esw_bridge_port_vlan_add(vport_num, esw_owner_vhca_id, vlan->vid,
+ vlan->flags, br_offloads, extack);
break;
default:
return -EOPNOTSUPP;
@@ -107,29 +210,25 @@ static int mlx5_esw_bridge_port_obj_add(struct net_device *dev,
return err;
}
-static int mlx5_esw_bridge_port_obj_del(struct net_device *dev,
- const void *ctx,
- const struct switchdev_obj *obj)
+static int
+mlx5_esw_bridge_port_obj_del(struct net_device *dev,
+ struct switchdev_notifier_port_obj_info *port_obj_info,
+ struct mlx5_esw_bridge_offloads *br_offloads)
{
+ const struct switchdev_obj *obj = port_obj_info->obj;
const struct switchdev_obj_port_vlan *vlan;
- struct mlx5e_rep_priv *rpriv;
- struct mlx5_eswitch *esw;
- struct mlx5_vport *vport;
- struct mlx5e_priv *priv;
- u16 vport_num;
+ u16 vport_num, esw_owner_vhca_id;
- priv = netdev_priv(dev);
- rpriv = priv->ppriv;
- vport_num = rpriv->rep->vport;
- esw = priv->mdev->priv.eswitch;
- vport = mlx5_eswitch_get_vport(esw, vport_num);
- if (IS_ERR(vport))
- return PTR_ERR(vport);
+ if (!mlx5_esw_bridge_rep_vport_num_vhca_id_get(dev, br_offloads->esw, &vport_num,
+ &esw_owner_vhca_id))
+ return 0;
+
+ port_obj_info->handled = true;
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
- mlx5_esw_bridge_port_vlan_del(vlan->vid, esw, vport);
+ mlx5_esw_bridge_port_vlan_del(vport_num, esw_owner_vhca_id, vlan->vid, br_offloads);
break;
default:
return -EOPNOTSUPP;
@@ -137,25 +236,21 @@ static int mlx5_esw_bridge_port_obj_del(struct net_device *dev,
return 0;
}
-static int mlx5_esw_bridge_port_obj_attr_set(struct net_device *dev,
- const void *ctx,
- const struct switchdev_attr *attr,
- struct netlink_ext_ack *extack)
+static int
+mlx5_esw_bridge_port_obj_attr_set(struct net_device *dev,
+ struct switchdev_notifier_port_attr_info *port_attr_info,
+ struct mlx5_esw_bridge_offloads *br_offloads)
{
- struct mlx5e_rep_priv *rpriv;
- struct mlx5_eswitch *esw;
- struct mlx5_vport *vport;
- struct mlx5e_priv *priv;
- u16 vport_num;
- int err = 0;
+ struct netlink_ext_ack *extack = switchdev_notifier_info_to_extack(&port_attr_info->info);
+ const struct switchdev_attr *attr = port_attr_info->attr;
+ u16 vport_num, esw_owner_vhca_id;
+ int err;
- priv = netdev_priv(dev);
- rpriv = priv->ppriv;
- vport_num = rpriv->rep->vport;
- esw = priv->mdev->priv.eswitch;
- vport = mlx5_eswitch_get_vport(esw, vport_num);
- if (IS_ERR(vport))
- return PTR_ERR(vport);
+ if (!mlx5_esw_bridge_lower_rep_vport_num_vhca_id_get(dev, br_offloads->esw, &vport_num,
+ &esw_owner_vhca_id))
+ return 0;
+
+ port_attr_info->handled = true;
switch (attr->id) {
case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
@@ -167,10 +262,12 @@ static int mlx5_esw_bridge_port_obj_attr_set(struct net_device *dev,
case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
break;
case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
- err = mlx5_esw_bridge_ageing_time_set(attr->u.ageing_time, esw, vport);
+ err = mlx5_esw_bridge_ageing_time_set(vport_num, esw_owner_vhca_id,
+ attr->u.ageing_time, br_offloads);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
- err = mlx5_esw_bridge_vlan_filtering_set(attr->u.vlan_filtering, esw, vport);
+ err = mlx5_esw_bridge_vlan_filtering_set(vport_num, esw_owner_vhca_id,
+ attr->u.vlan_filtering, br_offloads);
break;
default:
err = -EOPNOTSUPP;
@@ -179,27 +276,24 @@ static int mlx5_esw_bridge_port_obj_attr_set(struct net_device *dev,
return err;
}
-static int mlx5_esw_bridge_event_blocking(struct notifier_block *unused,
+static int mlx5_esw_bridge_event_blocking(struct notifier_block *nb,
unsigned long event, void *ptr)
{
+ struct mlx5_esw_bridge_offloads *br_offloads = container_of(nb,
+ struct mlx5_esw_bridge_offloads,
+ nb_blk);
struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
int err;
switch (event) {
case SWITCHDEV_PORT_OBJ_ADD:
- err = switchdev_handle_port_obj_add(dev, ptr,
- mlx5e_eswitch_rep,
- mlx5_esw_bridge_port_obj_add);
+ err = mlx5_esw_bridge_port_obj_add(dev, ptr, br_offloads);
break;
case SWITCHDEV_PORT_OBJ_DEL:
- err = switchdev_handle_port_obj_del(dev, ptr,
- mlx5e_eswitch_rep,
- mlx5_esw_bridge_port_obj_del);
+ err = mlx5_esw_bridge_port_obj_del(dev, ptr, br_offloads);
break;
case SWITCHDEV_PORT_ATTR_SET:
- err = switchdev_handle_port_attr_set(dev, ptr,
- mlx5e_eswitch_rep,
- mlx5_esw_bridge_port_obj_attr_set);
+ err = mlx5_esw_bridge_port_obj_attr_set(dev, ptr, br_offloads);
break;
default:
err = 0;
@@ -222,27 +316,23 @@ static void mlx5_esw_bridge_switchdev_fdb_event_work(struct work_struct *work)
container_of(work, struct mlx5_bridge_switchdev_fdb_work, work);
struct switchdev_notifier_fdb_info *fdb_info =
&fdb_work->fdb_info;
+ struct mlx5_esw_bridge_offloads *br_offloads =
+ fdb_work->br_offloads;
struct net_device *dev = fdb_work->dev;
- struct mlx5e_rep_priv *rpriv;
- struct mlx5_eswitch *esw;
- struct mlx5_vport *vport;
- struct mlx5e_priv *priv;
- u16 vport_num;
+ u16 vport_num, esw_owner_vhca_id;
rtnl_lock();
- priv = netdev_priv(dev);
- rpriv = priv->ppriv;
- vport_num = rpriv->rep->vport;
- esw = priv->mdev->priv.eswitch;
- vport = mlx5_eswitch_get_vport(esw, vport_num);
- if (IS_ERR(vport))
+ if (!mlx5_esw_bridge_rep_vport_num_vhca_id_get(dev, br_offloads->esw, &vport_num,
+ &esw_owner_vhca_id))
goto out;
if (fdb_work->add)
- mlx5_esw_bridge_fdb_create(dev, esw, vport, fdb_info);
+ mlx5_esw_bridge_fdb_create(dev, vport_num, esw_owner_vhca_id, br_offloads,
+ fdb_info);
else
- mlx5_esw_bridge_fdb_remove(dev, esw, vport, fdb_info);
+ mlx5_esw_bridge_fdb_remove(dev, vport_num, esw_owner_vhca_id, br_offloads,
+ fdb_info);
out:
rtnl_unlock();
@@ -251,7 +341,8 @@ out:
static struct mlx5_bridge_switchdev_fdb_work *
mlx5_esw_bridge_init_switchdev_fdb_work(struct net_device *dev, bool add,
- struct switchdev_notifier_fdb_info *fdb_info)
+ struct switchdev_notifier_fdb_info *fdb_info,
+ struct mlx5_esw_bridge_offloads *br_offloads)
{
struct mlx5_bridge_switchdev_fdb_work *work;
u8 *addr;
@@ -273,6 +364,7 @@ mlx5_esw_bridge_init_switchdev_fdb_work(struct net_device *dev, bool add,
dev_hold(dev);
work->dev = dev;
+ work->br_offloads = br_offloads;
work->add = add;
return work;
}
@@ -286,20 +378,14 @@ static int mlx5_esw_bridge_switchdev_event(struct notifier_block *nb,
struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
struct switchdev_notifier_fdb_info *fdb_info;
struct mlx5_bridge_switchdev_fdb_work *work;
+ struct mlx5_eswitch *esw = br_offloads->esw;
struct switchdev_notifier_info *info = ptr;
- struct net_device *upper;
- struct mlx5e_priv *priv;
-
- if (!mlx5e_eswitch_rep(dev))
- return NOTIFY_DONE;
- priv = netdev_priv(dev);
- if (priv->mdev->priv.eswitch != br_offloads->esw)
- return NOTIFY_DONE;
+ u16 vport_num, esw_owner_vhca_id;
+ struct net_device *upper, *rep;
if (event == SWITCHDEV_PORT_ATTR_SET) {
- int err = switchdev_handle_port_attr_set(dev, ptr,
- mlx5e_eswitch_rep,
- mlx5_esw_bridge_port_obj_attr_set);
+ int err = mlx5_esw_bridge_port_obj_attr_set(dev, ptr, br_offloads);
+
return notifier_from_errno(err);
}
@@ -309,7 +395,27 @@ static int mlx5_esw_bridge_switchdev_event(struct notifier_block *nb,
if (!netif_is_bridge_master(upper))
return NOTIFY_DONE;
+ rep = mlx5_esw_bridge_rep_vport_num_vhca_id_get(dev, esw, &vport_num, &esw_owner_vhca_id);
+ if (!rep)
+ return NOTIFY_DONE;
+
switch (event) {
+ case SWITCHDEV_FDB_ADD_TO_BRIDGE:
+ /* only handle the event on the native eswitch of the representor */
+ if (!mlx5_esw_bridge_is_local(dev, rep, esw))
+ break;
+
+ fdb_info = container_of(info,
+ struct switchdev_notifier_fdb_info,
+ info);
+ mlx5_esw_bridge_fdb_update_used(dev, vport_num, esw_owner_vhca_id, br_offloads,
+ fdb_info);
+ break;
+ case SWITCHDEV_FDB_DEL_TO_BRIDGE:
+ /* only handle the event on peers */
+ if (mlx5_esw_bridge_is_local(dev, rep, esw))
+ break;
+ fallthrough;
case SWITCHDEV_FDB_ADD_TO_DEVICE:
case SWITCHDEV_FDB_DEL_TO_DEVICE:
fdb_info = container_of(info,
@@ -318,7 +424,8 @@ static int mlx5_esw_bridge_switchdev_event(struct notifier_block *nb,
work = mlx5_esw_bridge_init_switchdev_fdb_work(dev,
event == SWITCHDEV_FDB_ADD_TO_DEVICE,
- fdb_info);
+ fdb_info,
+ br_offloads);
if (IS_ERR(work)) {
WARN_ONCE(1, "Failed to init switchdev work, err=%ld",
PTR_ERR(work));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index 9d361efd5ff7..bb682fd751c9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -372,7 +372,7 @@ static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter,
for (i = 0; i < priv->channels.num; i++) {
struct mlx5e_channel *c = priv->channels.c[i];
- for (tc = 0; tc < priv->channels.params.num_tc; tc++) {
+ for (tc = 0; tc < mlx5e_get_dcb_num_tc(&priv->channels.params); tc++) {
struct mlx5e_txqsq *sq = &c->sq[tc];
err = mlx5e_tx_reporter_build_diagnose_output(fmsg, sq, tc);
@@ -384,7 +384,7 @@ static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter,
if (!ptp_ch || !test_bit(MLX5E_PTP_STATE_TX, ptp_ch->state))
goto close_sqs_nest;
- for (tc = 0; tc < priv->channels.params.num_tc; tc++) {
+ for (tc = 0; tc < mlx5e_get_dcb_num_tc(&priv->channels.params); tc++) {
err = mlx5e_tx_reporter_build_diagnose_output_ptpsq(fmsg,
&ptp_ch->ptpsq[tc],
tc);
@@ -494,7 +494,7 @@ static int mlx5e_tx_reporter_dump_all_sqs(struct mlx5e_priv *priv,
for (i = 0; i < priv->channels.num; i++) {
struct mlx5e_channel *c = priv->channels.c[i];
- for (tc = 0; tc < priv->channels.params.num_tc; tc++) {
+ for (tc = 0; tc < mlx5e_get_dcb_num_tc(&priv->channels.params); tc++) {
struct mlx5e_txqsq *sq = &c->sq[tc];
err = mlx5e_health_queue_dump(priv, fmsg, sq->sqn, "SQ");
@@ -504,7 +504,7 @@ static int mlx5e_tx_reporter_dump_all_sqs(struct mlx5e_priv *priv,
}
if (ptp_ch && test_bit(MLX5E_PTP_STATE_TX, ptp_ch->state)) {
- for (tc = 0; tc < priv->channels.params.num_tc; tc++) {
+ for (tc = 0; tc < mlx5e_get_dcb_num_tc(&priv->channels.params); tc++) {
struct mlx5e_txqsq *sq = &ptp_ch->ptpsq[tc].txqsq;
err = mlx5e_health_queue_dump(priv, fmsg, sq->sqn, "PTP SQ");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c
new file mode 100644
index 000000000000..b915fb29dd2c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2021, Mellanox Technologies inc. All rights reserved. */
+
+#include "rqt.h"
+#include <linux/mlx5/transobj.h>
+
+void mlx5e_rss_params_indir_init_uniform(struct mlx5e_rss_params_indir *indir,
+ unsigned int num_channels)
+{
+ unsigned int i;
+
+ for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++)
+ indir->table[i] = i % num_channels;
+}
+
+static int mlx5e_rqt_init(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev,
+ u16 max_size, u32 *init_rqns, u16 init_size)
+{
+ void *rqtc;
+ int inlen;
+ int err;
+ u32 *in;
+ int i;
+
+ rqt->mdev = mdev;
+ rqt->size = max_size;
+
+ inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * init_size;
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
+
+ MLX5_SET(rqtc, rqtc, rqt_max_size, rqt->size);
+
+ MLX5_SET(rqtc, rqtc, rqt_actual_size, init_size);
+ for (i = 0; i < init_size; i++)
+ MLX5_SET(rqtc, rqtc, rq_num[i], init_rqns[i]);
+
+ err = mlx5_core_create_rqt(rqt->mdev, in, inlen, &rqt->rqtn);
+
+ kvfree(in);
+ return err;
+}
+
+int mlx5e_rqt_init_direct(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev,
+ bool indir_enabled, u32 init_rqn)
+{
+ u16 max_size = indir_enabled ? MLX5E_INDIR_RQT_SIZE : 1;
+
+ return mlx5e_rqt_init(rqt, mdev, max_size, &init_rqn, 1);
+}
+
+static int mlx5e_bits_invert(unsigned long a, int size)
+{
+ int inv = 0;
+ int i;
+
+ for (i = 0; i < size; i++)
+ inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i;
+
+ return inv;
+}
+
+static int mlx5e_calc_indir_rqns(u32 *rss_rqns, u32 *rqns, unsigned int num_rqns,
+ u8 hfunc, struct mlx5e_rss_params_indir *indir)
+{
+ unsigned int i;
+
+ for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) {
+ unsigned int ix = i;
+
+ if (hfunc == ETH_RSS_HASH_XOR)
+ ix = mlx5e_bits_invert(ix, ilog2(MLX5E_INDIR_RQT_SIZE));
+
+ ix = indir->table[ix];
+
+ if (WARN_ON(ix >= num_rqns))
+ /* Could be a bug in the driver or in the kernel part of
+ * ethtool: indir table refers to non-existent RQs.
+ */
+ return -EINVAL;
+ rss_rqns[i] = rqns[ix];
+ }
+
+ return 0;
+}
+
+int mlx5e_rqt_init_indir(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev,
+ u32 *rqns, unsigned int num_rqns,
+ u8 hfunc, struct mlx5e_rss_params_indir *indir)
+{
+ u32 *rss_rqns;
+ int err;
+
+ rss_rqns = kvmalloc_array(MLX5E_INDIR_RQT_SIZE, sizeof(*rss_rqns), GFP_KERNEL);
+ if (!rss_rqns)
+ return -ENOMEM;
+
+ err = mlx5e_calc_indir_rqns(rss_rqns, rqns, num_rqns, hfunc, indir);
+ if (err)
+ goto out;
+
+ err = mlx5e_rqt_init(rqt, mdev, MLX5E_INDIR_RQT_SIZE, rss_rqns, MLX5E_INDIR_RQT_SIZE);
+
+out:
+ kvfree(rss_rqns);
+ return err;
+}
+
+void mlx5e_rqt_destroy(struct mlx5e_rqt *rqt)
+{
+ mlx5_core_destroy_rqt(rqt->mdev, rqt->rqtn);
+}
+
+static int mlx5e_rqt_redirect(struct mlx5e_rqt *rqt, u32 *rqns, unsigned int size)
+{
+ unsigned int i;
+ void *rqtc;
+ int inlen;
+ u32 *in;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * size;
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);
+
+ MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);
+ MLX5_SET(rqtc, rqtc, rqt_actual_size, size);
+ for (i = 0; i < size; i++)
+ MLX5_SET(rqtc, rqtc, rq_num[i], rqns[i]);
+
+ err = mlx5_core_modify_rqt(rqt->mdev, rqt->rqtn, in, inlen);
+
+ kvfree(in);
+ return err;
+}
+
+int mlx5e_rqt_redirect_direct(struct mlx5e_rqt *rqt, u32 rqn)
+{
+ return mlx5e_rqt_redirect(rqt, &rqn, 1);
+}
+
+int mlx5e_rqt_redirect_indir(struct mlx5e_rqt *rqt, u32 *rqns, unsigned int num_rqns,
+ u8 hfunc, struct mlx5e_rss_params_indir *indir)
+{
+ u32 *rss_rqns;
+ int err;
+
+ if (WARN_ON(rqt->size != MLX5E_INDIR_RQT_SIZE))
+ return -EINVAL;
+
+ rss_rqns = kvmalloc_array(MLX5E_INDIR_RQT_SIZE, sizeof(*rss_rqns), GFP_KERNEL);
+ if (!rss_rqns)
+ return -ENOMEM;
+
+ err = mlx5e_calc_indir_rqns(rss_rqns, rqns, num_rqns, hfunc, indir);
+ if (err)
+ goto out;
+
+ err = mlx5e_rqt_redirect(rqt, rss_rqns, MLX5E_INDIR_RQT_SIZE);
+
+out:
+ kvfree(rss_rqns);
+ return err;
+}
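The intended lifecycle of the new RQT object, as a hedged sketch (mdev, drop_rqn, rqns, num_rqns and indir are placeholders owned by the caller; error paths abbreviated):

struct mlx5e_rqt rqt;
int err;

/* Sized for indirection, but initially parked on the drop RQ. */
err = mlx5e_rqt_init_direct(&rqt, mdev, true, drop_rqn);
if (err)
	return err;

/* Once channels are up, spread traffic over their RQs. */
err = mlx5e_rqt_redirect_indir(&rqt, rqns, num_rqns,
			       ETH_RSS_HASH_TOP, &indir);

/* Teardown: point back at the drop RQ, then destroy the RQT. */
mlx5e_rqt_redirect_direct(&rqt, drop_rqn);
mlx5e_rqt_destroy(&rqt);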
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h
new file mode 100644
index 000000000000..60c985a12f24
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, Mellanox Technologies inc. All rights reserved. */
+
+#ifndef __MLX5_EN_RQT_H__
+#define __MLX5_EN_RQT_H__
+
+#include <linux/kernel.h>
+
+#define MLX5E_INDIR_RQT_SIZE (1 << 8)
+
+struct mlx5_core_dev;
+
+struct mlx5e_rss_params_indir {
+ u32 table[MLX5E_INDIR_RQT_SIZE];
+};
+
+void mlx5e_rss_params_indir_init_uniform(struct mlx5e_rss_params_indir *indir,
+ unsigned int num_channels);
+
+struct mlx5e_rqt {
+ struct mlx5_core_dev *mdev;
+ u32 rqtn;
+ u16 size;
+};
+
+int mlx5e_rqt_init_direct(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev,
+ bool indir_enabled, u32 init_rqn);
+int mlx5e_rqt_init_indir(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev,
+ u32 *rqns, unsigned int num_rqns,
+ u8 hfunc, struct mlx5e_rss_params_indir *indir);
+void mlx5e_rqt_destroy(struct mlx5e_rqt *rqt);
+
+static inline u32 mlx5e_rqt_get_rqtn(struct mlx5e_rqt *rqt)
+{
+ return rqt->rqtn;
+}
+
+int mlx5e_rqt_redirect_direct(struct mlx5e_rqt *rqt, u32 rqn);
+int mlx5e_rqt_redirect_indir(struct mlx5e_rqt *rqt, u32 *rqns, unsigned int num_rqns,
+ u8 hfunc, struct mlx5e_rss_params_indir *indir);
+
+#endif /* __MLX5_EN_RQT_H__ */
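For reference, mlx5e_rss_params_indir_init_uniform() fills the 256-entry table round-robin: with 3 channels it reads 0, 1, 2, 0, 1, 2, ... Under ETH_RSS_HASH_XOR, mlx5e_calc_indir_rqns() first bit-reverses the 8-bit index via mlx5e_bits_invert(), so index 1 (0b00000001) reads table entry 128 (0b10000000), spreading adjacent hash buckets across channels.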
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
new file mode 100644
index 000000000000..625cd49ef96c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
@@ -0,0 +1,588 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES.
+
+#include "rss.h"
+
+#define mlx5e_rss_warn(__dev, format, ...) \
+ dev_warn((__dev)->device, "%s:%d:(pid %d): " format, \
+ __func__, __LINE__, current->pid, \
+ ##__VA_ARGS__)
+
+static const struct mlx5e_rss_params_traffic_type rss_default_config[MLX5E_NUM_INDIR_TIRS] = {
+ [MLX5_TT_IPV4_TCP] = {
+ .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
+ .l4_prot_type = MLX5_L4_PROT_TYPE_TCP,
+ .rx_hash_fields = MLX5_HASH_IP_L4PORTS,
+ },
+ [MLX5_TT_IPV6_TCP] = {
+ .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
+ .l4_prot_type = MLX5_L4_PROT_TYPE_TCP,
+ .rx_hash_fields = MLX5_HASH_IP_L4PORTS,
+ },
+ [MLX5_TT_IPV4_UDP] = {
+ .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
+ .l4_prot_type = MLX5_L4_PROT_TYPE_UDP,
+ .rx_hash_fields = MLX5_HASH_IP_L4PORTS,
+ },
+ [MLX5_TT_IPV6_UDP] = {
+ .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
+ .l4_prot_type = MLX5_L4_PROT_TYPE_UDP,
+ .rx_hash_fields = MLX5_HASH_IP_L4PORTS,
+ },
+ [MLX5_TT_IPV4_IPSEC_AH] = {
+ .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
+ .l4_prot_type = 0,
+ .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI,
+ },
+ [MLX5_TT_IPV6_IPSEC_AH] = {
+ .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
+ .l4_prot_type = 0,
+ .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI,
+ },
+ [MLX5_TT_IPV4_IPSEC_ESP] = {
+ .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
+ .l4_prot_type = 0,
+ .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI,
+ },
+ [MLX5_TT_IPV6_IPSEC_ESP] = {
+ .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
+ .l4_prot_type = 0,
+ .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI,
+ },
+ [MLX5_TT_IPV4] = {
+ .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
+ .l4_prot_type = 0,
+ .rx_hash_fields = MLX5_HASH_IP,
+ },
+ [MLX5_TT_IPV6] = {
+ .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
+ .l4_prot_type = 0,
+ .rx_hash_fields = MLX5_HASH_IP,
+ },
+};
+
+struct mlx5e_rss_params_traffic_type
+mlx5e_rss_get_default_tt_config(enum mlx5_traffic_types tt)
+{
+ return rss_default_config[tt];
+}
+
+struct mlx5e_rss {
+ struct mlx5e_rss_params_hash hash;
+ struct mlx5e_rss_params_indir indir;
+ u32 rx_hash_fields[MLX5E_NUM_INDIR_TIRS];
+ struct mlx5e_tir *tir[MLX5E_NUM_INDIR_TIRS];
+ struct mlx5e_tir *inner_tir[MLX5E_NUM_INDIR_TIRS];
+ struct mlx5e_rqt rqt;
+ struct mlx5_core_dev *mdev;
+ u32 drop_rqn;
+ bool inner_ft_support;
+ bool enabled;
+ refcount_t refcnt;
+};
+
+struct mlx5e_rss *mlx5e_rss_alloc(void)
+{
+ return kvzalloc(sizeof(struct mlx5e_rss), GFP_KERNEL);
+}
+
+void mlx5e_rss_free(struct mlx5e_rss *rss)
+{
+ kvfree(rss);
+}
+
+static void mlx5e_rss_params_init(struct mlx5e_rss *rss)
+{
+ enum mlx5_traffic_types tt;
+
+ rss->hash.hfunc = ETH_RSS_HASH_TOP;
+ netdev_rss_key_fill(rss->hash.toeplitz_hash_key,
+ sizeof(rss->hash.toeplitz_hash_key));
+ for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
+ rss->rx_hash_fields[tt] =
+ mlx5e_rss_get_default_tt_config(tt).rx_hash_fields;
+}
+
+static struct mlx5e_tir **rss_get_tirp(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
+ bool inner)
+{
+ return inner ? &rss->inner_tir[tt] : &rss->tir[tt];
+}
+
+static struct mlx5e_tir *rss_get_tir(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
+ bool inner)
+{
+ return *rss_get_tirp(rss, tt, inner);
+}
+
+static struct mlx5e_rss_params_traffic_type
+mlx5e_rss_get_tt_config(struct mlx5e_rss *rss, enum mlx5_traffic_types tt)
+{
+ struct mlx5e_rss_params_traffic_type rss_tt;
+
+ rss_tt = mlx5e_rss_get_default_tt_config(tt);
+ rss_tt.rx_hash_fields = rss->rx_hash_fields[tt];
+ return rss_tt;
+}
+
+static int mlx5e_rss_create_tir(struct mlx5e_rss *rss,
+ enum mlx5_traffic_types tt,
+ const struct mlx5e_lro_param *init_lro_param,
+ bool inner)
+{
+ struct mlx5e_rss_params_traffic_type rss_tt;
+ struct mlx5e_tir_builder *builder;
+ struct mlx5e_tir **tir_p;
+ struct mlx5e_tir *tir;
+ u32 rqtn;
+ int err;
+
+ if (inner && !rss->inner_ft_support) {
+ mlx5e_rss_warn(rss->mdev,
+ "Cannot create inner indirect TIR[%d], RSS inner FT is not supported.\n",
+ tt);
+ return -EINVAL;
+ }
+
+ tir_p = rss_get_tirp(rss, tt, inner);
+ if (*tir_p)
+ return -EINVAL;
+
+ tir = kvzalloc(sizeof(*tir), GFP_KERNEL);
+ if (!tir)
+ return -ENOMEM;
+
+ builder = mlx5e_tir_builder_alloc(false);
+ if (!builder) {
+ err = -ENOMEM;
+ goto free_tir;
+ }
+
+ rqtn = mlx5e_rqt_get_rqtn(&rss->rqt);
+ mlx5e_tir_builder_build_rqt(builder, rss->mdev->mlx5e_res.hw_objs.td.tdn,
+ rqtn, rss->inner_ft_support);
+ mlx5e_tir_builder_build_lro(builder, init_lro_param);
+ rss_tt = mlx5e_rss_get_tt_config(rss, tt);
+ mlx5e_tir_builder_build_rss(builder, &rss->hash, &rss_tt, inner);
+
+ err = mlx5e_tir_init(tir, builder, rss->mdev, true);
+ mlx5e_tir_builder_free(builder);
+ if (err) {
+ mlx5e_rss_warn(rss->mdev, "Failed to create %sindirect TIR: err = %d, tt = %d\n",
+ inner ? "inner " : "", err, tt);
+ goto free_tir;
+ }
+
+ *tir_p = tir;
+ return 0;
+
+free_tir:
+ kvfree(tir);
+ return err;
+}
+
+static void mlx5e_rss_destroy_tir(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
+ bool inner)
+{
+ struct mlx5e_tir **tir_p;
+ struct mlx5e_tir *tir;
+
+ tir_p = rss_get_tirp(rss, tt, inner);
+ if (!*tir_p)
+ return;
+
+ tir = *tir_p;
+ mlx5e_tir_destroy(tir);
+ kvfree(tir);
+ *tir_p = NULL;
+}
+
+static int mlx5e_rss_create_tirs(struct mlx5e_rss *rss,
+ const struct mlx5e_lro_param *init_lro_param,
+ bool inner)
+{
+ enum mlx5_traffic_types tt, max_tt;
+ int err;
+
+ for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
+ err = mlx5e_rss_create_tir(rss, tt, init_lro_param, inner);
+ if (err)
+ goto err_destroy_tirs;
+ }
+
+ return 0;
+
+err_destroy_tirs:
+ max_tt = tt;
+ for (tt = 0; tt < max_tt; tt++)
+ mlx5e_rss_destroy_tir(rss, tt, inner);
+ return err;
+}
+
+static void mlx5e_rss_destroy_tirs(struct mlx5e_rss *rss, bool inner)
+{
+ enum mlx5_traffic_types tt;
+
+ for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
+ mlx5e_rss_destroy_tir(rss, tt, inner);
+}
+
+static int mlx5e_rss_update_tir(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
+ bool inner)
+{
+ struct mlx5e_rss_params_traffic_type rss_tt;
+ struct mlx5e_tir_builder *builder;
+ struct mlx5e_tir *tir;
+ int err;
+
+ tir = rss_get_tir(rss, tt, inner);
+ if (!tir)
+ return 0;
+
+ builder = mlx5e_tir_builder_alloc(true);
+ if (!builder)
+ return -ENOMEM;
+
+ rss_tt = mlx5e_rss_get_tt_config(rss, tt);
+
+ mlx5e_tir_builder_build_rss(builder, &rss->hash, &rss_tt, inner);
+ err = mlx5e_tir_modify(tir, builder);
+
+ mlx5e_tir_builder_free(builder);
+ return err;
+}
+
+static int mlx5e_rss_update_tirs(struct mlx5e_rss *rss)
+{
+ enum mlx5_traffic_types tt;
+ int err, retval;
+
+ retval = 0;
+
+ for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
+ err = mlx5e_rss_update_tir(rss, tt, false);
+ if (err) {
+ retval = retval ? : err;
+ mlx5e_rss_warn(rss->mdev,
+ "Failed to update RSS hash of indirect TIR for traffic type %d: err = %d\n",
+ tt, err);
+ }
+
+ if (!rss->inner_ft_support)
+ continue;
+
+ err = mlx5e_rss_update_tir(rss, tt, true);
+ if (err) {
+ retval = retval ? : err;
+ mlx5e_rss_warn(rss->mdev,
+ "Failed to update RSS hash of inner indirect TIR for traffic type %d: err = %d\n",
+ tt, err);
+ }
+ }
+ return retval;
+}
+
+int mlx5e_rss_init_no_tirs(struct mlx5e_rss *rss, struct mlx5_core_dev *mdev,
+ bool inner_ft_support, u32 drop_rqn)
+{
+ rss->mdev = mdev;
+ rss->inner_ft_support = inner_ft_support;
+ rss->drop_rqn = drop_rqn;
+
+ mlx5e_rss_params_init(rss);
+ refcount_set(&rss->refcnt, 1);
+
+ return mlx5e_rqt_init_direct(&rss->rqt, mdev, true, drop_rqn);
+}
+
+int mlx5e_rss_init(struct mlx5e_rss *rss, struct mlx5_core_dev *mdev,
+ bool inner_ft_support, u32 drop_rqn,
+ const struct mlx5e_lro_param *init_lro_param)
+{
+ int err;
+
+ err = mlx5e_rss_init_no_tirs(rss, mdev, inner_ft_support, drop_rqn);
+ if (err)
+ goto err_out;
+
+ err = mlx5e_rss_create_tirs(rss, init_lro_param, false);
+ if (err)
+ goto err_destroy_rqt;
+
+ if (inner_ft_support) {
+ err = mlx5e_rss_create_tirs(rss, init_lro_param, true);
+ if (err)
+ goto err_destroy_tirs;
+ }
+
+ return 0;
+
+err_destroy_tirs:
+ mlx5e_rss_destroy_tirs(rss, false);
+err_destroy_rqt:
+ mlx5e_rqt_destroy(&rss->rqt);
+err_out:
+ return err;
+}
+
+int mlx5e_rss_cleanup(struct mlx5e_rss *rss)
+{
+ if (!refcount_dec_if_one(&rss->refcnt))
+ return -EBUSY;
+
+ mlx5e_rss_destroy_tirs(rss, false);
+
+ if (rss->inner_ft_support)
+ mlx5e_rss_destroy_tirs(rss, true);
+
+ mlx5e_rqt_destroy(&rss->rqt);
+
+ return 0;
+}
+
+void mlx5e_rss_refcnt_inc(struct mlx5e_rss *rss)
+{
+ refcount_inc(&rss->refcnt);
+}
+
+void mlx5e_rss_refcnt_dec(struct mlx5e_rss *rss)
+{
+ refcount_dec(&rss->refcnt);
+}
+
+unsigned int mlx5e_rss_refcnt_read(struct mlx5e_rss *rss)
+{
+ return refcount_read(&rss->refcnt);
+}
+
+u32 mlx5e_rss_get_tirn(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
+ bool inner)
+{
+ struct mlx5e_tir *tir;
+
+ WARN_ON(inner && !rss->inner_ft_support);
+ tir = rss_get_tir(rss, tt, inner);
+ WARN_ON(!tir);
+
+ return mlx5e_tir_get_tirn(tir);
+}
+
+/* Fill the "tirn" output parameter.
+ * Create the requested TIR on its first use.
+ */
+int mlx5e_rss_obtain_tirn(struct mlx5e_rss *rss,
+ enum mlx5_traffic_types tt,
+ const struct mlx5e_lro_param *init_lro_param,
+ bool inner, u32 *tirn)
+{
+ struct mlx5e_tir *tir;
+
+ tir = rss_get_tir(rss, tt, inner);
+ if (!tir) { /* TIR doesn't exist, create one */
+ int err;
+
+ err = mlx5e_rss_create_tir(rss, tt, init_lro_param, inner);
+ if (err)
+ return err;
+ tir = rss_get_tir(rss, tt, inner);
+ }
+
+ *tirn = mlx5e_tir_get_tirn(tir);
+ return 0;
+}
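+
+/* A minimal usage sketch (hypothetical caller, error handling elided):
+ *
+ *	u32 tirn;
+ *
+ *	err = mlx5e_rss_obtain_tirn(rss, MLX5_TT_IPV4_TCP, &lro_param,
+ *				    false, &tirn);
+ *	if (!err)
+ *		dest.tir_num = tirn;
+ */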
+
+static void mlx5e_rss_apply(struct mlx5e_rss *rss, u32 *rqns, unsigned int num_rqns)
+{
+ int err;
+
+ err = mlx5e_rqt_redirect_indir(&rss->rqt, rqns, num_rqns, rss->hash.hfunc, &rss->indir);
+ if (err)
+ mlx5e_rss_warn(rss->mdev, "Failed to redirect RQT %#x to channels: err = %d\n",
+ mlx5e_rqt_get_rqtn(&rss->rqt), err);
+}
+
+void mlx5e_rss_enable(struct mlx5e_rss *rss, u32 *rqns, unsigned int num_rqns)
+{
+ rss->enabled = true;
+ mlx5e_rss_apply(rss, rqns, num_rqns);
+}
+
+void mlx5e_rss_disable(struct mlx5e_rss *rss)
+{
+ int err;
+
+ rss->enabled = false;
+ err = mlx5e_rqt_redirect_direct(&rss->rqt, rss->drop_rqn);
+ if (err)
+ mlx5e_rss_warn(rss->mdev, "Failed to redirect RQT %#x to drop RQ %#x: err = %d\n",
+ mlx5e_rqt_get_rqtn(&rss->rqt), rss->drop_rqn, err);
+}
+
+int mlx5e_rss_lro_set_param(struct mlx5e_rss *rss, struct mlx5e_lro_param *lro_param)
+{
+ struct mlx5e_tir_builder *builder;
+ enum mlx5_traffic_types tt;
+ int err, final_err;
+
+ builder = mlx5e_tir_builder_alloc(true);
+ if (!builder)
+ return -ENOMEM;
+
+ mlx5e_tir_builder_build_lro(builder, lro_param);
+
+ final_err = 0;
+
+ for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
+ struct mlx5e_tir *tir;
+
+ tir = rss_get_tir(rss, tt, false);
+ if (!tir)
+ goto inner_tir;
+ err = mlx5e_tir_modify(tir, builder);
+ if (err) {
+ mlx5e_rss_warn(rss->mdev, "Failed to update LRO state of indirect TIR %#x for traffic type %d: err = %d\n",
+ mlx5e_tir_get_tirn(tir), tt, err);
+ if (!final_err)
+ final_err = err;
+ }
+
+inner_tir:
+ if (!rss->inner_ft_support)
+ continue;
+
+ tir = rss_get_tir(rss, tt, true);
+ if (!tir)
+ continue;
+ err = mlx5e_tir_modify(tir, builder);
+ if (err) {
+ mlx5e_rss_warn(rss->mdev, "Failed to update LRO state of inner indirect TIR %#x for traffic type %d: err = %d\n",
+ mlx5e_tir_get_tirn(tir), tt, err);
+ if (!final_err)
+ final_err = err;
+ }
+ }
+
+ mlx5e_tir_builder_free(builder);
+ return final_err;
+}
+
+int mlx5e_rss_get_rxfh(struct mlx5e_rss *rss, u32 *indir, u8 *key, u8 *hfunc)
+{
+ unsigned int i;
+
+ if (indir)
+ for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++)
+ indir[i] = rss->indir.table[i];
+
+ if (key)
+ memcpy(key, rss->hash.toeplitz_hash_key,
+ sizeof(rss->hash.toeplitz_hash_key));
+
+ if (hfunc)
+ *hfunc = rss->hash.hfunc;
+
+ return 0;
+}
+
+int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir,
+ const u8 *key, const u8 *hfunc,
+ u32 *rqns, unsigned int num_rqns)
+{
+ bool changed_indir = false;
+ bool changed_hash = false;
+
+ if (hfunc && *hfunc != rss->hash.hfunc) {
+ switch (*hfunc) {
+ case ETH_RSS_HASH_XOR:
+ case ETH_RSS_HASH_TOP:
+ break;
+ default:
+ return -EINVAL;
+ }
+ changed_hash = true;
+ changed_indir = true;
+ rss->hash.hfunc = *hfunc;
+ }
+
+ if (key) {
+ if (rss->hash.hfunc == ETH_RSS_HASH_TOP)
+ changed_hash = true;
+ memcpy(rss->hash.toeplitz_hash_key, key,
+ sizeof(rss->hash.toeplitz_hash_key));
+ }
+
+ if (indir) {
+ unsigned int i;
+
+ changed_indir = true;
+
+ for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++)
+ rss->indir.table[i] = indir[i];
+ }
+
+ if (changed_indir && rss->enabled)
+ mlx5e_rss_apply(rss, rqns, num_rqns);
+
+ if (changed_hash)
+ mlx5e_rss_update_tirs(rss);
+
+ return 0;
+}
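+
+/* Why an hfunc change flips both flags above: the hash function is
+ * programmed in two places - into the RQT by mlx5e_rqt_redirect_indir(),
+ * which takes rss->hash.hfunc, and into every TIR, rebuilt by
+ * mlx5e_rss_update_tirs(). A new Toeplitz key only affects the TIRs.
+ */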
+
+struct mlx5e_rss_params_hash mlx5e_rss_get_hash(struct mlx5e_rss *rss)
+{
+ return rss->hash;
+}
+
+u8 mlx5e_rss_get_hash_fields(struct mlx5e_rss *rss, enum mlx5_traffic_types tt)
+{
+ return rss->rx_hash_fields[tt];
+}
+
+int mlx5e_rss_set_hash_fields(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
+ u8 rx_hash_fields)
+{
+ u8 old_rx_hash_fields;
+ int err;
+
+ old_rx_hash_fields = rss->rx_hash_fields[tt];
+
+ if (old_rx_hash_fields == rx_hash_fields)
+ return 0;
+
+ rss->rx_hash_fields[tt] = rx_hash_fields;
+
+ err = mlx5e_rss_update_tir(rss, tt, false);
+ if (err) {
+ rss->rx_hash_fields[tt] = old_rx_hash_fields;
+ mlx5e_rss_warn(rss->mdev,
+ "Failed to update RSS hash fields of indirect TIR for traffic type %d: err = %d\n",
+ tt, err);
+ return err;
+ }
+
+	if (!rss->inner_ft_support)
+ return 0;
+
+ err = mlx5e_rss_update_tir(rss, tt, true);
+ if (err) {
+ /* Partial update happened. Try to revert - it may fail too, but
+ * there is nothing more we can do.
+ */
+ rss->rx_hash_fields[tt] = old_rx_hash_fields;
+ mlx5e_rss_warn(rss->mdev,
+ "Failed to update RSS hash fields of inner indirect TIR for traffic type %d: err = %d\n",
+ tt, err);
+ if (mlx5e_rss_update_tir(rss, tt, false))
+ mlx5e_rss_warn(rss->mdev,
+ "Partial update of RSS hash fields happened: failed to revert indirect TIR for traffic type %d to the old values\n",
+ tt);
+ }
+
+ return err;
+}
+
+void mlx5e_rss_set_indir_uniform(struct mlx5e_rss *rss, unsigned int nch)
+{
+ mlx5e_rss_params_indir_init_uniform(&rss->indir, nch);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h
new file mode 100644
index 000000000000..d522a10dadf3
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. */
+
+#ifndef __MLX5_EN_RSS_H__
+#define __MLX5_EN_RSS_H__
+
+#include "rqt.h"
+#include "tir.h"
+#include "fs.h"
+
+struct mlx5e_rss_params_traffic_type
+mlx5e_rss_get_default_tt_config(enum mlx5_traffic_types tt);
+
+struct mlx5e_rss;
+
+struct mlx5e_rss *mlx5e_rss_alloc(void);
+void mlx5e_rss_free(struct mlx5e_rss *rss);
+int mlx5e_rss_init(struct mlx5e_rss *rss, struct mlx5_core_dev *mdev,
+ bool inner_ft_support, u32 drop_rqn,
+ const struct mlx5e_lro_param *init_lro_param);
+int mlx5e_rss_init_no_tirs(struct mlx5e_rss *rss, struct mlx5_core_dev *mdev,
+ bool inner_ft_support, u32 drop_rqn);
+int mlx5e_rss_cleanup(struct mlx5e_rss *rss);
+
+void mlx5e_rss_refcnt_inc(struct mlx5e_rss *rss);
+void mlx5e_rss_refcnt_dec(struct mlx5e_rss *rss);
+unsigned int mlx5e_rss_refcnt_read(struct mlx5e_rss *rss);
+
+u32 mlx5e_rss_get_tirn(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
+ bool inner);
+int mlx5e_rss_obtain_tirn(struct mlx5e_rss *rss,
+ enum mlx5_traffic_types tt,
+ const struct mlx5e_lro_param *init_lro_param,
+ bool inner, u32 *tirn);
+
+void mlx5e_rss_enable(struct mlx5e_rss *rss, u32 *rqns, unsigned int num_rqns);
+void mlx5e_rss_disable(struct mlx5e_rss *rss);
+
+int mlx5e_rss_lro_set_param(struct mlx5e_rss *rss, struct mlx5e_lro_param *lro_param);
+int mlx5e_rss_get_rxfh(struct mlx5e_rss *rss, u32 *indir, u8 *key, u8 *hfunc);
+int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir,
+ const u8 *key, const u8 *hfunc,
+ u32 *rqns, unsigned int num_rqns);
+struct mlx5e_rss_params_hash mlx5e_rss_get_hash(struct mlx5e_rss *rss);
+u8 mlx5e_rss_get_hash_fields(struct mlx5e_rss *rss, enum mlx5_traffic_types tt);
+int mlx5e_rss_set_hash_fields(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
+ u8 rx_hash_fields);
+void mlx5e_rss_set_indir_uniform(struct mlx5e_rss *rss, unsigned int nch);
+#endif /* __MLX5_EN_RSS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
new file mode 100644
index 000000000000..bf0313e2682b
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
@@ -0,0 +1,690 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2021, Mellanox Technologies inc. All rights reserved. */
+
+#include "rx_res.h"
+#include "channels.h"
+#include "params.h"
+
+#define MLX5E_MAX_NUM_RSS 16
+
+struct mlx5e_rx_res {
+ struct mlx5_core_dev *mdev;
+ enum mlx5e_rx_res_features features;
+ unsigned int max_nch;
+ u32 drop_rqn;
+
+ struct mlx5e_rss *rss[MLX5E_MAX_NUM_RSS];
+ bool rss_active;
+ u32 rss_rqns[MLX5E_INDIR_RQT_SIZE];
+ unsigned int rss_nch;
+
+ struct {
+ struct mlx5e_rqt direct_rqt;
+ struct mlx5e_tir direct_tir;
+ struct mlx5e_rqt xsk_rqt;
+ struct mlx5e_tir xsk_tir;
+ } *channels;
+
+ struct {
+ struct mlx5e_rqt rqt;
+ struct mlx5e_tir tir;
+ } ptp;
+};
+
+/* API for rx_res_rss_* */
+
+static int mlx5e_rx_res_rss_init_def(struct mlx5e_rx_res *res,
+ const struct mlx5e_lro_param *init_lro_param,
+ unsigned int init_nch)
+{
+ bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
+ struct mlx5e_rss *rss;
+ int err;
+
+ if (WARN_ON(res->rss[0]))
+ return -EINVAL;
+
+ rss = mlx5e_rss_alloc();
+ if (!rss)
+ return -ENOMEM;
+
+ err = mlx5e_rss_init(rss, res->mdev, inner_ft_support, res->drop_rqn,
+ init_lro_param);
+ if (err)
+ goto err_rss_free;
+
+ mlx5e_rss_set_indir_uniform(rss, init_nch);
+
+ res->rss[0] = rss;
+
+ return 0;
+
+err_rss_free:
+ mlx5e_rss_free(rss);
+ return err;
+}
+
+int mlx5e_rx_res_rss_init(struct mlx5e_rx_res *res, u32 *rss_idx, unsigned int init_nch)
+{
+ bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
+ struct mlx5e_rss *rss;
+ int err, i;
+
+ for (i = 1; i < MLX5E_MAX_NUM_RSS; i++)
+ if (!res->rss[i])
+ break;
+
+ if (i == MLX5E_MAX_NUM_RSS)
+ return -ENOSPC;
+
+ rss = mlx5e_rss_alloc();
+ if (!rss)
+ return -ENOMEM;
+
+ err = mlx5e_rss_init_no_tirs(rss, res->mdev, inner_ft_support, res->drop_rqn);
+ if (err)
+ goto err_rss_free;
+
+ mlx5e_rss_set_indir_uniform(rss, init_nch);
+ if (res->rss_active)
+ mlx5e_rss_enable(rss, res->rss_rqns, res->rss_nch);
+
+ res->rss[i] = rss;
+ *rss_idx = i;
+
+ return 0;
+
+err_rss_free:
+ mlx5e_rss_free(rss);
+ return err;
+}
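+
+/* Slot 0 is reserved for the default RSS context created by
+ * mlx5e_rx_res_rss_init_def(), which is why the free-slot search above
+ * starts at index 1.
+ */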
+
+static int __mlx5e_rx_res_rss_destroy(struct mlx5e_rx_res *res, u32 rss_idx)
+{
+ struct mlx5e_rss *rss = res->rss[rss_idx];
+ int err;
+
+ err = mlx5e_rss_cleanup(rss);
+ if (err)
+ return err;
+
+ mlx5e_rss_free(rss);
+ res->rss[rss_idx] = NULL;
+
+ return 0;
+}
+
+int mlx5e_rx_res_rss_destroy(struct mlx5e_rx_res *res, u32 rss_idx)
+{
+ struct mlx5e_rss *rss;
+
+ if (rss_idx >= MLX5E_MAX_NUM_RSS)
+ return -EINVAL;
+
+ rss = res->rss[rss_idx];
+ if (!rss)
+ return -EINVAL;
+
+ return __mlx5e_rx_res_rss_destroy(res, rss_idx);
+}
+
+static void mlx5e_rx_res_rss_destroy_all(struct mlx5e_rx_res *res)
+{
+ int i;
+
+ for (i = 0; i < MLX5E_MAX_NUM_RSS; i++) {
+ struct mlx5e_rss *rss = res->rss[i];
+ int err;
+
+ if (!rss)
+ continue;
+
+ err = __mlx5e_rx_res_rss_destroy(res, i);
+ if (err) {
+ unsigned int refcount;
+
+ refcount = mlx5e_rss_refcnt_read(rss);
+ mlx5_core_warn(res->mdev,
+ "Failed to destroy RSS context %d, refcount = %u, err = %d\n",
+ i, refcount, err);
+ }
+ }
+}
+
+static void mlx5e_rx_res_rss_enable(struct mlx5e_rx_res *res)
+{
+ int i;
+
+ res->rss_active = true;
+
+ for (i = 0; i < MLX5E_MAX_NUM_RSS; i++) {
+ struct mlx5e_rss *rss = res->rss[i];
+
+ if (!rss)
+ continue;
+ mlx5e_rss_enable(rss, res->rss_rqns, res->rss_nch);
+ }
+}
+
+static void mlx5e_rx_res_rss_disable(struct mlx5e_rx_res *res)
+{
+ int i;
+
+ res->rss_active = false;
+
+ for (i = 0; i < MLX5E_MAX_NUM_RSS; i++) {
+ struct mlx5e_rss *rss = res->rss[i];
+
+ if (!rss)
+ continue;
+ mlx5e_rss_disable(rss);
+ }
+}
+
+/* Updates the indirection table SW shadow, does not update the HW resources yet */
+void mlx5e_rx_res_rss_set_indir_uniform(struct mlx5e_rx_res *res, unsigned int nch)
+{
+ WARN_ON_ONCE(res->rss_active);
+ mlx5e_rss_set_indir_uniform(res->rss[0], nch);
+}
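+
+/* Intended call order (a sketch): update the SW shadow while RSS is
+ * inactive, then let activation push it to HW:
+ *
+ *	mlx5e_rx_res_rss_set_indir_uniform(res, new_nch);
+ *	mlx5e_rx_res_channels_activate(res, chs);
+ */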
+
+int mlx5e_rx_res_rss_get_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
+ u32 *indir, u8 *key, u8 *hfunc)
+{
+ struct mlx5e_rss *rss;
+
+ if (rss_idx >= MLX5E_MAX_NUM_RSS)
+ return -EINVAL;
+
+ rss = res->rss[rss_idx];
+ if (!rss)
+ return -ENOENT;
+
+ return mlx5e_rss_get_rxfh(rss, indir, key, hfunc);
+}
+
+int mlx5e_rx_res_rss_set_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
+ const u32 *indir, const u8 *key, const u8 *hfunc)
+{
+ struct mlx5e_rss *rss;
+
+ if (rss_idx >= MLX5E_MAX_NUM_RSS)
+ return -EINVAL;
+
+ rss = res->rss[rss_idx];
+ if (!rss)
+ return -ENOENT;
+
+ return mlx5e_rss_set_rxfh(rss, indir, key, hfunc, res->rss_rqns, res->rss_nch);
+}
+
+u8 mlx5e_rx_res_rss_get_hash_fields(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt)
+{
+ struct mlx5e_rss *rss = res->rss[0];
+
+ return mlx5e_rss_get_hash_fields(rss, tt);
+}
+
+int mlx5e_rx_res_rss_set_hash_fields(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt,
+ u8 rx_hash_fields)
+{
+ struct mlx5e_rss *rss = res->rss[0];
+
+ return mlx5e_rss_set_hash_fields(rss, tt, rx_hash_fields);
+}
+
+int mlx5e_rx_res_rss_cnt(struct mlx5e_rx_res *res)
+{
+ int i, cnt;
+
+ cnt = 0;
+ for (i = 0; i < MLX5E_MAX_NUM_RSS; i++)
+ if (res->rss[i])
+ cnt++;
+
+ return cnt;
+}
+
+int mlx5e_rx_res_rss_index(struct mlx5e_rx_res *res, struct mlx5e_rss *rss)
+{
+ int i;
+
+ if (!rss)
+ return -EINVAL;
+
+ for (i = 0; i < MLX5E_MAX_NUM_RSS; i++)
+ if (rss == res->rss[i])
+ return i;
+
+ return -ENOENT;
+}
+
+struct mlx5e_rss *mlx5e_rx_res_rss_get(struct mlx5e_rx_res *res, u32 rss_idx)
+{
+ if (rss_idx >= MLX5E_MAX_NUM_RSS)
+ return NULL;
+
+ return res->rss[rss_idx];
+}
+
+/* End of API rx_res_rss_* */
+
+struct mlx5e_rx_res *mlx5e_rx_res_alloc(void)
+{
+ return kvzalloc(sizeof(struct mlx5e_rx_res), GFP_KERNEL);
+}
+
+static int mlx5e_rx_res_channels_init(struct mlx5e_rx_res *res,
+ const struct mlx5e_lro_param *init_lro_param)
+{
+ bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
+ struct mlx5e_tir_builder *builder;
+ int err = 0;
+ int ix;
+
+ builder = mlx5e_tir_builder_alloc(false);
+ if (!builder)
+ return -ENOMEM;
+
+ res->channels = kvcalloc(res->max_nch, sizeof(*res->channels), GFP_KERNEL);
+ if (!res->channels) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ for (ix = 0; ix < res->max_nch; ix++) {
+ err = mlx5e_rqt_init_direct(&res->channels[ix].direct_rqt,
+ res->mdev, false, res->drop_rqn);
+ if (err) {
+ mlx5_core_warn(res->mdev, "Failed to create a direct RQT: err = %d, ix = %u\n",
+ err, ix);
+ goto err_destroy_direct_rqts;
+ }
+ }
+
+ for (ix = 0; ix < res->max_nch; ix++) {
+ mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn,
+ mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt),
+ inner_ft_support);
+ mlx5e_tir_builder_build_lro(builder, init_lro_param);
+ mlx5e_tir_builder_build_direct(builder);
+
+ err = mlx5e_tir_init(&res->channels[ix].direct_tir, builder, res->mdev, true);
+ if (err) {
+ mlx5_core_warn(res->mdev, "Failed to create a direct TIR: err = %d, ix = %u\n",
+ err, ix);
+ goto err_destroy_direct_tirs;
+ }
+
+ mlx5e_tir_builder_clear(builder);
+ }
+
+ if (!(res->features & MLX5E_RX_RES_FEATURE_XSK))
+ goto out;
+
+ for (ix = 0; ix < res->max_nch; ix++) {
+ err = mlx5e_rqt_init_direct(&res->channels[ix].xsk_rqt,
+ res->mdev, false, res->drop_rqn);
+ if (err) {
+ mlx5_core_warn(res->mdev, "Failed to create an XSK RQT: err = %d, ix = %u\n",
+ err, ix);
+ goto err_destroy_xsk_rqts;
+ }
+ }
+
+ for (ix = 0; ix < res->max_nch; ix++) {
+ mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn,
+ mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt),
+ inner_ft_support);
+ mlx5e_tir_builder_build_lro(builder, init_lro_param);
+ mlx5e_tir_builder_build_direct(builder);
+
+ err = mlx5e_tir_init(&res->channels[ix].xsk_tir, builder, res->mdev, true);
+ if (err) {
+ mlx5_core_warn(res->mdev, "Failed to create an XSK TIR: err = %d, ix = %u\n",
+ err, ix);
+ goto err_destroy_xsk_tirs;
+ }
+
+ mlx5e_tir_builder_clear(builder);
+ }
+
+ goto out;
+
+err_destroy_xsk_tirs:
+ while (--ix >= 0)
+ mlx5e_tir_destroy(&res->channels[ix].xsk_tir);
+
+ ix = res->max_nch;
+err_destroy_xsk_rqts:
+ while (--ix >= 0)
+ mlx5e_rqt_destroy(&res->channels[ix].xsk_rqt);
+
+ ix = res->max_nch;
+err_destroy_direct_tirs:
+ while (--ix >= 0)
+ mlx5e_tir_destroy(&res->channels[ix].direct_tir);
+
+ ix = res->max_nch;
+err_destroy_direct_rqts:
+ while (--ix >= 0)
+ mlx5e_rqt_destroy(&res->channels[ix].direct_rqt);
+
+ kvfree(res->channels);
+
+out:
+ mlx5e_tir_builder_free(builder);
+
+ return err;
+}
+
+static int mlx5e_rx_res_ptp_init(struct mlx5e_rx_res *res)
+{
+ bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
+ struct mlx5e_tir_builder *builder;
+ int err;
+
+ builder = mlx5e_tir_builder_alloc(false);
+ if (!builder)
+ return -ENOMEM;
+
+ err = mlx5e_rqt_init_direct(&res->ptp.rqt, res->mdev, false, res->drop_rqn);
+ if (err)
+ goto out;
+
+ mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn,
+ mlx5e_rqt_get_rqtn(&res->ptp.rqt),
+ inner_ft_support);
+ mlx5e_tir_builder_build_direct(builder);
+
+ err = mlx5e_tir_init(&res->ptp.tir, builder, res->mdev, true);
+ if (err)
+ goto err_destroy_ptp_rqt;
+
+ goto out;
+
+err_destroy_ptp_rqt:
+ mlx5e_rqt_destroy(&res->ptp.rqt);
+
+out:
+ mlx5e_tir_builder_free(builder);
+ return err;
+}
+
+static void mlx5e_rx_res_channels_destroy(struct mlx5e_rx_res *res)
+{
+ unsigned int ix;
+
+ for (ix = 0; ix < res->max_nch; ix++) {
+ mlx5e_tir_destroy(&res->channels[ix].direct_tir);
+ mlx5e_rqt_destroy(&res->channels[ix].direct_rqt);
+
+ if (!(res->features & MLX5E_RX_RES_FEATURE_XSK))
+ continue;
+
+ mlx5e_tir_destroy(&res->channels[ix].xsk_tir);
+ mlx5e_rqt_destroy(&res->channels[ix].xsk_rqt);
+ }
+
+ kvfree(res->channels);
+}
+
+static void mlx5e_rx_res_ptp_destroy(struct mlx5e_rx_res *res)
+{
+ mlx5e_tir_destroy(&res->ptp.tir);
+ mlx5e_rqt_destroy(&res->ptp.rqt);
+}
+
+int mlx5e_rx_res_init(struct mlx5e_rx_res *res, struct mlx5_core_dev *mdev,
+ enum mlx5e_rx_res_features features, unsigned int max_nch,
+ u32 drop_rqn, const struct mlx5e_lro_param *init_lro_param,
+ unsigned int init_nch)
+{
+ int err;
+
+ res->mdev = mdev;
+ res->features = features;
+ res->max_nch = max_nch;
+ res->drop_rqn = drop_rqn;
+
+ err = mlx5e_rx_res_rss_init_def(res, init_lro_param, init_nch);
+ if (err)
+ goto err_out;
+
+ err = mlx5e_rx_res_channels_init(res, init_lro_param);
+ if (err)
+ goto err_rss_destroy;
+
+ err = mlx5e_rx_res_ptp_init(res);
+ if (err)
+ goto err_channels_destroy;
+
+ return 0;
+
+err_channels_destroy:
+ mlx5e_rx_res_channels_destroy(res);
+err_rss_destroy:
+ __mlx5e_rx_res_rss_destroy(res, 0);
+err_out:
+ return err;
+}
+
+void mlx5e_rx_res_destroy(struct mlx5e_rx_res *res)
+{
+ mlx5e_rx_res_ptp_destroy(res);
+ mlx5e_rx_res_channels_destroy(res);
+ mlx5e_rx_res_rss_destroy_all(res);
+}
+
+void mlx5e_rx_res_free(struct mlx5e_rx_res *res)
+{
+ kvfree(res);
+}
+
+u32 mlx5e_rx_res_get_tirn_direct(struct mlx5e_rx_res *res, unsigned int ix)
+{
+ return mlx5e_tir_get_tirn(&res->channels[ix].direct_tir);
+}
+
+u32 mlx5e_rx_res_get_tirn_xsk(struct mlx5e_rx_res *res, unsigned int ix)
+{
+ WARN_ON(!(res->features & MLX5E_RX_RES_FEATURE_XSK));
+
+ return mlx5e_tir_get_tirn(&res->channels[ix].xsk_tir);
+}
+
+u32 mlx5e_rx_res_get_tirn_rss(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt)
+{
+ struct mlx5e_rss *rss = res->rss[0];
+
+ return mlx5e_rss_get_tirn(rss, tt, false);
+}
+
+u32 mlx5e_rx_res_get_tirn_rss_inner(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt)
+{
+ struct mlx5e_rss *rss = res->rss[0];
+
+ return mlx5e_rss_get_tirn(rss, tt, true);
+}
+
+u32 mlx5e_rx_res_get_tirn_ptp(struct mlx5e_rx_res *res)
+{
+ WARN_ON(!(res->features & MLX5E_RX_RES_FEATURE_PTP));
+ return mlx5e_tir_get_tirn(&res->ptp.tir);
+}
+
+u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int ix)
+{
+ return mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt);
+}
+
+void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_channels *chs)
+{
+ unsigned int nch, ix;
+ int err;
+
+ nch = mlx5e_channels_get_num(chs);
+
+	for (ix = 0; ix < nch; ix++)
+		mlx5e_channels_get_regular_rqn(chs, ix, &res->rss_rqns[ix]);
+	res->rss_nch = nch;
+
+ mlx5e_rx_res_rss_enable(res);
+
+ for (ix = 0; ix < nch; ix++) {
+ u32 rqn;
+
+ mlx5e_channels_get_regular_rqn(chs, ix, &rqn);
+ err = mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, rqn);
+ if (err)
+ mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to RQ %#x (channel %u): err = %d\n",
+ mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt),
+ rqn, ix, err);
+
+ if (!(res->features & MLX5E_RX_RES_FEATURE_XSK))
+ continue;
+
+ if (!mlx5e_channels_get_xsk_rqn(chs, ix, &rqn))
+ rqn = res->drop_rqn;
+ err = mlx5e_rqt_redirect_direct(&res->channels[ix].xsk_rqt, rqn);
+ if (err)
+ mlx5_core_warn(res->mdev, "Failed to redirect XSK RQT %#x to RQ %#x (channel %u): err = %d\n",
+ mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt),
+ rqn, ix, err);
+ }
+ for (ix = nch; ix < res->max_nch; ix++) {
+ err = mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, res->drop_rqn);
+ if (err)
+ mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to drop RQ %#x (channel %u): err = %d\n",
+ mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt),
+ res->drop_rqn, ix, err);
+
+ if (!(res->features & MLX5E_RX_RES_FEATURE_XSK))
+ continue;
+
+ err = mlx5e_rqt_redirect_direct(&res->channels[ix].xsk_rqt, res->drop_rqn);
+ if (err)
+ mlx5_core_warn(res->mdev, "Failed to redirect XSK RQT %#x to drop RQ %#x (channel %u): err = %d\n",
+ mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt),
+ res->drop_rqn, ix, err);
+ }
+
+ if (res->features & MLX5E_RX_RES_FEATURE_PTP) {
+ u32 rqn;
+
+ if (mlx5e_channels_get_ptp_rqn(chs, &rqn))
+ rqn = res->drop_rqn;
+
+ err = mlx5e_rqt_redirect_direct(&res->ptp.rqt, rqn);
+ if (err)
+ mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to RQ %#x (PTP): err = %d\n",
+ mlx5e_rqt_get_rqtn(&res->ptp.rqt),
+ rqn, err);
+ }
+}
+
+void mlx5e_rx_res_channels_deactivate(struct mlx5e_rx_res *res)
+{
+ unsigned int ix;
+ int err;
+
+ mlx5e_rx_res_rss_disable(res);
+
+ for (ix = 0; ix < res->max_nch; ix++) {
+ err = mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, res->drop_rqn);
+ if (err)
+ mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to drop RQ %#x (channel %u): err = %d\n",
+ mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt),
+ res->drop_rqn, ix, err);
+
+ if (!(res->features & MLX5E_RX_RES_FEATURE_XSK))
+ continue;
+
+ err = mlx5e_rqt_redirect_direct(&res->channels[ix].xsk_rqt, res->drop_rqn);
+ if (err)
+ mlx5_core_warn(res->mdev, "Failed to redirect XSK RQT %#x to drop RQ %#x (channel %u): err = %d\n",
+ mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt),
+ res->drop_rqn, ix, err);
+ }
+
+ if (res->features & MLX5E_RX_RES_FEATURE_PTP) {
+ err = mlx5e_rqt_redirect_direct(&res->ptp.rqt, res->drop_rqn);
+ if (err)
+ mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to drop RQ %#x (PTP): err = %d\n",
+ mlx5e_rqt_get_rqtn(&res->ptp.rqt),
+ res->drop_rqn, err);
+ }
+}
+
+int mlx5e_rx_res_xsk_activate(struct mlx5e_rx_res *res, struct mlx5e_channels *chs,
+ unsigned int ix)
+{
+ u32 rqn;
+ int err;
+
+ if (!mlx5e_channels_get_xsk_rqn(chs, ix, &rqn))
+ return -EINVAL;
+
+ err = mlx5e_rqt_redirect_direct(&res->channels[ix].xsk_rqt, rqn);
+ if (err)
+ mlx5_core_warn(res->mdev, "Failed to redirect XSK RQT %#x to XSK RQ %#x (channel %u): err = %d\n",
+ mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt),
+ rqn, ix, err);
+ return err;
+}
+
+int mlx5e_rx_res_xsk_deactivate(struct mlx5e_rx_res *res, unsigned int ix)
+{
+ int err;
+
+ err = mlx5e_rqt_redirect_direct(&res->channels[ix].xsk_rqt, res->drop_rqn);
+ if (err)
+ mlx5_core_warn(res->mdev, "Failed to redirect XSK RQT %#x to drop RQ %#x (channel %u): err = %d\n",
+ mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt),
+ res->drop_rqn, ix, err);
+ return err;
+}
+
+int mlx5e_rx_res_lro_set_param(struct mlx5e_rx_res *res, struct mlx5e_lro_param *lro_param)
+{
+ struct mlx5e_tir_builder *builder;
+ int err, final_err;
+ unsigned int ix;
+
+ builder = mlx5e_tir_builder_alloc(true);
+ if (!builder)
+ return -ENOMEM;
+
+ mlx5e_tir_builder_build_lro(builder, lro_param);
+
+ final_err = 0;
+
+ for (ix = 0; ix < MLX5E_MAX_NUM_RSS; ix++) {
+ struct mlx5e_rss *rss = res->rss[ix];
+
+ if (!rss)
+ continue;
+
+ err = mlx5e_rss_lro_set_param(rss, lro_param);
+ if (err)
+ final_err = final_err ? : err;
+ }
+
+ for (ix = 0; ix < res->max_nch; ix++) {
+ err = mlx5e_tir_modify(&res->channels[ix].direct_tir, builder);
+ if (err) {
+ mlx5_core_warn(res->mdev, "Failed to update LRO state of direct TIR %#x for channel %u: err = %d\n",
+ mlx5e_tir_get_tirn(&res->channels[ix].direct_tir), ix, err);
+ if (!final_err)
+ final_err = err;
+ }
+ }
+
+ mlx5e_tir_builder_free(builder);
+ return final_err;
+}
+
+struct mlx5e_rss_params_hash mlx5e_rx_res_get_current_hash(struct mlx5e_rx_res *res)
+{
+ return mlx5e_rss_get_hash(res->rss[0]);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
new file mode 100644
index 000000000000..4a15942d79f7
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, Mellanox Technologies inc. All rights reserved. */
+
+#ifndef __MLX5_EN_RX_RES_H__
+#define __MLX5_EN_RX_RES_H__
+
+#include <linux/kernel.h>
+#include "rqt.h"
+#include "tir.h"
+#include "fs.h"
+#include "rss.h"
+
+struct mlx5e_rx_res;
+
+struct mlx5e_channels;
+struct mlx5e_rss_params_hash;
+
+enum mlx5e_rx_res_features {
+ MLX5E_RX_RES_FEATURE_INNER_FT = BIT(0),
+ MLX5E_RX_RES_FEATURE_XSK = BIT(1),
+ MLX5E_RX_RES_FEATURE_PTP = BIT(2),
+};
+
+/* Setup */
+struct mlx5e_rx_res *mlx5e_rx_res_alloc(void);
+int mlx5e_rx_res_init(struct mlx5e_rx_res *res, struct mlx5_core_dev *mdev,
+ enum mlx5e_rx_res_features features, unsigned int max_nch,
+ u32 drop_rqn, const struct mlx5e_lro_param *init_lro_param,
+ unsigned int init_nch);
+void mlx5e_rx_res_destroy(struct mlx5e_rx_res *res);
+void mlx5e_rx_res_free(struct mlx5e_rx_res *res);
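+
+/* Typical lifecycle (a sketch based on the API above):
+ *
+ *	res = mlx5e_rx_res_alloc();
+ *	err = mlx5e_rx_res_init(res, mdev, features, max_nch, drop_rqn,
+ *				&lro_param, init_nch);
+ *	...
+ *	mlx5e_rx_res_destroy(res);
+ *	mlx5e_rx_res_free(res);
+ */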
+
+/* TIRN getters for flow steering */
+u32 mlx5e_rx_res_get_tirn_direct(struct mlx5e_rx_res *res, unsigned int ix);
+u32 mlx5e_rx_res_get_tirn_xsk(struct mlx5e_rx_res *res, unsigned int ix);
+u32 mlx5e_rx_res_get_tirn_rss(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt);
+u32 mlx5e_rx_res_get_tirn_rss_inner(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt);
+u32 mlx5e_rx_res_get_tirn_ptp(struct mlx5e_rx_res *res);
+
+/* RQTN getters for modules that create their own TIRs */
+u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int ix);
+
+/* Activate/deactivate API */
+void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_channels *chs);
+void mlx5e_rx_res_channels_deactivate(struct mlx5e_rx_res *res);
+int mlx5e_rx_res_xsk_activate(struct mlx5e_rx_res *res, struct mlx5e_channels *chs,
+ unsigned int ix);
+int mlx5e_rx_res_xsk_deactivate(struct mlx5e_rx_res *res, unsigned int ix);
+
+/* Configuration API */
+void mlx5e_rx_res_rss_set_indir_uniform(struct mlx5e_rx_res *res, unsigned int nch);
+int mlx5e_rx_res_rss_get_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
+ u32 *indir, u8 *key, u8 *hfunc);
+int mlx5e_rx_res_rss_set_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
+ const u32 *indir, const u8 *key, const u8 *hfunc);
+
+u8 mlx5e_rx_res_rss_get_hash_fields(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt);
+int mlx5e_rx_res_rss_set_hash_fields(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt,
+ u8 rx_hash_fields);
+int mlx5e_rx_res_lro_set_param(struct mlx5e_rx_res *res, struct mlx5e_lro_param *lro_param);
+
+int mlx5e_rx_res_rss_init(struct mlx5e_rx_res *res, u32 *rss_idx, unsigned int init_nch);
+int mlx5e_rx_res_rss_destroy(struct mlx5e_rx_res *res, u32 rss_idx);
+int mlx5e_rx_res_rss_cnt(struct mlx5e_rx_res *res);
+int mlx5e_rx_res_rss_index(struct mlx5e_rx_res *res, struct mlx5e_rss *rss);
+struct mlx5e_rss *mlx5e_rx_res_rss_get(struct mlx5e_rx_res *res, u32 rss_idx);
+
+/* Workaround for hairpin */
+struct mlx5e_rss_params_hash mlx5e_rx_res_get_current_hash(struct mlx5e_rx_res *res);
+
+#endif /* __MLX5_EN_RX_RES_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index 91e7a01e32be..b1707b86aa16 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -2138,6 +2138,7 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
struct mlx5_tc_ct_priv *ct_priv;
struct mlx5_core_dev *dev;
const char *msg;
+ u64 mapping_id;
int err;
dev = priv->mdev;
@@ -2153,13 +2154,17 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
if (!ct_priv)
goto err_alloc;
- ct_priv->zone_mapping = mapping_create(sizeof(u16), 0, true);
+ mapping_id = mlx5_query_nic_system_image_guid(dev);
+
+ ct_priv->zone_mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_ZONE,
+ sizeof(u16), 0, true);
if (IS_ERR(ct_priv->zone_mapping)) {
err = PTR_ERR(ct_priv->zone_mapping);
goto err_mapping_zone;
}
- ct_priv->labels_mapping = mapping_create(sizeof(u32) * 4, 0, true);
+ ct_priv->labels_mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_LABELS,
+ sizeof(u32) * 4, 0, true);
if (IS_ERR(ct_priv->labels_mapping)) {
err = PTR_ERR(ct_priv->labels_mapping);
goto err_mapping_labels;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index 1e2d117082d4..b4e986818794 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -525,7 +525,7 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
e->out_dev = attr.out_dev;
e->route_dev_ifindex = attr.route_dev->ifindex;
- /* It's importent to add the neigh to the hash table before checking
+ /* It's important to add the neigh to the hash table before checking
* the neigh validity state. So if we'll get a notification, in case the
* neigh changes it's validity state, we would find the relevant neigh
* in the hash.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c
new file mode 100644
index 000000000000..de936dc4bc48
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c
@@ -0,0 +1,200 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2021, Mellanox Technologies inc. All rights reserved. */
+
+#include "tir.h"
+#include "params.h"
+#include <linux/mlx5/transobj.h>
+
+#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024)
+
+/* max() doesn't work inside square brackets. */
+#define MLX5E_TIR_CMD_IN_SZ_DW ( \
+ MLX5_ST_SZ_DW(create_tir_in) > MLX5_ST_SZ_DW(modify_tir_in) ? \
+ MLX5_ST_SZ_DW(create_tir_in) : MLX5_ST_SZ_DW(modify_tir_in) \
+)
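+
+/* Expanding on the note above: the kernel's max() is built on compiler
+ * extensions that may not qualify as an integer constant expression, so
+ * it can be rejected in an array bound, while the plain ?: over two
+ * constants always qualifies. A sketch with hypothetical names:
+ *
+ *	u32 buf[A_SZ > B_SZ ? A_SZ : B_SZ];
+ */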
+
+struct mlx5e_tir_builder {
+ u32 in[MLX5E_TIR_CMD_IN_SZ_DW];
+ bool modify;
+};
+
+struct mlx5e_tir_builder *mlx5e_tir_builder_alloc(bool modify)
+{
+ struct mlx5e_tir_builder *builder;
+
+	builder = kvzalloc(sizeof(*builder), GFP_KERNEL);
+	if (!builder)
+		return NULL;
+
+	builder->modify = modify;
+
+ return builder;
+}
+
+void mlx5e_tir_builder_free(struct mlx5e_tir_builder *builder)
+{
+ kvfree(builder);
+}
+
+void mlx5e_tir_builder_clear(struct mlx5e_tir_builder *builder)
+{
+ memset(builder->in, 0, sizeof(builder->in));
+}
+
+static void *mlx5e_tir_builder_get_tirc(struct mlx5e_tir_builder *builder)
+{
+ if (builder->modify)
+ return MLX5_ADDR_OF(modify_tir_in, builder->in, ctx);
+ return MLX5_ADDR_OF(create_tir_in, builder->in, ctx);
+}
+
+void mlx5e_tir_builder_build_inline(struct mlx5e_tir_builder *builder, u32 tdn, u32 rqn)
+{
+ void *tirc = mlx5e_tir_builder_get_tirc(builder);
+
+ WARN_ON(builder->modify);
+
+ MLX5_SET(tirc, tirc, transport_domain, tdn);
+ MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
+ MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_NONE);
+ MLX5_SET(tirc, tirc, inline_rqn, rqn);
+}
+
+void mlx5e_tir_builder_build_rqt(struct mlx5e_tir_builder *builder, u32 tdn,
+ u32 rqtn, bool inner_ft_support)
+{
+ void *tirc = mlx5e_tir_builder_get_tirc(builder);
+
+ WARN_ON(builder->modify);
+
+ MLX5_SET(tirc, tirc, transport_domain, tdn);
+ MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
+ MLX5_SET(tirc, tirc, indirect_table, rqtn);
+ MLX5_SET(tirc, tirc, tunneled_offload_en, inner_ft_support);
+}
+
+void mlx5e_tir_builder_build_lro(struct mlx5e_tir_builder *builder,
+ const struct mlx5e_lro_param *lro_param)
+{
+ void *tirc = mlx5e_tir_builder_get_tirc(builder);
+ const unsigned int rough_max_l2_l3_hdr_sz = 256;
+
+ if (builder->modify)
+ MLX5_SET(modify_tir_in, builder->in, bitmask.lro, 1);
+
+ if (!lro_param->enabled)
+ return;
+
+ MLX5_SET(tirc, tirc, lro_enable_mask,
+ MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
+ MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
+ MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
+ (MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ - rough_max_l2_l3_hdr_sz) >> 8);
+ MLX5_SET(tirc, tirc, lro_timeout_period_usecs, lro_param->timeout);
+}
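+
+/* Worked example with the defaults above: (64 * 1024 - 256) >> 8 = 255,
+ * i.e. lro_max_ip_payload_size is programmed in units of 256 bytes.
+ */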
+
+static int mlx5e_hfunc_to_hw(u8 hfunc)
+{
+ switch (hfunc) {
+ case ETH_RSS_HASH_TOP:
+ return MLX5_RX_HASH_FN_TOEPLITZ;
+ case ETH_RSS_HASH_XOR:
+ return MLX5_RX_HASH_FN_INVERTED_XOR8;
+ default:
+ return MLX5_RX_HASH_FN_NONE;
+ }
+}
+
+void mlx5e_tir_builder_build_rss(struct mlx5e_tir_builder *builder,
+ const struct mlx5e_rss_params_hash *rss_hash,
+ const struct mlx5e_rss_params_traffic_type *rss_tt,
+ bool inner)
+{
+ void *tirc = mlx5e_tir_builder_get_tirc(builder);
+ void *hfso;
+
+ if (builder->modify)
+ MLX5_SET(modify_tir_in, builder->in, bitmask.hash, 1);
+
+ MLX5_SET(tirc, tirc, rx_hash_fn, mlx5e_hfunc_to_hw(rss_hash->hfunc));
+ if (rss_hash->hfunc == ETH_RSS_HASH_TOP) {
+ const size_t len = MLX5_FLD_SZ_BYTES(tirc, rx_hash_toeplitz_key);
+ void *rss_key = MLX5_ADDR_OF(tirc, tirc, rx_hash_toeplitz_key);
+
+ MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
+ memcpy(rss_key, rss_hash->toeplitz_hash_key, len);
+ }
+
+ if (inner)
+ hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_inner);
+ else
+ hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, rss_tt->l3_prot_type);
+ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, rss_tt->l4_prot_type);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields, rss_tt->rx_hash_fields);
+}
+
+void mlx5e_tir_builder_build_direct(struct mlx5e_tir_builder *builder)
+{
+ void *tirc = mlx5e_tir_builder_get_tirc(builder);
+
+ WARN_ON(builder->modify);
+
+ MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
+}
+
+void mlx5e_tir_builder_build_tls(struct mlx5e_tir_builder *builder)
+{
+ void *tirc = mlx5e_tir_builder_get_tirc(builder);
+
+ WARN_ON(builder->modify);
+
+ MLX5_SET(tirc, tirc, tls_en, 1);
+ MLX5_SET(tirc, tirc, self_lb_block,
+ MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST |
+ MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST);
+}
+
+int mlx5e_tir_init(struct mlx5e_tir *tir, struct mlx5e_tir_builder *builder,
+ struct mlx5_core_dev *mdev, bool reg)
+{
+ int err;
+
+ tir->mdev = mdev;
+
+ err = mlx5_core_create_tir(tir->mdev, builder->in, &tir->tirn);
+ if (err)
+ return err;
+
+ if (reg) {
+ struct mlx5e_hw_objs *res = &tir->mdev->mlx5e_res.hw_objs;
+
+ mutex_lock(&res->td.list_lock);
+ list_add(&tir->list, &res->td.tirs_list);
+ mutex_unlock(&res->td.list_lock);
+ } else {
+ INIT_LIST_HEAD(&tir->list);
+ }
+
+ return 0;
+}
+
+void mlx5e_tir_destroy(struct mlx5e_tir *tir)
+{
+ struct mlx5e_hw_objs *res = &tir->mdev->mlx5e_res.hw_objs;
+
+	/* Skip the mutex if list_del() is a no-op (the TIR was never
+	 * registered in the list). list_empty() never returns true for an
+	 * item linked into tirs_list, and the READ_ONCE/WRITE_ONCE in
+	 * list_empty()/list_del() guarantee consistency of the list->next
+	 * value.
+	 */
+ if (!list_empty(&tir->list)) {
+ mutex_lock(&res->td.list_lock);
+ list_del(&tir->list);
+ mutex_unlock(&res->td.list_lock);
+ }
+
+ mlx5_core_destroy_tir(tir->mdev, tir->tirn);
+}
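+
+/* The two init paths make the lockless check above work (a sketch):
+ *
+ *	mlx5e_tir_init(tir, builder, mdev, true);  list_add() - non-empty
+ *	mlx5e_tir_init(tir, builder, mdev, false); INIT_LIST_HEAD() - empty
+ *
+ * Only registered TIRs ever need td.list_lock on destroy.
+ */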
+
+int mlx5e_tir_modify(struct mlx5e_tir *tir, struct mlx5e_tir_builder *builder)
+{
+ return mlx5_core_modify_tir(tir->mdev, tir->tirn, builder->in);
+}
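+
+/* Typical builder flow, mirroring the callers elsewhere in this series
+ * (a sketch, error handling abbreviated):
+ *
+ *	struct mlx5e_tir_builder *builder;
+ *
+ *	builder = mlx5e_tir_builder_alloc(false);
+ *	if (!builder)
+ *		return -ENOMEM;
+ *	mlx5e_tir_builder_build_rqt(builder, tdn, rqtn, false);
+ *	mlx5e_tir_builder_build_direct(builder);
+ *	err = mlx5e_tir_init(&tir, builder, mdev, true);
+ *	mlx5e_tir_builder_free(builder);
+ */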
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.h
new file mode 100644
index 000000000000..e45149a78ed9
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, Mellanox Technologies inc. All rights reserved. */
+
+#ifndef __MLX5_EN_TIR_H__
+#define __MLX5_EN_TIR_H__
+
+#include <linux/kernel.h>
+
+struct mlx5e_rss_params_hash {
+ u8 hfunc;
+ u8 toeplitz_hash_key[40];
+};
+
+struct mlx5e_rss_params_traffic_type {
+ u8 l3_prot_type;
+ u8 l4_prot_type;
+ u32 rx_hash_fields;
+};
+
+struct mlx5e_tir_builder;
+struct mlx5e_lro_param;
+
+struct mlx5e_tir_builder *mlx5e_tir_builder_alloc(bool modify);
+void mlx5e_tir_builder_free(struct mlx5e_tir_builder *builder);
+void mlx5e_tir_builder_clear(struct mlx5e_tir_builder *builder);
+
+void mlx5e_tir_builder_build_inline(struct mlx5e_tir_builder *builder, u32 tdn, u32 rqn);
+void mlx5e_tir_builder_build_rqt(struct mlx5e_tir_builder *builder, u32 tdn,
+ u32 rqtn, bool inner_ft_support);
+void mlx5e_tir_builder_build_lro(struct mlx5e_tir_builder *builder,
+ const struct mlx5e_lro_param *lro_param);
+void mlx5e_tir_builder_build_rss(struct mlx5e_tir_builder *builder,
+ const struct mlx5e_rss_params_hash *rss_hash,
+ const struct mlx5e_rss_params_traffic_type *rss_tt,
+ bool inner);
+void mlx5e_tir_builder_build_direct(struct mlx5e_tir_builder *builder);
+void mlx5e_tir_builder_build_tls(struct mlx5e_tir_builder *builder);
+
+struct mlx5_core_dev;
+
+struct mlx5e_tir {
+ struct mlx5_core_dev *mdev;
+ u32 tirn;
+ struct list_head list;
+};
+
+int mlx5e_tir_init(struct mlx5e_tir *tir, struct mlx5e_tir_builder *builder,
+ struct mlx5_core_dev *mdev, bool reg);
+void mlx5e_tir_destroy(struct mlx5e_tir *tir);
+
+static inline u32 mlx5e_tir_get_tirn(struct mlx5e_tir *tir)
+{
+ return tir->tirn;
+}
+
+int mlx5e_tir_modify(struct mlx5e_tir *tir, struct mlx5e_tir_builder *builder);
+
+#endif /* __MLX5_EN_TIR_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
index 7f94508594fb..d54607a42740 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
@@ -92,30 +92,19 @@ static void mlx5e_close_trap_rq(struct mlx5e_rq *rq)
static int mlx5e_create_trap_direct_rq_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir,
u32 rqn)
{
- void *tirc;
- int inlen;
- u32 *in;
+ struct mlx5e_tir_builder *builder;
int err;
- inlen = MLX5_ST_SZ_BYTES(create_tir_in);
- in = kvzalloc(inlen, GFP_KERNEL);
- if (!in)
+ builder = mlx5e_tir_builder_alloc(false);
+ if (!builder)
return -ENOMEM;
- tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
- MLX5_SET(tirc, tirc, transport_domain, mdev->mlx5e_res.hw_objs.td.tdn);
- MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_NONE);
- MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
- MLX5_SET(tirc, tirc, inline_rqn, rqn);
- err = mlx5e_create_tir(mdev, tir, in);
- kvfree(in);
+ mlx5e_tir_builder_build_inline(builder, mdev->mlx5e_res.hw_objs.td.tdn, rqn);
+ err = mlx5e_tir_init(tir, builder, mdev, true);
- return err;
-}
+ mlx5e_tir_builder_free(builder);
-static void mlx5e_destroy_trap_direct_rq_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir)
-{
- mlx5e_destroy_tir(mdev, tir);
+ return err;
}
static void mlx5e_build_trap_params(struct mlx5_core_dev *mdev,
@@ -173,7 +162,7 @@ err_napi_del:
void mlx5e_close_trap(struct mlx5e_trap *trap)
{
- mlx5e_destroy_trap_direct_rq_tir(trap->mdev, &trap->tir);
+ mlx5e_tir_destroy(&trap->tir);
mlx5e_close_trap_rq(&trap->rq);
netif_napi_del(&trap->napi);
kvfree(trap);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c
index 71e8d66fa150..7b562d2c8a19 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c
@@ -122,7 +122,7 @@ static int mlx5e_xsk_enable_locked(struct mlx5e_priv *priv,
* any Fill Ring entries at the setup stage.
*/
- err = mlx5e_xsk_redirect_rqt_to_channel(priv, priv->channels.c[ix]);
+ err = mlx5e_rx_res_xsk_activate(priv->rx_res, &priv->channels, ix);
if (unlikely(err))
goto err_deactivate;
@@ -169,7 +169,7 @@ static int mlx5e_xsk_disable_locked(struct mlx5e_priv *priv, u16 ix)
goto remove_pool;
c = priv->channels.c[ix];
- mlx5e_xsk_redirect_rqt_to_drop(priv, ix);
+ mlx5e_rx_res_xsk_deactivate(priv->rx_res, ix);
mlx5e_deactivate_xsk(c);
mlx5e_close_xsk(c);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
index a8315f166696..538bc2419bd8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
@@ -126,7 +126,7 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
/* Create a separate SQ, so that when the buff pool is disabled, we could
* close this SQ safely and stop receiving CQEs. In other case, e.g., if
* the XDPSQ was used instead, we might run into trouble when the buff pool
- * is disabled and then reenabled, but the SQ continues receiving CQEs
+ * is disabled and then re-enabled, but the SQ continues receiving CQEs
* from the old buff pool.
*/
err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, pool, &c->xsksq, true);
@@ -183,73 +183,3 @@ void mlx5e_deactivate_xsk(struct mlx5e_channel *c)
mlx5e_deactivate_rq(&c->xskrq);
/* TX queue is disabled on close. */
}
-
-static int mlx5e_redirect_xsk_rqt(struct mlx5e_priv *priv, u16 ix, u32 rqn)
-{
- struct mlx5e_redirect_rqt_param direct_rrp = {
- .is_rss = false,
- {
- .rqn = rqn,
- },
- };
-
- u32 rqtn = priv->xsk_tir[ix].rqt.rqtn;
-
- return mlx5e_redirect_rqt(priv, rqtn, 1, direct_rrp);
-}
-
-int mlx5e_xsk_redirect_rqt_to_channel(struct mlx5e_priv *priv, struct mlx5e_channel *c)
-{
- return mlx5e_redirect_xsk_rqt(priv, c->ix, c->xskrq.rqn);
-}
-
-int mlx5e_xsk_redirect_rqt_to_drop(struct mlx5e_priv *priv, u16 ix)
-{
- return mlx5e_redirect_xsk_rqt(priv, ix, priv->drop_rq.rqn);
-}
-
-int mlx5e_xsk_redirect_rqts_to_channels(struct mlx5e_priv *priv, struct mlx5e_channels *chs)
-{
- int err, i;
-
- if (!priv->xsk.refcnt)
- return 0;
-
- for (i = 0; i < chs->num; i++) {
- struct mlx5e_channel *c = chs->c[i];
-
- if (!test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
- continue;
-
- err = mlx5e_xsk_redirect_rqt_to_channel(priv, c);
- if (unlikely(err))
- goto err_stop;
- }
-
- return 0;
-
-err_stop:
- for (i--; i >= 0; i--) {
- if (!test_bit(MLX5E_CHANNEL_STATE_XSK, chs->c[i]->state))
- continue;
-
- mlx5e_xsk_redirect_rqt_to_drop(priv, i);
- }
-
- return err;
-}
-
-void mlx5e_xsk_redirect_rqts_to_drop(struct mlx5e_priv *priv, struct mlx5e_channels *chs)
-{
- int i;
-
- if (!priv->xsk.refcnt)
- return;
-
- for (i = 0; i < chs->num; i++) {
- if (!test_bit(MLX5E_CHANNEL_STATE_XSK, chs->c[i]->state))
- continue;
-
- mlx5e_xsk_redirect_rqt_to_drop(priv, i);
- }
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.h
index ca20f1ff5e39..50e111b85efd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.h
@@ -17,9 +17,5 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
void mlx5e_close_xsk(struct mlx5e_channel *c);
void mlx5e_activate_xsk(struct mlx5e_channel *c);
void mlx5e_deactivate_xsk(struct mlx5e_channel *c);
-int mlx5e_xsk_redirect_rqt_to_channel(struct mlx5e_priv *priv, struct mlx5e_channel *c);
-int mlx5e_xsk_redirect_rqt_to_drop(struct mlx5e_priv *priv, u16 ix);
-int mlx5e_xsk_redirect_rqts_to_channels(struct mlx5e_priv *priv, struct mlx5e_channels *chs);
-void mlx5e_xsk_redirect_rqts_to_drop(struct mlx5e_priv *priv, struct mlx5e_channels *chs);
#endif /* __MLX5_EN_XSK_SETUP_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
index e51f60b55daa..4c4ee524176c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
@@ -16,13 +16,13 @@ struct mlx5e_accel_fs_tcp {
struct mlx5_flow_handle *default_rules[ACCEL_FS_TCP_NUM_TYPES];
};
-static enum mlx5e_traffic_types fs_accel2tt(enum accel_fs_tcp_type i)
+static enum mlx5_traffic_types fs_accel2tt(enum accel_fs_tcp_type i)
{
switch (i) {
case ACCEL_FS_IPV4_TCP:
- return MLX5E_TT_IPV4_TCP;
+ return MLX5_TT_IPV4_TCP;
default: /* ACCEL_FS_IPV6_TCP */
- return MLX5E_TT_IPV6_TCP;
+ return MLX5_TT_IPV6_TCP;
}
}
@@ -161,7 +161,7 @@ static int accel_fs_tcp_add_default_rule(struct mlx5e_priv *priv,
fs_tcp = priv->fs.accel_tcp;
accel_fs_t = &fs_tcp->tables[type];
- dest = mlx5e_ttc_get_default_dest(priv, fs_accel2tt(type));
+ dest = mlx5_ttc_get_default_dest(priv->fs.ttc, fs_accel2tt(type));
rule = mlx5_add_flow_rules(accel_fs_t->t, NULL, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -307,7 +307,7 @@ static int accel_fs_tcp_disable(struct mlx5e_priv *priv)
for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++) {
/* Modify ttc rules destination to point back to the indir TIRs */
- err = mlx5e_ttc_fwd_default_dest(priv, fs_accel2tt(i));
+ err = mlx5_ttc_fwd_default_dest(priv->fs.ttc, fs_accel2tt(i));
if (err) {
netdev_err(priv->netdev,
"%s: modify ttc[%d] default destination failed, err(%d)\n",
@@ -329,7 +329,7 @@ static int accel_fs_tcp_enable(struct mlx5e_priv *priv)
dest.ft = priv->fs.accel_tcp->tables[i].t;
/* Modify ttc rules destination to point on the accel_fs FTs */
- err = mlx5e_ttc_fwd_dest(priv, fs_accel2tt(i), &dest);
+ err = mlx5_ttc_fwd_dest(priv->fs.ttc, fs_accel2tt(i), &dest);
if (err) {
netdev_err(priv->netdev,
"%s: modify ttc[%d] destination to accel failed, err(%d)\n",
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index 34119ce92031..17da23dff0ed 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -41,11 +41,11 @@ struct mlx5e_ipsec_tx {
};
/* IPsec RX flow steering */
-static enum mlx5e_traffic_types fs_esp2tt(enum accel_fs_esp_type i)
+static enum mlx5_traffic_types fs_esp2tt(enum accel_fs_esp_type i)
{
if (i == ACCEL_FS_ESP4)
- return MLX5E_TT_IPV4_IPSEC_ESP;
- return MLX5E_TT_IPV6_IPSEC_ESP;
+ return MLX5_TT_IPV4_IPSEC_ESP;
+ return MLX5_TT_IPV6_IPSEC_ESP;
}
static int rx_err_add_rule(struct mlx5e_priv *priv,
@@ -265,7 +265,8 @@ static int rx_create(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
accel_esp = priv->ipsec->rx_fs;
fs_prot = &accel_esp->fs_prot[type];
- fs_prot->default_dest = mlx5e_ttc_get_default_dest(priv, fs_esp2tt(type));
+ fs_prot->default_dest =
+ mlx5_ttc_get_default_dest(priv->fs.ttc, fs_esp2tt(type));
err = rx_err_create_ft(priv, fs_prot, &fs_prot->rx_err);
if (err)
@@ -301,7 +302,7 @@ static int rx_ft_get(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
/* connect */
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest.ft = fs_prot->ft;
- mlx5e_ttc_fwd_dest(priv, fs_esp2tt(type), &dest);
+ mlx5_ttc_fwd_dest(priv->fs.ttc, fs_esp2tt(type), &dest);
out:
mutex_unlock(&fs_prot->prot_mutex);
@@ -320,7 +321,7 @@ static void rx_ft_put(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
goto out;
/* disconnect */
- mlx5e_ttc_fwd_default_dest(priv, fs_esp2tt(type));
+ mlx5_ttc_fwd_default_dest(priv->fs.ttc, fs_esp2tt(type));
/* remove FT */
rx_destroy(priv, type);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
index 4e58fade7a60..62abce008c7b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
@@ -49,7 +49,7 @@ struct mlx5e_ktls_offload_context_rx {
struct mlx5e_rq_stats *rq_stats;
struct mlx5e_tls_sw_stats *sw_stats;
struct completion add_ctx;
- u32 tirn;
+ struct mlx5e_tir tir;
u32 key_id;
u32 rxq;
DECLARE_BITMAP(flags, MLX5E_NUM_PRIV_RX_FLAGS);
@@ -99,31 +99,22 @@ mlx5e_ktls_rx_resync_create_resp_list(void)
return resp_list;
}
-static int mlx5e_ktls_create_tir(struct mlx5_core_dev *mdev, u32 *tirn, u32 rqtn)
+static int mlx5e_ktls_create_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir, u32 rqtn)
{
- int err, inlen;
- void *tirc;
- u32 *in;
+ struct mlx5e_tir_builder *builder;
+ int err;
- inlen = MLX5_ST_SZ_BYTES(create_tir_in);
- in = kvzalloc(inlen, GFP_KERNEL);
- if (!in)
+ builder = mlx5e_tir_builder_alloc(false);
+ if (!builder)
return -ENOMEM;
- tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
-
- MLX5_SET(tirc, tirc, transport_domain, mdev->mlx5e_res.hw_objs.td.tdn);
- MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
- MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
- MLX5_SET(tirc, tirc, indirect_table, rqtn);
- MLX5_SET(tirc, tirc, tls_en, 1);
- MLX5_SET(tirc, tirc, self_lb_block,
- MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST |
- MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST);
+ mlx5e_tir_builder_build_rqt(builder, mdev->mlx5e_res.hw_objs.td.tdn, rqtn, false);
+ mlx5e_tir_builder_build_direct(builder);
+ mlx5e_tir_builder_build_tls(builder);
+ err = mlx5e_tir_init(tir, builder, mdev, false);
- err = mlx5_core_create_tir(mdev, in, tirn);
+ mlx5e_tir_builder_free(builder);
- kvfree(in);
return err;
}
@@ -139,7 +130,8 @@ static void accel_rule_handle_work(struct work_struct *work)
goto out;
rule = mlx5e_accel_fs_add_sk(accel_rule->priv, priv_rx->sk,
- priv_rx->tirn, MLX5_FS_DEFAULT_FLOW_TAG);
+ mlx5e_tir_get_tirn(&priv_rx->tir),
+ MLX5_FS_DEFAULT_FLOW_TAG);
if (!IS_ERR_OR_NULL(rule))
accel_rule->rule = rule;
out:
@@ -173,8 +165,8 @@ post_static_params(struct mlx5e_icosq *sq,
pi = mlx5e_icosq_get_next_pi(sq, num_wqebbs);
wqe = MLX5E_TLS_FETCH_SET_STATIC_PARAMS_WQE(sq, pi);
mlx5e_ktls_build_static_params(wqe, sq->pc, sq->sqn, &priv_rx->crypto_info,
- priv_rx->tirn, priv_rx->key_id,
- priv_rx->resync.seq, false,
+ mlx5e_tir_get_tirn(&priv_rx->tir),
+ priv_rx->key_id, priv_rx->resync.seq, false,
TLS_OFFLOAD_CTX_DIR_RX);
wi = (struct mlx5e_icosq_wqe_info) {
.wqe_type = MLX5E_ICOSQ_WQE_UMR_TLS,
@@ -202,8 +194,9 @@ post_progress_params(struct mlx5e_icosq *sq,
pi = mlx5e_icosq_get_next_pi(sq, num_wqebbs);
wqe = MLX5E_TLS_FETCH_SET_PROGRESS_PARAMS_WQE(sq, pi);
- mlx5e_ktls_build_progress_params(wqe, sq->pc, sq->sqn, priv_rx->tirn, false,
- next_record_tcp_sn,
+ mlx5e_ktls_build_progress_params(wqe, sq->pc, sq->sqn,
+ mlx5e_tir_get_tirn(&priv_rx->tir),
+ false, next_record_tcp_sn,
TLS_OFFLOAD_CTX_DIR_RX);
wi = (struct mlx5e_icosq_wqe_info) {
.wqe_type = MLX5E_ICOSQ_WQE_SET_PSV_TLS,
@@ -325,7 +318,7 @@ resync_post_get_progress_params(struct mlx5e_icosq *sq,
psv = &wqe->psv;
psv->num_psv = 1 << 4;
psv->l_key = sq->channel->mkey_be;
- psv->psv_index[0] = cpu_to_be32(priv_rx->tirn);
+ psv->psv_index[0] = cpu_to_be32(mlx5e_tir_get_tirn(&priv_rx->tir));
psv->va = cpu_to_be64(buf->dma_addr);
wi = (struct mlx5e_icosq_wqe_info) {
@@ -635,9 +628,9 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
priv_rx->sw_stats = &priv->tls->sw_stats;
mlx5e_set_ktls_rx_priv_ctx(tls_ctx, priv_rx);
- rqtn = priv->direct_tir[rxq].rqt.rqtn;
+ rqtn = mlx5e_rx_res_get_rqtn_direct(priv->rx_res, rxq);
- err = mlx5e_ktls_create_tir(mdev, &priv_rx->tirn, rqtn);
+ err = mlx5e_ktls_create_tir(mdev, &priv_rx->tir, rqtn);
if (err)
goto err_create_tir;
@@ -658,7 +651,7 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
return 0;
err_post_wqes:
- mlx5_core_destroy_tir(mdev, priv_rx->tirn);
+ mlx5e_tir_destroy(&priv_rx->tir);
err_create_tir:
mlx5_ktls_destroy_key(mdev, priv_rx->key_id);
err_create_key:
@@ -693,7 +686,7 @@ void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx)
if (priv_rx->rule.rule)
mlx5e_accel_fs_del_sk(priv_rx->rule.rule);
- mlx5_core_destroy_tir(mdev, priv_rx->tirn);
+ mlx5e_tir_destroy(&priv_rx->tir);
mlx5_ktls_destroy_key(mdev, priv_rx->key_id);
/* priv_rx should normally be freed here, but if there is an outstanding
* GET_PSV, deallocation will be delayed until the CQE for GET_PSV is
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index 25403af32859..fe5d82fa6e92 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -98,17 +98,17 @@ struct arfs_rule {
for (j = 0; j < ARFS_HASH_SIZE; j++) \
hlist_for_each_entry_safe(hn, tmp, &hash[j], hlist)
-static enum mlx5e_traffic_types arfs_get_tt(enum arfs_type type)
+static enum mlx5_traffic_types arfs_get_tt(enum arfs_type type)
{
switch (type) {
case ARFS_IPV4_TCP:
- return MLX5E_TT_IPV4_TCP;
+ return MLX5_TT_IPV4_TCP;
case ARFS_IPV4_UDP:
- return MLX5E_TT_IPV4_UDP;
+ return MLX5_TT_IPV4_UDP;
case ARFS_IPV6_TCP:
- return MLX5E_TT_IPV6_TCP;
+ return MLX5_TT_IPV6_TCP;
case ARFS_IPV6_UDP:
- return MLX5E_TT_IPV6_UDP;
+ return MLX5_TT_IPV6_UDP;
default:
return -EINVAL;
}
@@ -120,7 +120,7 @@ static int arfs_disable(struct mlx5e_priv *priv)
for (i = 0; i < ARFS_NUM_TYPES; i++) {
/* Modify ttc rules destination back to their default */
- err = mlx5e_ttc_fwd_default_dest(priv, arfs_get_tt(i));
+ err = mlx5_ttc_fwd_default_dest(priv->fs.ttc, arfs_get_tt(i));
if (err) {
netdev_err(priv->netdev,
"%s: modify ttc[%d] default destination failed, err(%d)\n",
@@ -149,7 +149,7 @@ int mlx5e_arfs_enable(struct mlx5e_priv *priv)
for (i = 0; i < ARFS_NUM_TYPES; i++) {
dest.ft = priv->fs.arfs->arfs_tables[i].ft.t;
/* Modify ttc rules destination to point on the aRFS FTs */
- err = mlx5e_ttc_fwd_dest(priv, arfs_get_tt(i), &dest);
+ err = mlx5_ttc_fwd_dest(priv->fs.ttc, arfs_get_tt(i), &dest);
if (err) {
netdev_err(priv->netdev,
"%s: modify ttc[%d] dest to arfs, failed err(%d)\n",
@@ -192,10 +192,9 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv,
enum arfs_type type)
{
struct arfs_table *arfs_t = &priv->fs.arfs->arfs_tables[type];
- struct mlx5e_tir *tir = priv->indir_tir;
struct mlx5_flow_destination dest = {};
MLX5_DECLARE_FLOW_ACT(flow_act);
- enum mlx5e_traffic_types tt;
+ enum mlx5_traffic_types tt;
int err = 0;
dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
@@ -206,10 +205,10 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv,
return -EINVAL;
}
- /* FIXME: Must use mlx5e_ttc_get_default_dest(),
+ /* FIXME: Must use mlx5_ttc_get_default_dest(),
* but can't since TTC default is not setup yet !
*/
- dest.tir_num = tir[tt].tirn;
+ dest.tir_num = mlx5e_rx_res_get_tirn_rss(priv->rx_res, tt);
arfs_t->default_rule = mlx5_add_flow_rules(arfs_t->ft.t, NULL,
&flow_act,
&dest, 1);
@@ -553,7 +552,7 @@ static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv,
16);
}
dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
- dest.tir_num = priv->direct_tir[arfs_rule->rxq].tirn;
+ dest.tir_num = mlx5e_rx_res_get_tirn_direct(priv->rx_res, arfs_rule->rxq);
rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -576,7 +575,7 @@ static void arfs_modify_rule_rq(struct mlx5e_priv *priv,
int err = 0;
dst.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
- dst.tir_num = priv->direct_tir[rxq].tirn;
+ dst.tir_num = mlx5e_rx_res_get_tirn_direct(priv->rx_res, rxq);
err = mlx5_modify_rule_destination(rule, &dst, NULL);
if (err)
netdev_warn(priv->netdev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
index 8c166ee56d8b..84eb7201c142 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
@@ -33,36 +33,9 @@
#include "en.h"
/* mlx5e global resources should be placed in this file.
- * Global resources are common to all the netdevices crated on the same nic.
+ * Global resources are common to all the netdevices created on the same nic.
*/
-int mlx5e_create_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir, u32 *in)
-{
- struct mlx5e_hw_objs *res = &mdev->mlx5e_res.hw_objs;
- int err;
-
- err = mlx5_core_create_tir(mdev, in, &tir->tirn);
- if (err)
- return err;
-
- mutex_lock(&res->td.list_lock);
- list_add(&tir->list, &res->td.tirs_list);
- mutex_unlock(&res->td.list_lock);
-
- return 0;
-}
-
-void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
- struct mlx5e_tir *tir)
-{
- struct mlx5e_hw_objs *res = &mdev->mlx5e_res.hw_objs;
-
- mutex_lock(&res->td.list_lock);
- mlx5_core_destroy_tir(mdev, tir->tirn);
- list_del(&tir->list);
- mutex_unlock(&res->td.list_lock);
-}
-
void mlx5e_mkey_set_relaxed_ordering(struct mlx5_core_dev *mdev, void *mkc)
{
bool ro_pci_enable = pcie_relaxed_ordering_enabled(mdev->pdev);
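
mlx5e_create_tir()/mlx5e_destroy_tir() kept every TIR on a device-wide list (under td.list_lock) so that LRO reconfiguration could later walk and refresh all of them; with this series that bookkeeping appears to move with the TIR objects themselves rather than living in en_common.c. A userspace sketch of the create/track/destroy pattern the removed helpers implemented, with a plain pointer list standing in for the kernel list and lock:

#include <stdio.h>
#include <stdlib.h>

struct tir {
    unsigned int tirn;
    struct tir *next;
};

static struct tir *tirs_list;  /* models td.tirs_list */

static struct tir *create_tir(unsigned int tirn)
{
    struct tir *t = malloc(sizeof(*t));
    if (!t)
        return NULL;
    t->tirn = tirn;
    t->next = tirs_list;       /* list_add() equivalent */
    tirs_list = t;
    return t;
}

static void destroy_tir(struct tir *t)
{
    struct tir **p = &tirs_list;
    while (*p && *p != t)
        p = &(*p)->next;
    if (*p)
        *p = t->next;          /* list_del() equivalent */
    free(t);
}

int main(void)
{
    struct tir *a = create_tir(1), *b = create_tir(2);
    destroy_tir(a);
    for (struct tir *t = tirs_list; t; t = t->next)
        printf("tracked tirn %u\n", t->tirn);
    destroy_tir(b);
    return 0;
}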
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index bd72572e03d1..5696d3f1baaf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -420,6 +420,7 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
unsigned int count = ch->combined_count;
struct mlx5e_params new_params;
bool arfs_enabled;
+ int rss_cnt;
bool opened;
int err = 0;
@@ -455,6 +456,27 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
goto out;
}
+ /* Don't allow changing the number of channels if non-default RSS contexts exist,
+ * the kernel doesn't protect against set_channels operations that break them.
+ */
+ rss_cnt = mlx5e_rx_res_rss_cnt(priv->rx_res) - 1;
+ if (rss_cnt) {
+ err = -EINVAL;
+ netdev_err(priv->netdev, "%s: Non-default RSS contexts exist (%d), cannot change the number of channels\n",
+ __func__, rss_cnt);
+ goto out;
+ }
+
+ /* Don't allow changing the number of channels if MQPRIO mode channel offload is active,
+ * because it defines a partition over the channels queues.
+ */
+ if (cur_params->mqprio.mode == TC_MQPRIO_MODE_CHANNEL) {
+ err = -EINVAL;
+ netdev_err(priv->netdev, "%s: MQPRIO mode channel offload is active, cannot change the number of channels\n",
+ __func__);
+ goto out;
+ }
+
new_params = *cur_params;
new_params.num_channels = count;
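
The "- 1" in the new guard discounts the default RSS context, which always exists; only user-created contexts make the channel count immutable, since their indirection tables would silently reference queues that no longer exist. A tiny standalone check mirroring that logic (hypothetical helper name):

#include <stdio.h>

static int may_change_channels(int rss_contexts_total)
{
    int extra = rss_contexts_total - 1;  /* context 0 is the default */
    return extra == 0;
}

int main(void)
{
    printf("%d\n", may_change_channels(1)); /* 1: only the default exists */
    printf("%d\n", may_change_channels(3)); /* 0: two user contexts block it */
    return 0;
}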
@@ -1172,7 +1194,7 @@ static int mlx5e_set_link_ksettings(struct net_device *netdev,
u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv)
{
- return sizeof(priv->rss_params.toeplitz_hash_key);
+ return sizeof_field(struct mlx5e_rss_params_hash, toeplitz_hash_key);
}
static u32 mlx5e_get_rxfh_key_size(struct net_device *netdev)
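
sizeof_field() derives the key length from the type alone, so the getter no longer has to reach into a priv instance that may no longer own the RSS state. A runnable demo using the kernel's definition of the macro and a stand-in struct (the 40-byte field width here is illustrative):

#include <stdio.h>

#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))

struct rss_params_hash {
    unsigned char hfunc;
    unsigned char toeplitz_hash_key[40]; /* stand-in for the mlx5e field */
};

int main(void)
{
    printf("key size: %zu\n",
           sizeof_field(struct rss_params_hash, toeplitz_hash_key));
    return 0;
}

The null-pointer dereference is safe because sizeof never evaluates its operand; this is the same trick offsetof() relies on.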
@@ -1194,88 +1216,64 @@ static u32 mlx5e_get_rxfh_indir_size(struct net_device *netdev)
return mlx5e_ethtool_get_rxfh_indir_size(priv);
}
-int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
- u8 *hfunc)
+static int mlx5e_get_rxfh_context(struct net_device *dev, u32 *indir,
+ u8 *key, u8 *hfunc, u32 rss_context)
{
- struct mlx5e_priv *priv = netdev_priv(netdev);
- struct mlx5e_rss_params *rss = &priv->rss_params;
-
- if (indir)
- memcpy(indir, rss->indirection_rqt,
- sizeof(rss->indirection_rqt));
-
- if (key)
- memcpy(key, rss->toeplitz_hash_key,
- sizeof(rss->toeplitz_hash_key));
-
- if (hfunc)
- *hfunc = rss->hfunc;
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ int err;
- return 0;
+ mutex_lock(&priv->state_lock);
+ err = mlx5e_rx_res_rss_get_rxfh(priv->rx_res, rss_context, indir, key, hfunc);
+ mutex_unlock(&priv->state_lock);
+ return err;
}
-int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+static int mlx5e_set_rxfh_context(struct net_device *dev, const u32 *indir,
+ const u8 *key, const u8 hfunc,
+ u32 *rss_context, bool delete)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- struct mlx5e_rss_params *rss = &priv->rss_params;
- int inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
- bool refresh_tirs = false;
- bool refresh_rqt = false;
- void *in;
-
- if ((hfunc != ETH_RSS_HASH_NO_CHANGE) &&
- (hfunc != ETH_RSS_HASH_XOR) &&
- (hfunc != ETH_RSS_HASH_TOP))
- return -EINVAL;
-
- in = kvzalloc(inlen, GFP_KERNEL);
- if (!in)
- return -ENOMEM;
+ int err;
mutex_lock(&priv->state_lock);
-
- if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != rss->hfunc) {
- rss->hfunc = hfunc;
- refresh_rqt = true;
- refresh_tirs = true;
- }
-
- if (indir) {
- memcpy(rss->indirection_rqt, indir,
- sizeof(rss->indirection_rqt));
- refresh_rqt = true;
+ if (delete) {
+ err = mlx5e_rx_res_rss_destroy(priv->rx_res, *rss_context);
+ goto unlock;
}
- if (key) {
- memcpy(rss->toeplitz_hash_key, key,
- sizeof(rss->toeplitz_hash_key));
- refresh_tirs = refresh_tirs || rss->hfunc == ETH_RSS_HASH_TOP;
- }
+ if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
+ unsigned int count = priv->channels.params.num_channels;
- if (refresh_rqt && test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- struct mlx5e_redirect_rqt_param rrp = {
- .is_rss = true,
- {
- .rss = {
- .hfunc = rss->hfunc,
- .channels = &priv->channels,
- },
- },
- };
- u32 rqtn = priv->indir_rqt.rqtn;
-
- mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, rrp);
+ err = mlx5e_rx_res_rss_init(priv->rx_res, rss_context, count);
+ if (err)
+ goto unlock;
}
- if (refresh_tirs)
- mlx5e_modify_tirs_hash(priv, in);
+ err = mlx5e_rx_res_rss_set_rxfh(priv->rx_res, *rss_context, indir, key,
+ hfunc == ETH_RSS_HASH_NO_CHANGE ? NULL : &hfunc);
+unlock:
mutex_unlock(&priv->state_lock);
+ return err;
+}
- kvfree(in);
+int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
+ u8 *hfunc)
+{
+ return mlx5e_get_rxfh_context(netdev, indir, key, hfunc, 0);
+}
- return 0;
+int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ int err;
+
+ mutex_lock(&priv->state_lock);
+ err = mlx5e_rx_res_rss_set_rxfh(priv->rx_res, 0, indir, key,
+ hfunc == ETH_RSS_HASH_NO_CHANGE ? NULL : &hfunc);
+ mutex_unlock(&priv->state_lock);
+ return err;
}
#define MLX5E_PFC_PREVEN_AUTO_TOUT_MSEC 100
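
The new context-aware entry points implement the ethtool contract: rss_context == ETH_RXFH_CONTEXT_ALLOC asks the driver to create a context and write the chosen id back, any other id updates an existing context, and the delete flag tears one down. A standalone table-based model of that contract (the context limit and ids are invented):

#include <stdio.h>

#define CONTEXT_ALLOC 0xffffffffu  /* mirrors ETH_RXFH_CONTEXT_ALLOC */
#define MAX_CTX 8

static int ctx_used[MAX_CTX] = { 1 };  /* context 0 always exists */

static int set_rxfh_context(unsigned int *id, int delete)
{
    if (delete) {
        if (*id == 0 || *id >= MAX_CTX || !ctx_used[*id])
            return -1;
        ctx_used[*id] = 0;
        return 0;
    }
    if (*id == CONTEXT_ALLOC) {
        for (unsigned int i = 1; i < MAX_CTX; i++) {
            if (!ctx_used[i]) {
                ctx_used[i] = 1;
                *id = i;  /* report the new id back to ethtool */
                return 0;
            }
        }
        return -1;
    }
    return (*id < MAX_CTX && ctx_used[*id]) ? 0 : -1;
}

int main(void)
{
    unsigned int id = CONTEXT_ALLOC;
    if (set_rxfh_context(&id, 0) == 0)
        printf("allocated context %u\n", id);
    set_rxfh_context(&id, 1);
    printf("context %u deleted\n", id);
    return 0;
}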
@@ -2358,6 +2356,8 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
.get_rxfh_indir_size = mlx5e_get_rxfh_indir_size,
.get_rxfh = mlx5e_get_rxfh,
.set_rxfh = mlx5e_set_rxfh,
+ .get_rxfh_context = mlx5e_get_rxfh_context,
+ .set_rxfh_context = mlx5e_set_rxfh_context,
.get_rxnfc = mlx5e_get_rxnfc,
.set_rxnfc = mlx5e_set_rxnfc,
.get_tunable = mlx5e_get_tunable,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index 0b75fab41ae8..c06b4b938ae7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -718,7 +718,7 @@ static int mlx5e_add_promisc_rule(struct mlx5e_priv *priv)
if (!spec)
return -ENOMEM;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest.ft = priv->fs.ttc.ft.t;
+ dest.ft = mlx5_get_ttc_flow_table(priv->fs.ttc);
rule_p = &priv->fs.promisc.rule;
*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
@@ -854,593 +854,59 @@ void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
ft->t = NULL;
}
-static void mlx5e_cleanup_ttc_rules(struct mlx5e_ttc_table *ttc)
-{
- int i;
-
- for (i = 0; i < MLX5E_NUM_TT; i++) {
- if (!IS_ERR_OR_NULL(ttc->rules[i].rule)) {
- mlx5_del_flow_rules(ttc->rules[i].rule);
- ttc->rules[i].rule = NULL;
- }
- }
-
- for (i = 0; i < MLX5E_NUM_TUNNEL_TT; i++) {
- if (!IS_ERR_OR_NULL(ttc->tunnel_rules[i])) {
- mlx5_del_flow_rules(ttc->tunnel_rules[i]);
- ttc->tunnel_rules[i] = NULL;
- }
- }
-}
-
-struct mlx5e_etype_proto {
- u16 etype;
- u8 proto;
-};
-
-static struct mlx5e_etype_proto ttc_rules[] = {
- [MLX5E_TT_IPV4_TCP] = {
- .etype = ETH_P_IP,
- .proto = IPPROTO_TCP,
- },
- [MLX5E_TT_IPV6_TCP] = {
- .etype = ETH_P_IPV6,
- .proto = IPPROTO_TCP,
- },
- [MLX5E_TT_IPV4_UDP] = {
- .etype = ETH_P_IP,
- .proto = IPPROTO_UDP,
- },
- [MLX5E_TT_IPV6_UDP] = {
- .etype = ETH_P_IPV6,
- .proto = IPPROTO_UDP,
- },
- [MLX5E_TT_IPV4_IPSEC_AH] = {
- .etype = ETH_P_IP,
- .proto = IPPROTO_AH,
- },
- [MLX5E_TT_IPV6_IPSEC_AH] = {
- .etype = ETH_P_IPV6,
- .proto = IPPROTO_AH,
- },
- [MLX5E_TT_IPV4_IPSEC_ESP] = {
- .etype = ETH_P_IP,
- .proto = IPPROTO_ESP,
- },
- [MLX5E_TT_IPV6_IPSEC_ESP] = {
- .etype = ETH_P_IPV6,
- .proto = IPPROTO_ESP,
- },
- [MLX5E_TT_IPV4] = {
- .etype = ETH_P_IP,
- .proto = 0,
- },
- [MLX5E_TT_IPV6] = {
- .etype = ETH_P_IPV6,
- .proto = 0,
- },
- [MLX5E_TT_ANY] = {
- .etype = 0,
- .proto = 0,
- },
-};
-
-static struct mlx5e_etype_proto ttc_tunnel_rules[] = {
- [MLX5E_TT_IPV4_GRE] = {
- .etype = ETH_P_IP,
- .proto = IPPROTO_GRE,
- },
- [MLX5E_TT_IPV6_GRE] = {
- .etype = ETH_P_IPV6,
- .proto = IPPROTO_GRE,
- },
- [MLX5E_TT_IPV4_IPIP] = {
- .etype = ETH_P_IP,
- .proto = IPPROTO_IPIP,
- },
- [MLX5E_TT_IPV6_IPIP] = {
- .etype = ETH_P_IPV6,
- .proto = IPPROTO_IPIP,
- },
- [MLX5E_TT_IPV4_IPV6] = {
- .etype = ETH_P_IP,
- .proto = IPPROTO_IPV6,
- },
- [MLX5E_TT_IPV6_IPV6] = {
- .etype = ETH_P_IPV6,
- .proto = IPPROTO_IPV6,
- },
-
-};
-
-u8 mlx5e_get_proto_by_tunnel_type(enum mlx5e_tunnel_types tt)
-{
- return ttc_tunnel_rules[tt].proto;
-}
-
-static bool mlx5e_tunnel_proto_supported_rx(struct mlx5_core_dev *mdev, u8 proto_type)
-{
- switch (proto_type) {
- case IPPROTO_GRE:
- return MLX5_CAP_ETH(mdev, tunnel_stateless_gre);
- case IPPROTO_IPIP:
- case IPPROTO_IPV6:
- return (MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip) ||
- MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip_rx));
- default:
- return false;
- }
-}
-
-static bool mlx5e_tunnel_any_rx_proto_supported(struct mlx5_core_dev *mdev)
-{
- int tt;
-
- for (tt = 0; tt < MLX5E_NUM_TUNNEL_TT; tt++) {
- if (mlx5e_tunnel_proto_supported_rx(mdev, ttc_tunnel_rules[tt].proto))
- return true;
- }
- return false;
-}
-
-bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev)
-{
- return (mlx5e_tunnel_any_rx_proto_supported(mdev) &&
- MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ft_field_support.inner_ip_version));
-}
-
-static u8 mlx5e_etype_to_ipv(u16 ethertype)
-{
- if (ethertype == ETH_P_IP)
- return 4;
-
- if (ethertype == ETH_P_IPV6)
- return 6;
-
- return 0;
-}
-
-static struct mlx5_flow_handle *
-mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
- struct mlx5_flow_table *ft,
- struct mlx5_flow_destination *dest,
- u16 etype,
- u8 proto)
-{
- int match_ipv_outer = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ft_field_support.outer_ip_version);
- MLX5_DECLARE_FLOW_ACT(flow_act);
- struct mlx5_flow_handle *rule;
- struct mlx5_flow_spec *spec;
- int err = 0;
- u8 ipv;
-
- spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
- if (!spec)
- return ERR_PTR(-ENOMEM);
-
- if (proto) {
- spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
- MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, proto);
- }
-
- ipv = mlx5e_etype_to_ipv(etype);
- if (match_ipv_outer && ipv) {
- spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
- MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, ipv);
- } else if (etype) {
- spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype);
- MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, etype);
- }
-
- rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
- if (IS_ERR(rule)) {
- err = PTR_ERR(rule);
- netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
- }
-
- kvfree(spec);
- return err ? ERR_PTR(err) : rule;
-}
-
-static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv,
- struct ttc_params *params,
- struct mlx5e_ttc_table *ttc)
-{
- struct mlx5_flow_destination dest = {};
- struct mlx5_flow_handle **trules;
- struct mlx5e_ttc_rule *rules;
- struct mlx5_flow_table *ft;
- int tt;
- int err;
-
- ft = ttc->ft.t;
- rules = ttc->rules;
-
- dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
- for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
- struct mlx5e_ttc_rule *rule = &rules[tt];
-
- if (tt == MLX5E_TT_ANY)
- dest.tir_num = params->any_tt_tirn;
- else
- dest.tir_num = params->indir_tirn[tt];
-
- rule->rule = mlx5e_generate_ttc_rule(priv, ft, &dest,
- ttc_rules[tt].etype,
- ttc_rules[tt].proto);
- if (IS_ERR(rule->rule)) {
- err = PTR_ERR(rule->rule);
- rule->rule = NULL;
- goto del_rules;
- }
- rule->default_dest = dest;
- }
-
- if (!params->inner_ttc || !mlx5e_tunnel_inner_ft_supported(priv->mdev))
- return 0;
-
- trules = ttc->tunnel_rules;
- dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest.ft = params->inner_ttc->ft.t;
- for (tt = 0; tt < MLX5E_NUM_TUNNEL_TT; tt++) {
- if (!mlx5e_tunnel_proto_supported_rx(priv->mdev,
- ttc_tunnel_rules[tt].proto))
- continue;
- trules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest,
- ttc_tunnel_rules[tt].etype,
- ttc_tunnel_rules[tt].proto);
- if (IS_ERR(trules[tt])) {
- err = PTR_ERR(trules[tt]);
- trules[tt] = NULL;
- goto del_rules;
- }
- }
-
- return 0;
-
-del_rules:
- mlx5e_cleanup_ttc_rules(ttc);
- return err;
-}
-
-static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc,
- bool use_ipv)
-{
- int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
- struct mlx5e_flow_table *ft = &ttc->ft;
- int ix = 0;
- u32 *in;
- int err;
- u8 *mc;
-
- ft->g = kcalloc(MLX5E_TTC_NUM_GROUPS,
- sizeof(*ft->g), GFP_KERNEL);
- if (!ft->g)
- return -ENOMEM;
- in = kvzalloc(inlen, GFP_KERNEL);
- if (!in) {
- kfree(ft->g);
- ft->g = NULL;
- return -ENOMEM;
- }
-
- /* L4 Group */
- mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
- MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
- if (use_ipv)
- MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_version);
- else
- MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
- MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
- MLX5_SET_CFG(in, start_flow_index, ix);
- ix += MLX5E_TTC_GROUP1_SIZE;
- MLX5_SET_CFG(in, end_flow_index, ix - 1);
- ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
- if (IS_ERR(ft->g[ft->num_groups]))
- goto err;
- ft->num_groups++;
-
- /* L3 Group */
- MLX5_SET(fte_match_param, mc, outer_headers.ip_protocol, 0);
- MLX5_SET_CFG(in, start_flow_index, ix);
- ix += MLX5E_TTC_GROUP2_SIZE;
- MLX5_SET_CFG(in, end_flow_index, ix - 1);
- ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
- if (IS_ERR(ft->g[ft->num_groups]))
- goto err;
- ft->num_groups++;
-
- /* Any Group */
- memset(in, 0, inlen);
- MLX5_SET_CFG(in, start_flow_index, ix);
- ix += MLX5E_TTC_GROUP3_SIZE;
- MLX5_SET_CFG(in, end_flow_index, ix - 1);
- ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
- if (IS_ERR(ft->g[ft->num_groups]))
- goto err;
- ft->num_groups++;
-
- kvfree(in);
- return 0;
-
-err:
- err = PTR_ERR(ft->g[ft->num_groups]);
- ft->g[ft->num_groups] = NULL;
- kvfree(in);
-
- return err;
-}
-
-static struct mlx5_flow_handle *
-mlx5e_generate_inner_ttc_rule(struct mlx5e_priv *priv,
- struct mlx5_flow_table *ft,
- struct mlx5_flow_destination *dest,
- u16 etype, u8 proto)
-{
- MLX5_DECLARE_FLOW_ACT(flow_act);
- struct mlx5_flow_handle *rule;
- struct mlx5_flow_spec *spec;
- int err = 0;
- u8 ipv;
-
- spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
- if (!spec)
- return ERR_PTR(-ENOMEM);
-
- ipv = mlx5e_etype_to_ipv(etype);
- if (etype && ipv) {
- spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS;
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, inner_headers.ip_version);
- MLX5_SET(fte_match_param, spec->match_value, inner_headers.ip_version, ipv);
- }
-
- if (proto) {
- spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS;
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, inner_headers.ip_protocol);
- MLX5_SET(fte_match_param, spec->match_value, inner_headers.ip_protocol, proto);
- }
-
- rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
- if (IS_ERR(rule)) {
- err = PTR_ERR(rule);
- netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
- }
-
- kvfree(spec);
- return err ? ERR_PTR(err) : rule;
-}
-
-static int mlx5e_generate_inner_ttc_table_rules(struct mlx5e_priv *priv,
- struct ttc_params *params,
- struct mlx5e_ttc_table *ttc)
-{
- struct mlx5_flow_destination dest = {};
- struct mlx5e_ttc_rule *rules;
- struct mlx5_flow_table *ft;
- int err;
- int tt;
-
- ft = ttc->ft.t;
- rules = ttc->rules;
- dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
-
- for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
- struct mlx5e_ttc_rule *rule = &rules[tt];
-
- if (tt == MLX5E_TT_ANY)
- dest.tir_num = params->any_tt_tirn;
- else
- dest.tir_num = params->indir_tirn[tt];
-
- rule->rule = mlx5e_generate_inner_ttc_rule(priv, ft, &dest,
- ttc_rules[tt].etype,
- ttc_rules[tt].proto);
- if (IS_ERR(rule->rule)) {
- err = PTR_ERR(rule->rule);
- rule->rule = NULL;
- goto del_rules;
- }
- rule->default_dest = dest;
- }
-
- return 0;
-
-del_rules:
-
- mlx5e_cleanup_ttc_rules(ttc);
- return err;
-}
-
-static int mlx5e_create_inner_ttc_table_groups(struct mlx5e_ttc_table *ttc)
-{
- int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
- struct mlx5e_flow_table *ft = &ttc->ft;
- int ix = 0;
- u32 *in;
- int err;
- u8 *mc;
-
- ft->g = kcalloc(MLX5E_INNER_TTC_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL);
- if (!ft->g)
- return -ENOMEM;
- in = kvzalloc(inlen, GFP_KERNEL);
- if (!in) {
- kfree(ft->g);
- ft->g = NULL;
- return -ENOMEM;
- }
-
- /* L4 Group */
- mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
- MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol);
- MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_version);
- MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
- MLX5_SET_CFG(in, start_flow_index, ix);
- ix += MLX5E_INNER_TTC_GROUP1_SIZE;
- MLX5_SET_CFG(in, end_flow_index, ix - 1);
- ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
- if (IS_ERR(ft->g[ft->num_groups]))
- goto err;
- ft->num_groups++;
-
- /* L3 Group */
- MLX5_SET(fte_match_param, mc, inner_headers.ip_protocol, 0);
- MLX5_SET_CFG(in, start_flow_index, ix);
- ix += MLX5E_INNER_TTC_GROUP2_SIZE;
- MLX5_SET_CFG(in, end_flow_index, ix - 1);
- ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
- if (IS_ERR(ft->g[ft->num_groups]))
- goto err;
- ft->num_groups++;
-
- /* Any Group */
- memset(in, 0, inlen);
- MLX5_SET_CFG(in, start_flow_index, ix);
- ix += MLX5E_INNER_TTC_GROUP3_SIZE;
- MLX5_SET_CFG(in, end_flow_index, ix - 1);
- ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
- if (IS_ERR(ft->g[ft->num_groups]))
- goto err;
- ft->num_groups++;
-
- kvfree(in);
- return 0;
-
-err:
- err = PTR_ERR(ft->g[ft->num_groups]);
- ft->g[ft->num_groups] = NULL;
- kvfree(in);
-
- return err;
-}
-
-void mlx5e_set_ttc_basic_params(struct mlx5e_priv *priv,
- struct ttc_params *ttc_params)
-{
- ttc_params->any_tt_tirn = priv->direct_tir[0].tirn;
- ttc_params->inner_ttc = &priv->fs.inner_ttc;
-}
-
-void mlx5e_set_inner_ttc_ft_params(struct ttc_params *ttc_params)
+static void mlx5e_set_inner_ttc_params(struct mlx5e_priv *priv,
+ struct ttc_params *ttc_params)
{
struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
+ int tt;
- ft_attr->max_fte = MLX5E_INNER_TTC_TABLE_SIZE;
+ memset(ttc_params, 0, sizeof(*ttc_params));
+ ttc_params->ns = mlx5_get_flow_namespace(priv->mdev,
+ MLX5_FLOW_NAMESPACE_KERNEL);
ft_attr->level = MLX5E_INNER_TTC_FT_LEVEL;
ft_attr->prio = MLX5E_NIC_PRIO;
+
+ for (tt = 0; tt < MLX5_NUM_TT; tt++) {
+ ttc_params->dests[tt].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+ ttc_params->dests[tt].tir_num =
+ tt == MLX5_TT_ANY ?
+ mlx5e_rx_res_get_tirn_direct(priv->rx_res, 0) :
+ mlx5e_rx_res_get_tirn_rss_inner(priv->rx_res,
+ tt);
+ }
}
-void mlx5e_set_ttc_ft_params(struct ttc_params *ttc_params)
+void mlx5e_set_ttc_params(struct mlx5e_priv *priv,
+ struct ttc_params *ttc_params, bool tunnel)
{
struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
+ int tt;
- ft_attr->max_fte = MLX5E_TTC_TABLE_SIZE;
+ memset(ttc_params, 0, sizeof(*ttc_params));
+ ttc_params->ns = mlx5_get_flow_namespace(priv->mdev,
+ MLX5_FLOW_NAMESPACE_KERNEL);
ft_attr->level = MLX5E_TTC_FT_LEVEL;
ft_attr->prio = MLX5E_NIC_PRIO;
-}
-
-int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
- struct mlx5e_ttc_table *ttc)
-{
- struct mlx5e_flow_table *ft = &ttc->ft;
- int err;
- if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
- return 0;
-
- ft->t = mlx5_create_flow_table(priv->fs.ns, &params->ft_attr);
- if (IS_ERR(ft->t)) {
- err = PTR_ERR(ft->t);
- ft->t = NULL;
- return err;
+ for (tt = 0; tt < MLX5_NUM_TT; tt++) {
+ ttc_params->dests[tt].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+ ttc_params->dests[tt].tir_num =
+ tt == MLX5_TT_ANY ?
+ mlx5e_rx_res_get_tirn_direct(priv->rx_res, 0) :
+ mlx5e_rx_res_get_tirn_rss(priv->rx_res, tt);
}
- err = mlx5e_create_inner_ttc_table_groups(ttc);
- if (err)
- goto err;
-
- err = mlx5e_generate_inner_ttc_table_rules(priv, params, ttc);
- if (err)
- goto err;
-
- return 0;
-
-err:
- mlx5e_destroy_flow_table(ft);
- return err;
-}
-
-void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv,
- struct mlx5e_ttc_table *ttc)
-{
- if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
+ ttc_params->inner_ttc = tunnel;
+ if (!tunnel || !mlx5_tunnel_inner_ft_supported(priv->mdev))
return;
- mlx5e_cleanup_ttc_rules(ttc);
- mlx5e_destroy_flow_table(&ttc->ft);
-}
-
-void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv,
- struct mlx5e_ttc_table *ttc)
-{
- mlx5e_cleanup_ttc_rules(ttc);
- mlx5e_destroy_flow_table(&ttc->ft);
-}
-
-int mlx5e_create_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
- struct mlx5e_ttc_table *ttc)
-{
- bool match_ipv_outer = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ft_field_support.outer_ip_version);
- struct mlx5e_flow_table *ft = &ttc->ft;
- int err;
-
- ft->t = mlx5_create_flow_table(priv->fs.ns, &params->ft_attr);
- if (IS_ERR(ft->t)) {
- err = PTR_ERR(ft->t);
- ft->t = NULL;
- return err;
+ for (tt = 0; tt < MLX5_NUM_TUNNEL_TT; tt++) {
+ ttc_params->tunnel_dests[tt].type =
+ MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ ttc_params->tunnel_dests[tt].ft =
+ mlx5_get_ttc_flow_table(priv->fs.inner_ttc);
}
-
- err = mlx5e_create_ttc_table_groups(ttc, match_ipv_outer);
- if (err)
- goto err;
-
- err = mlx5e_generate_ttc_table_rules(priv, params, ttc);
- if (err)
- goto err;
-
- return 0;
-err:
- mlx5e_destroy_flow_table(ft);
- return err;
-}
-
-int mlx5e_ttc_fwd_dest(struct mlx5e_priv *priv, enum mlx5e_traffic_types type,
- struct mlx5_flow_destination *new_dest)
-{
- return mlx5_modify_rule_destination(priv->fs.ttc.rules[type].rule, new_dest, NULL);
-}
-
-struct mlx5_flow_destination
-mlx5e_ttc_get_default_dest(struct mlx5e_priv *priv, enum mlx5e_traffic_types type)
-{
- struct mlx5_flow_destination *dest = &priv->fs.ttc.rules[type].default_dest;
-
- WARN_ONCE(dest->type != MLX5_FLOW_DESTINATION_TYPE_TIR,
- "TTC[%d] default dest is not setup yet", type);
-
- return *dest;
-}
-
-int mlx5e_ttc_fwd_default_dest(struct mlx5e_priv *priv, enum mlx5e_traffic_types type)
-{
- struct mlx5_flow_destination dest = mlx5e_ttc_get_default_dest(priv, type);
-
- return mlx5e_ttc_fwd_dest(priv, type, &dest);
}
static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
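
The ttc_rules[]/ttc_tunnel_rules[] tables and the group/rule builders deleted above are not dropped; this series moves them into the shared mlx5 steering library (the lib/fs_ttc code) so that mlx5e and other mlx5 consumers share one copy behind mlx5_create_ttc_table(). A standalone model of the (ethertype, IP protocol) -> traffic-type classification those tables encode, with local stand-ins for the MLX5_TT_* values:

#include <stdio.h>

#define ETH_P_IP    0x0800
#define ETH_P_IPV6  0x86DD
#define IPPROTO_TCP 6
#define IPPROTO_UDP 17

enum tt { TT_IPV4_TCP, TT_IPV6_TCP, TT_IPV4_UDP, TT_IPV6_UDP,
          TT_IPV4, TT_IPV6, TT_ANY };

static enum tt classify(unsigned short etype, unsigned char proto)
{
    if (etype == ETH_P_IP)
        return proto == IPPROTO_TCP ? TT_IPV4_TCP :
               proto == IPPROTO_UDP ? TT_IPV4_UDP : TT_IPV4;
    if (etype == ETH_P_IPV6)
        return proto == IPPROTO_TCP ? TT_IPV6_TCP :
               proto == IPPROTO_UDP ? TT_IPV6_UDP : TT_IPV6;
    return TT_ANY;  /* the catch-all rule at the end of the table */
}

int main(void)
{
    printf("%d\n", classify(ETH_P_IP, IPPROTO_TCP));  /* TT_IPV4_TCP */
    printf("%d\n", classify(ETH_P_IPV6, 0));          /* TT_IPV6 */
    return 0;
}

What remains in en_fs.c is only the mlx5e-specific part: filling ttc_params dests with the TIRs this driver wants each traffic type steered to.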
@@ -1473,7 +939,7 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
outer_headers.dmac_47_16);
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest.ft = priv->fs.ttc.ft.t;
+ dest.ft = mlx5_get_ttc_flow_table(priv->fs.ttc);
switch (type) {
case MLX5E_FULLMATCH:
@@ -1769,10 +1235,47 @@ static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv)
kvfree(priv->fs.vlan);
}
-int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
+static void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv)
+{
+ if (!mlx5_tunnel_inner_ft_supported(priv->mdev))
+ return;
+ mlx5_destroy_ttc_table(priv->fs.inner_ttc);
+}
+
+void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv)
+{
+ mlx5_destroy_ttc_table(priv->fs.ttc);
+}
+
+static int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv)
{
struct ttc_params ttc_params = {};
- int tt, err;
+
+ if (!mlx5_tunnel_inner_ft_supported(priv->mdev))
+ return 0;
+
+ mlx5e_set_inner_ttc_params(priv, &ttc_params);
+ priv->fs.inner_ttc = mlx5_create_inner_ttc_table(priv->mdev,
+ &ttc_params);
+ if (IS_ERR(priv->fs.inner_ttc))
+ return PTR_ERR(priv->fs.inner_ttc);
+ return 0;
+}
+
+int mlx5e_create_ttc_table(struct mlx5e_priv *priv)
+{
+ struct ttc_params ttc_params = {};
+
+ mlx5e_set_ttc_params(priv, &ttc_params, true);
+ priv->fs.ttc = mlx5_create_ttc_table(priv->mdev, &ttc_params);
+ if (IS_ERR(priv->fs.ttc))
+ return PTR_ERR(priv->fs.ttc);
+ return 0;
+}
+
+int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
+{
+ int err;
priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
MLX5_FLOW_NAMESPACE_KERNEL);
@@ -1787,23 +1290,15 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
}
- mlx5e_set_ttc_basic_params(priv, &ttc_params);
- mlx5e_set_inner_ttc_ft_params(&ttc_params);
- for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
- ttc_params.indir_tirn[tt] = priv->inner_indir_tir[tt].tirn;
-
- err = mlx5e_create_inner_ttc_table(priv, &ttc_params, &priv->fs.inner_ttc);
+ err = mlx5e_create_inner_ttc_table(priv);
if (err) {
- netdev_err(priv->netdev, "Failed to create inner ttc table, err=%d\n",
+ netdev_err(priv->netdev,
+ "Failed to create inner ttc table, err=%d\n",
err);
goto err_destroy_arfs_tables;
}
- mlx5e_set_ttc_ft_params(&ttc_params);
- for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
- ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn;
-
- err = mlx5e_create_ttc_table(priv, &ttc_params, &priv->fs.ttc);
+ err = mlx5e_create_ttc_table(priv);
if (err) {
netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
err);
@@ -1837,9 +1332,9 @@ err_destory_vlan_table:
err_destroy_l2_table:
mlx5e_destroy_l2_table(priv);
err_destroy_ttc_table:
- mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
+ mlx5e_destroy_ttc_table(priv);
err_destroy_inner_ttc_table:
- mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
+ mlx5e_destroy_inner_ttc_table(priv);
err_destroy_arfs_tables:
mlx5e_arfs_destroy_tables(priv);
@@ -1851,8 +1346,8 @@ void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv)
mlx5e_ptp_free_rx_fs(priv);
mlx5e_destroy_vlan_table(priv);
mlx5e_destroy_l2_table(priv);
- mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
- mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
+ mlx5e_destroy_ttc_table(priv);
+ mlx5e_destroy_inner_ttc_table(priv);
mlx5e_arfs_destroy_tables(priv);
mlx5e_ethtool_cleanup_steering(priv);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index b416a8ee2eed..03693fa74a70 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -35,11 +35,19 @@
#include "en/params.h"
#include "en/xsk/pool.h"
+static int flow_type_to_traffic_type(u32 flow_type);
+
+static u32 flow_type_mask(u32 flow_type)
+{
+ return flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
+}
+
struct mlx5e_ethtool_rule {
struct list_head list;
struct ethtool_rx_flow_spec flow_spec;
struct mlx5_flow_handle *rule;
struct mlx5e_ethtool_table *eth_ft;
+ struct mlx5e_rss *rss;
};
static void put_flow_table(struct mlx5e_ethtool_table *eth_ft)
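
flow_type_mask() centralizes the flag stripping that used to be open-coded at each call site; FLOW_RSS joins FLOW_EXT and FLOW_MAC_EXT in the mask because classification rules can now carry an RSS context alongside the flow type. A runnable demo with the uapi bit values:

#include <stdio.h>

#define TCP_V4_FLOW  0x01
#define FLOW_RSS     0x20000000
#define FLOW_MAC_EXT 0x40000000
#define FLOW_EXT     0x80000000

static unsigned int flow_type_mask(unsigned int flow_type)
{
    return flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
}

int main(void)
{
    unsigned int ft = TCP_V4_FLOW | FLOW_RSS | FLOW_EXT;
    printf("0x%x -> 0x%x\n", ft, flow_type_mask(ft)); /* -> 0x1 */
    return 0;
}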
@@ -66,7 +74,7 @@ static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
int table_size;
int prio;
- switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
+ switch (flow_type_mask(fs->flow_type)) {
case TCP_V4_FLOW:
case UDP_V4_FLOW:
case TCP_V6_FLOW:
@@ -329,7 +337,7 @@ static int set_flow_attrs(u32 *match_c, u32 *match_v,
outer_headers);
void *outer_headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
outer_headers);
- u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
+ u32 flow_type = flow_type_mask(fs->flow_type);
switch (flow_type) {
case TCP_V4_FLOW:
@@ -397,10 +405,53 @@ static bool outer_header_zero(u32 *match_criteria)
size - 1);
}
+static int flow_get_tirn(struct mlx5e_priv *priv,
+ struct mlx5e_ethtool_rule *eth_rule,
+ struct ethtool_rx_flow_spec *fs,
+ u32 rss_context, u32 *tirn)
+{
+ if (fs->flow_type & FLOW_RSS) {
+ struct mlx5e_lro_param lro_param;
+ struct mlx5e_rss *rss;
+ u32 flow_type;
+ int err;
+ int tt;
+
+ rss = mlx5e_rx_res_rss_get(priv->rx_res, rss_context);
+ if (!rss)
+ return -ENOENT;
+
+ flow_type = flow_type_mask(fs->flow_type);
+ tt = flow_type_to_traffic_type(flow_type);
+ if (tt < 0)
+ return -EINVAL;
+
+ lro_param = mlx5e_get_lro_param(&priv->channels.params);
+ err = mlx5e_rss_obtain_tirn(rss, tt, &lro_param, false, tirn);
+ if (err)
+ return err;
+ eth_rule->rss = rss;
+ mlx5e_rss_refcnt_inc(eth_rule->rss);
+ } else {
+ struct mlx5e_params *params = &priv->channels.params;
+ enum mlx5e_rq_group group;
+ u16 ix;
+
+ mlx5e_qid_get_ch_and_group(params, fs->ring_cookie, &ix, &group);
+
+ *tirn = group == MLX5E_RQ_GROUP_XSK ?
+ mlx5e_rx_res_get_tirn_xsk(priv->rx_res, ix) :
+ mlx5e_rx_res_get_tirn_direct(priv->rx_res, ix);
+ }
+
+ return 0;
+}
+
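
flow_get_tirn() is the pivot of the FLOW_RSS support: a rule that names an RSS context resolves its destination TIR through that context and takes a reference on it, released in del_ethtool_rule(), so a context can never be destroyed while a rule still steers into it. A minimal refcount model of that lifetime rule:

#include <stdio.h>

struct rss_ctx { int refcnt; };

static void rss_get(struct rss_ctx *c) { c->refcnt++; }
static int  rss_put(struct rss_ctx *c) { return --c->refcnt == 0; }

int main(void)
{
    struct rss_ctx ctx = { .refcnt = 1 };  /* creation reference */

    rss_get(&ctx);              /* an ethtool rule starts using ctx */
    if (rss_put(&ctx))          /* rule deleted: ctx must survive */
        printf("bug: freed while referenced\n");
    if (rss_put(&ctx))          /* user destroys the context itself */
        printf("context freed\n");
    return 0;
}

From userspace this corresponds to something like "ethtool -X eth0 context new" followed by "ethtool -N eth0 flow-type tcp4 dst-port 80 context 1 loc 0" (illustrative command lines).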
static struct mlx5_flow_handle *
add_ethtool_flow_rule(struct mlx5e_priv *priv,
+ struct mlx5e_ethtool_rule *eth_rule,
struct mlx5_flow_table *ft,
- struct ethtool_rx_flow_spec *fs)
+ struct ethtool_rx_flow_spec *fs, u32 rss_context)
{
struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND };
struct mlx5_flow_destination *dst = NULL;
@@ -419,22 +470,17 @@ add_ethtool_flow_rule(struct mlx5e_priv *priv,
if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
} else {
- struct mlx5e_params *params = &priv->channels.params;
- enum mlx5e_rq_group group;
- struct mlx5e_tir *tir;
- u16 ix;
-
- mlx5e_qid_get_ch_and_group(params, fs->ring_cookie, &ix, &group);
- tir = group == MLX5E_RQ_GROUP_XSK ? priv->xsk_tir : priv->direct_tir;
-
dst = kzalloc(sizeof(*dst), GFP_KERNEL);
if (!dst) {
err = -ENOMEM;
goto free;
}
+ err = flow_get_tirn(priv, eth_rule, fs, rss_context, &dst->tir_num);
+ if (err)
+ goto free;
+
dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
- dst->tir_num = tir[ix].tirn;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
}
@@ -458,6 +504,8 @@ static void del_ethtool_rule(struct mlx5e_priv *priv,
{
if (eth_rule->rule)
mlx5_del_flow_rules(eth_rule->rule);
+ if (eth_rule->rss)
+ mlx5e_rss_refcnt_dec(eth_rule->rss);
list_del(&eth_rule->list);
priv->fs.ethtool.tot_num_rules--;
put_flow_table(eth_rule->eth_ft);
@@ -618,7 +666,7 @@ static int validate_flow(struct mlx5e_priv *priv,
fs->ring_cookie))
return -EINVAL;
- switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
+ switch (flow_type_mask(fs->flow_type)) {
case ETHER_FLOW:
num_tuples += validate_ethter(fs);
break;
@@ -667,7 +715,7 @@ static int validate_flow(struct mlx5e_priv *priv,
static int
mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
- struct ethtool_rx_flow_spec *fs)
+ struct ethtool_rx_flow_spec *fs, u32 rss_context)
{
struct mlx5e_ethtool_table *eth_ft;
struct mlx5e_ethtool_rule *eth_rule;
@@ -698,7 +746,7 @@ mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
err = -EINVAL;
goto del_ethtool_rule;
}
- rule = add_ethtool_flow_rule(priv, eth_ft->ft, fs);
+ rule = add_ethtool_flow_rule(priv, eth_rule, eth_ft->ft, fs, rss_context);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
goto del_ethtool_rule;
@@ -744,10 +792,20 @@ mlx5e_ethtool_get_flow(struct mlx5e_priv *priv,
return -EINVAL;
list_for_each_entry(eth_rule, &priv->fs.ethtool.rules, list) {
- if (eth_rule->flow_spec.location == location) {
- info->fs = eth_rule->flow_spec;
+ int index;
+
+ if (eth_rule->flow_spec.location != location)
+ continue;
+ if (!info)
return 0;
- }
+ info->fs = eth_rule->flow_spec;
+ if (!eth_rule->rss)
+ return 0;
+ index = mlx5e_rx_res_rss_index(priv->rx_res, eth_rule->rss);
+ if (index < 0)
+ return index;
+ info->rss_context = index;
+ return 0;
}
return -ENOENT;
@@ -763,7 +821,7 @@ mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv,
info->data = MAX_NUM_OF_ETHTOOL_RULES;
while ((!err || err == -ENOENT) && idx < info->rule_cnt) {
- err = mlx5e_ethtool_get_flow(priv, info, location);
+ err = mlx5e_ethtool_get_flow(priv, NULL, location);
if (!err)
rule_locs[idx++] = location;
location++;
@@ -785,45 +843,44 @@ void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv)
INIT_LIST_HEAD(&priv->fs.ethtool.rules);
}
-static enum mlx5e_traffic_types flow_type_to_traffic_type(u32 flow_type)
+static int flow_type_to_traffic_type(u32 flow_type)
{
switch (flow_type) {
case TCP_V4_FLOW:
- return MLX5E_TT_IPV4_TCP;
+ return MLX5_TT_IPV4_TCP;
case TCP_V6_FLOW:
- return MLX5E_TT_IPV6_TCP;
+ return MLX5_TT_IPV6_TCP;
case UDP_V4_FLOW:
- return MLX5E_TT_IPV4_UDP;
+ return MLX5_TT_IPV4_UDP;
case UDP_V6_FLOW:
- return MLX5E_TT_IPV6_UDP;
+ return MLX5_TT_IPV6_UDP;
case AH_V4_FLOW:
- return MLX5E_TT_IPV4_IPSEC_AH;
+ return MLX5_TT_IPV4_IPSEC_AH;
case AH_V6_FLOW:
- return MLX5E_TT_IPV6_IPSEC_AH;
+ return MLX5_TT_IPV6_IPSEC_AH;
case ESP_V4_FLOW:
- return MLX5E_TT_IPV4_IPSEC_ESP;
+ return MLX5_TT_IPV4_IPSEC_ESP;
case ESP_V6_FLOW:
- return MLX5E_TT_IPV6_IPSEC_ESP;
+ return MLX5_TT_IPV6_IPSEC_ESP;
case IPV4_FLOW:
- return MLX5E_TT_IPV4;
+ return MLX5_TT_IPV4;
case IPV6_FLOW:
- return MLX5E_TT_IPV6;
+ return MLX5_TT_IPV6;
default:
- return MLX5E_NUM_INDIR_TIRS;
+ return -EINVAL;
}
}
static int mlx5e_set_rss_hash_opt(struct mlx5e_priv *priv,
struct ethtool_rxnfc *nfc)
{
- int inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
- enum mlx5e_traffic_types tt;
u8 rx_hash_field = 0;
- void *in;
+ int err;
+ int tt;
tt = flow_type_to_traffic_type(nfc->flow_type);
- if (tt == MLX5E_NUM_INDIR_TIRS)
- return -EINVAL;
+ if (tt < 0)
+ return tt;
/* RSS does not support anything other than hashing to queues
* on src IP, dest IP, TCP/UDP src port and TCP/UDP dest
@@ -848,35 +905,24 @@ static int mlx5e_set_rss_hash_opt(struct mlx5e_priv *priv,
if (nfc->data & RXH_L4_B_2_3)
rx_hash_field |= MLX5_HASH_FIELD_SEL_L4_DPORT;
- in = kvzalloc(inlen, GFP_KERNEL);
- if (!in)
- return -ENOMEM;
-
mutex_lock(&priv->state_lock);
-
- if (rx_hash_field == priv->rss_params.rx_hash_fields[tt])
- goto out;
-
- priv->rss_params.rx_hash_fields[tt] = rx_hash_field;
- mlx5e_modify_tirs_hash(priv, in);
-
-out:
+ err = mlx5e_rx_res_rss_set_hash_fields(priv->rx_res, tt, rx_hash_field);
mutex_unlock(&priv->state_lock);
- kvfree(in);
- return 0;
+
+ return err;
}
static int mlx5e_get_rss_hash_opt(struct mlx5e_priv *priv,
struct ethtool_rxnfc *nfc)
{
- enum mlx5e_traffic_types tt;
u32 hash_field = 0;
+ int tt;
tt = flow_type_to_traffic_type(nfc->flow_type);
- if (tt == MLX5E_NUM_INDIR_TIRS)
- return -EINVAL;
+ if (tt < 0)
+ return tt;
- hash_field = priv->rss_params.rx_hash_fields[tt];
+ hash_field = mlx5e_rx_res_rss_get_hash_fields(priv->rx_res, tt);
nfc->data = 0;
if (hash_field & MLX5_HASH_FIELD_SEL_SRC_IP)
@@ -898,7 +944,7 @@ int mlx5e_ethtool_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
switch (cmd->cmd) {
case ETHTOOL_SRXCLSRLINS:
- err = mlx5e_ethtool_flow_replace(priv, &cmd->fs);
+ err = mlx5e_ethtool_flow_replace(priv, &cmd->fs, cmd->rss_context);
break;
case ETHTOOL_SRXCLSRLDEL:
err = mlx5e_ethtool_flow_remove(priv, cmd->fs.location);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 24f919ef9b8e..26d2f78c7706 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1619,7 +1619,7 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
(__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode);
- MLX5_SET(cqc, cqc, c_eqn, eqn);
+ MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
MLX5_ADAPTER_PAGE_SHIFT);
@@ -1711,7 +1711,7 @@ static int mlx5e_open_sqs(struct mlx5e_channel *c,
{
int err, tc;
- for (tc = 0; tc < params->num_tc; tc++) {
+ for (tc = 0; tc < mlx5e_get_dcb_num_tc(params); tc++) {
int txq_ix = c->ix + tc * params->num_channels;
err = mlx5e_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix,
@@ -1992,7 +1992,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
c->pdev = mlx5_core_dma_dev(priv->mdev);
c->netdev = priv->netdev;
c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey.key);
- c->num_tc = params->num_tc;
+ c->num_tc = mlx5e_get_dcb_num_tc(params);
c->xdp = !!params->xdp_prog;
c->stats = &priv->channel_stats[ix].ch;
c->aff_mask = irq_get_effective_affinity_mask(irq);
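
params->num_tc gives way to a params->mqprio substructure, and DCB-mode consumers now go through an accessor so that channel-mode mqprio, which partitions existing queues instead of multiplying them, reports a single TC here. A standalone version of what the accessor reduces to, assuming the kernel's TC_MQPRIO_MODE_* semantics:

#include <stdio.h>

enum mqprio_mode { MODE_DCB, MODE_CHANNEL };

struct params {
    struct { enum mqprio_mode mode; unsigned char num_tc; } mqprio;
};

static int get_dcb_num_tc(const struct params *p)
{
    return p->mqprio.mode == MODE_DCB ? p->mqprio.num_tc : 1;
}

int main(void)
{
    struct params dcb = { .mqprio = { MODE_DCB, 4 } };
    struct params chn = { .mqprio = { MODE_CHANNEL, 4 } };

    printf("dcb: %d, channel: %d\n",
           get_dcb_num_tc(&dcb), get_dcb_num_tc(&chn));
    return 0;
}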
@@ -2185,400 +2185,14 @@ void mlx5e_close_channels(struct mlx5e_channels *chs)
chs->num = 0;
}
-static int
-mlx5e_create_rqt(struct mlx5e_priv *priv, int sz, struct mlx5e_rqt *rqt)
-{
- struct mlx5_core_dev *mdev = priv->mdev;
- void *rqtc;
- int inlen;
- int err;
- u32 *in;
- int i;
-
- inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
- in = kvzalloc(inlen, GFP_KERNEL);
- if (!in)
- return -ENOMEM;
-
- rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
-
- MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
- MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
-
- for (i = 0; i < sz; i++)
- MLX5_SET(rqtc, rqtc, rq_num[i], priv->drop_rq.rqn);
-
- err = mlx5_core_create_rqt(mdev, in, inlen, &rqt->rqtn);
- if (!err)
- rqt->enabled = true;
-
- kvfree(in);
- return err;
-}
-
-void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt)
-{
- rqt->enabled = false;
- mlx5_core_destroy_rqt(priv->mdev, rqt->rqtn);
-}
-
-int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv)
-{
- struct mlx5e_rqt *rqt = &priv->indir_rqt;
- int err;
-
- err = mlx5e_create_rqt(priv, MLX5E_INDIR_RQT_SIZE, rqt);
- if (err)
- mlx5_core_warn(priv->mdev, "create indirect rqts failed, %d\n", err);
- return err;
-}
-
-int mlx5e_create_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs, int n)
-{
- int err;
- int ix;
-
- for (ix = 0; ix < n; ix++) {
- err = mlx5e_create_rqt(priv, 1 /*size */, &tirs[ix].rqt);
- if (unlikely(err))
- goto err_destroy_rqts;
- }
-
- return 0;
-
-err_destroy_rqts:
- mlx5_core_warn(priv->mdev, "create rqts failed, %d\n", err);
- for (ix--; ix >= 0; ix--)
- mlx5e_destroy_rqt(priv, &tirs[ix].rqt);
-
- return err;
-}
-
-void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs, int n)
-{
- int i;
-
- for (i = 0; i < n; i++)
- mlx5e_destroy_rqt(priv, &tirs[i].rqt);
-}
-
-static int mlx5e_rx_hash_fn(int hfunc)
-{
- return (hfunc == ETH_RSS_HASH_TOP) ?
- MLX5_RX_HASH_FN_TOEPLITZ :
- MLX5_RX_HASH_FN_INVERTED_XOR8;
-}
-
-int mlx5e_bits_invert(unsigned long a, int size)
-{
- int inv = 0;
- int i;
-
- for (i = 0; i < size; i++)
- inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i;
-
- return inv;
-}
-
-static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, int sz,
- struct mlx5e_redirect_rqt_param rrp, void *rqtc)
-{
- int i;
-
- for (i = 0; i < sz; i++) {
- u32 rqn;
-
- if (rrp.is_rss) {
- int ix = i;
-
- if (rrp.rss.hfunc == ETH_RSS_HASH_XOR)
- ix = mlx5e_bits_invert(i, ilog2(sz));
-
- ix = priv->rss_params.indirection_rqt[ix];
- rqn = rrp.rss.channels->c[ix]->rq.rqn;
- } else {
- rqn = rrp.rqn;
- }
- MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
- }
-}
-
-int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz,
- struct mlx5e_redirect_rqt_param rrp)
-{
- struct mlx5_core_dev *mdev = priv->mdev;
- void *rqtc;
- int inlen;
- u32 *in;
- int err;
-
- inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz;
- in = kvzalloc(inlen, GFP_KERNEL);
- if (!in)
- return -ENOMEM;
-
- rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);
-
- MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
- MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);
- mlx5e_fill_rqt_rqns(priv, sz, rrp, rqtc);
- err = mlx5_core_modify_rqt(mdev, rqtn, in, inlen);
-
- kvfree(in);
- return err;
-}
-
-static u32 mlx5e_get_direct_rqn(struct mlx5e_priv *priv, int ix,
- struct mlx5e_redirect_rqt_param rrp)
-{
- if (!rrp.is_rss)
- return rrp.rqn;
-
- if (ix >= rrp.rss.channels->num)
- return priv->drop_rq.rqn;
-
- return rrp.rss.channels->c[ix]->rq.rqn;
-}
-
-static void mlx5e_redirect_rqts(struct mlx5e_priv *priv,
- struct mlx5e_redirect_rqt_param rrp,
- struct mlx5e_redirect_rqt_param *ptp_rrp)
-{
- u32 rqtn;
- int ix;
-
- if (priv->indir_rqt.enabled) {
- /* RSS RQ table */
- rqtn = priv->indir_rqt.rqtn;
- mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, rrp);
- }
-
- for (ix = 0; ix < priv->max_nch; ix++) {
- struct mlx5e_redirect_rqt_param direct_rrp = {
- .is_rss = false,
- {
- .rqn = mlx5e_get_direct_rqn(priv, ix, rrp)
- },
- };
-
- /* Direct RQ Tables */
- if (!priv->direct_tir[ix].rqt.enabled)
- continue;
-
- rqtn = priv->direct_tir[ix].rqt.rqtn;
- mlx5e_redirect_rqt(priv, rqtn, 1, direct_rrp);
- }
- if (ptp_rrp) {
- rqtn = priv->ptp_tir.rqt.rqtn;
- mlx5e_redirect_rqt(priv, rqtn, 1, *ptp_rrp);
- }
-}
-
-static void mlx5e_redirect_rqts_to_channels(struct mlx5e_priv *priv,
- struct mlx5e_channels *chs)
-{
- bool rx_ptp_support = priv->profile->rx_ptp_support;
- struct mlx5e_redirect_rqt_param *ptp_rrp_p = NULL;
- struct mlx5e_redirect_rqt_param rrp = {
- .is_rss = true,
- {
- .rss = {
- .channels = chs,
- .hfunc = priv->rss_params.hfunc,
- }
- },
- };
- struct mlx5e_redirect_rqt_param ptp_rrp;
-
- if (rx_ptp_support) {
- u32 ptp_rqn;
-
- ptp_rrp.is_rss = false;
- ptp_rrp.rqn = mlx5e_ptp_get_rqn(priv->channels.ptp, &ptp_rqn) ?
- priv->drop_rq.rqn : ptp_rqn;
- ptp_rrp_p = &ptp_rrp;
- }
- mlx5e_redirect_rqts(priv, rrp, ptp_rrp_p);
-}
-
-static void mlx5e_redirect_rqts_to_drop(struct mlx5e_priv *priv)
-{
- bool rx_ptp_support = priv->profile->rx_ptp_support;
- struct mlx5e_redirect_rqt_param drop_rrp = {
- .is_rss = false,
- {
- .rqn = priv->drop_rq.rqn,
- },
- };
-
- mlx5e_redirect_rqts(priv, drop_rrp, rx_ptp_support ? &drop_rrp : NULL);
-}
-
-static const struct mlx5e_tirc_config tirc_default_config[MLX5E_NUM_INDIR_TIRS] = {
- [MLX5E_TT_IPV4_TCP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
- .l4_prot_type = MLX5_L4_PROT_TYPE_TCP,
- .rx_hash_fields = MLX5_HASH_IP_L4PORTS,
- },
- [MLX5E_TT_IPV6_TCP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
- .l4_prot_type = MLX5_L4_PROT_TYPE_TCP,
- .rx_hash_fields = MLX5_HASH_IP_L4PORTS,
- },
- [MLX5E_TT_IPV4_UDP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
- .l4_prot_type = MLX5_L4_PROT_TYPE_UDP,
- .rx_hash_fields = MLX5_HASH_IP_L4PORTS,
- },
- [MLX5E_TT_IPV6_UDP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
- .l4_prot_type = MLX5_L4_PROT_TYPE_UDP,
- .rx_hash_fields = MLX5_HASH_IP_L4PORTS,
- },
- [MLX5E_TT_IPV4_IPSEC_AH] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
- .l4_prot_type = 0,
- .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI,
- },
- [MLX5E_TT_IPV6_IPSEC_AH] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
- .l4_prot_type = 0,
- .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI,
- },
- [MLX5E_TT_IPV4_IPSEC_ESP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
- .l4_prot_type = 0,
- .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI,
- },
- [MLX5E_TT_IPV6_IPSEC_ESP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
- .l4_prot_type = 0,
- .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI,
- },
- [MLX5E_TT_IPV4] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
- .l4_prot_type = 0,
- .rx_hash_fields = MLX5_HASH_IP,
- },
- [MLX5E_TT_IPV6] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
- .l4_prot_type = 0,
- .rx_hash_fields = MLX5_HASH_IP,
- },
-};
-
-struct mlx5e_tirc_config mlx5e_tirc_get_default_config(enum mlx5e_traffic_types tt)
-{
- return tirc_default_config[tt];
-}
-
-static void mlx5e_build_tir_ctx_lro(struct mlx5e_params *params, void *tirc)
-{
- if (!params->lro_en)
- return;
-
-#define ROUGH_MAX_L2_L3_HDR_SZ 256
-
- MLX5_SET(tirc, tirc, lro_enable_mask,
- MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
- MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
- MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
- (MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ - ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
- MLX5_SET(tirc, tirc, lro_timeout_period_usecs, params->lro_timeout);
-}
-
-void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_rss_params *rss_params,
- const struct mlx5e_tirc_config *ttconfig,
- void *tirc, bool inner)
-{
- void *hfso = inner ? MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_inner) :
- MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
-
- MLX5_SET(tirc, tirc, rx_hash_fn, mlx5e_rx_hash_fn(rss_params->hfunc));
- if (rss_params->hfunc == ETH_RSS_HASH_TOP) {
- void *rss_key = MLX5_ADDR_OF(tirc, tirc,
- rx_hash_toeplitz_key);
- size_t len = MLX5_FLD_SZ_BYTES(tirc,
- rx_hash_toeplitz_key);
-
- MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
- memcpy(rss_key, rss_params->toeplitz_hash_key, len);
- }
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- ttconfig->l3_prot_type);
- MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
- ttconfig->l4_prot_type);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- ttconfig->rx_hash_fields);
-}
-
-static void mlx5e_update_rx_hash_fields(struct mlx5e_tirc_config *ttconfig,
- enum mlx5e_traffic_types tt,
- u32 rx_hash_fields)
-{
- *ttconfig = tirc_default_config[tt];
- ttconfig->rx_hash_fields = rx_hash_fields;
-}
-
-void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in)
-{
- void *tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
- struct mlx5e_rss_params *rss = &priv->rss_params;
- struct mlx5_core_dev *mdev = priv->mdev;
- int ctxlen = MLX5_ST_SZ_BYTES(tirc);
- struct mlx5e_tirc_config ttconfig;
- int tt;
-
- MLX5_SET(modify_tir_in, in, bitmask.hash, 1);
-
- for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
- memset(tirc, 0, ctxlen);
- mlx5e_update_rx_hash_fields(&ttconfig, tt,
- rss->rx_hash_fields[tt]);
- mlx5e_build_indir_tir_ctx_hash(rss, &ttconfig, tirc, false);
- mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in);
- }
-
- /* Verify inner tirs resources allocated */
- if (!priv->inner_indir_tir[0].tirn)
- return;
-
- for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
- memset(tirc, 0, ctxlen);
- mlx5e_update_rx_hash_fields(&ttconfig, tt,
- rss->rx_hash_fields[tt]);
- mlx5e_build_indir_tir_ctx_hash(rss, &ttconfig, tirc, true);
- mlx5_core_modify_tir(mdev, priv->inner_indir_tir[tt].tirn, in);
- }
-}
-
static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
{
- struct mlx5_core_dev *mdev = priv->mdev;
-
- void *in;
- void *tirc;
- int inlen;
- int err;
- int tt;
- int ix;
-
- inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
- in = kvzalloc(inlen, GFP_KERNEL);
- if (!in)
- return -ENOMEM;
-
- MLX5_SET(modify_tir_in, in, bitmask.lro, 1);
- tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
+ struct mlx5e_rx_res *res = priv->rx_res;
+ struct mlx5e_lro_param lro_param;
- mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);
+ lro_param = mlx5e_get_lro_param(&priv->channels.params);
- for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
- err = mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in);
- if (err)
- goto free_in;
- }
-
- for (ix = 0; ix < priv->max_nch; ix++) {
- err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn, in);
- if (err)
- goto free_in;
- }
-
-free_in:
- kvfree(in);
-
- return err;
+ return mlx5e_rx_res_lro_set_param(res, &lro_param);
}
static MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_modify_tirs_lro);
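
Among the helpers removed in this hunk, mlx5e_bits_invert() is what the XOR8 hash function used to spread indirection-table entries; it survives the refactor alongside the new RQT code (an assumption about its new home) rather than disappearing, and the LRO path now hands a compact mlx5e_lro_param to the RX-resources object instead of rebuilding modify_tir commands by hand. A standalone copy showing what the bit-reversal computes:

#include <stdio.h>

static int bits_invert(unsigned long a, int size)
{
    int inv = 0;

    for (int i = 0; i < size; i++)
        inv |= ((a >> (size - i - 1)) & 1) << i; /* reverse bit order */
    return inv;
}

int main(void)
{
    /* For an 8-entry table (size = 3): index 1 (0b001) -> 4 (0b100). */
    for (unsigned long i = 0; i < 8; i++)
        printf("%lu -> %d\n", i, bits_invert(i, 3));
    return 0;
}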
@@ -2649,22 +2263,34 @@ void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv)
ETH_MAX_MTU);
}
-static void mlx5e_netdev_set_tcs(struct net_device *netdev, u16 nch, u8 ntc)
+static int mlx5e_netdev_set_tcs(struct net_device *netdev, u16 nch, u8 ntc,
+ struct tc_mqprio_qopt_offload *mqprio)
{
- int tc;
+ int tc, err;
netdev_reset_tc(netdev);
if (ntc == 1)
- return;
+ return 0;
- netdev_set_num_tc(netdev, ntc);
+ err = netdev_set_num_tc(netdev, ntc);
+ if (err) {
+ netdev_WARN(netdev, "netdev_set_num_tc failed (%d), ntc = %d\n", err, ntc);
+ return err;
+ }
- /* Map netdev TCs to offset 0
- * We have our own UP to TXQ mapping for QoS
- */
- for (tc = 0; tc < ntc; tc++)
- netdev_set_tc_queue(netdev, tc, nch, 0);
+ for (tc = 0; tc < ntc; tc++) {
+ u16 count, offset;
+
+ /* For DCB mode, map netdev TCs to offset 0
+ * We have our own UP to TXQ mapping for QoS
+ */
+ count = mqprio ? mqprio->qopt.count[tc] : nch;
+ offset = mqprio ? mqprio->qopt.offset[tc] : 0;
+ netdev_set_tc_queue(netdev, tc, count, offset);
+ }
+
+ return 0;
}
int mlx5e_update_tx_netdev_queues(struct mlx5e_priv *priv)
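
In DCB mode every TC keeps mapping to the full channel range at offset 0 (the driver does its own UP-to-TXQ mapping), while channel mode hands each TC the contiguous slice requested by mqprio. A standalone table of what netdev_set_tc_queue() receives in each mode, with hypothetical queue counts:

#include <stdio.h>

struct mqprio { unsigned short count[8], offset[8]; };

static void set_tcs(unsigned short nch, int ntc, const struct mqprio *mq)
{
    for (int tc = 0; tc < ntc; tc++) {
        unsigned short count  = mq ? mq->count[tc]  : nch;
        unsigned short offset = mq ? mq->offset[tc] : 0;

        printf("tc %d -> queues [%u, %u)\n", tc, offset, offset + count);
    }
}

int main(void)
{
    struct mqprio mq = { .count = { 2, 1, 1 }, .offset = { 0, 2, 3 } };

    puts("DCB mode (all TCs share the channel range):");
    set_tcs(4, 3, NULL);
    puts("channel mode (each TC owns a slice):");
    set_tcs(4, 3, &mq);
    return 0;
}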
@@ -2674,7 +2300,7 @@ int mlx5e_update_tx_netdev_queues(struct mlx5e_priv *priv)
qos_queues = mlx5e_qos_cur_leaf_nodes(priv);
nch = priv->channels.params.num_channels;
- ntc = priv->channels.params.num_tc;
+ ntc = mlx5e_get_dcb_num_tc(&priv->channels.params);
num_txqs = nch * ntc + qos_queues;
if (MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_TX_PORT_TS))
num_txqs += ntc;
@@ -2698,11 +2324,12 @@ static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
old_ntc = netdev->num_tc ? : 1;
nch = priv->channels.params.num_channels;
- ntc = priv->channels.params.num_tc;
+ ntc = mlx5e_get_dcb_num_tc(&priv->channels.params);
num_rxqs = nch * priv->profile->rq_groups;
- mlx5e_netdev_set_tcs(netdev, nch, ntc);
-
+ err = mlx5e_netdev_set_tcs(netdev, nch, ntc, NULL);
+ if (err)
+ goto err_out;
err = mlx5e_update_tx_netdev_queues(priv);
if (err)
goto err_tcs;
@@ -2723,7 +2350,8 @@ err_txqs:
WARN_ON_ONCE(netif_set_real_num_tx_queues(netdev, old_num_txqs));
err_tcs:
- mlx5e_netdev_set_tcs(netdev, old_num_txqs / old_ntc, old_ntc);
+ mlx5e_netdev_set_tcs(netdev, old_num_txqs / old_ntc, old_ntc, NULL);
+err_out:
return err;
}
@@ -2759,9 +2387,9 @@ int mlx5e_num_channels_changed(struct mlx5e_priv *priv)
mlx5e_set_default_xps_cpumasks(priv, &priv->channels.params);
- if (!netif_is_rxfh_configured(priv->netdev))
- mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt,
- MLX5E_INDIR_RQT_SIZE, count);
+ /* This function may be called on attach, before priv->rx_res is created. */
+ if (!netif_is_rxfh_configured(priv->netdev) && priv->rx_res)
+ mlx5e_rx_res_rss_set_indir_uniform(priv->rx_res, count);
return 0;
}
@@ -2773,7 +2401,7 @@ static void mlx5e_build_txq_maps(struct mlx5e_priv *priv)
int i, ch, tc, num_tc;
ch = priv->channels.num;
- num_tc = priv->channels.params.num_tc;
+ num_tc = mlx5e_get_dcb_num_tc(&priv->channels.params);
for (i = 0; i < ch; i++) {
for (tc = 0; tc < num_tc; tc++) {
@@ -2804,7 +2432,7 @@ static void mlx5e_update_num_tc_x_num_ch(struct mlx5e_priv *priv)
{
/* Sync with mlx5e_select_queue. */
WRITE_ONCE(priv->num_tc_x_num_ch,
- priv->channels.params.num_tc * priv->channels.num);
+ mlx5e_get_dcb_num_tc(&priv->channels.params) * priv->channels.num);
}
void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
@@ -2820,16 +2448,15 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
mlx5e_add_sqs_fwd_rules(priv);
mlx5e_wait_channels_min_rx_wqes(&priv->channels);
- mlx5e_redirect_rqts_to_channels(priv, &priv->channels);
- mlx5e_xsk_redirect_rqts_to_channels(priv, &priv->channels);
+ if (priv->rx_res)
+ mlx5e_rx_res_channels_activate(priv->rx_res, &priv->channels);
}
void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
{
- mlx5e_xsk_redirect_rqts_to_drop(priv, &priv->channels);
-
- mlx5e_redirect_rqts_to_drop(priv);
+ if (priv->rx_res)
+ mlx5e_rx_res_channels_deactivate(priv->rx_res);
if (mlx5e_is_vport_rep(priv))
mlx5e_remove_sqs_fwd_rules(priv);
@@ -3204,224 +2831,152 @@ static void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
mlx5e_destroy_tises(priv);
}
-static void mlx5e_build_indir_tir_ctx_common(struct mlx5e_priv *priv,
- u32 rqtn, u32 *tirc)
-{
- MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.hw_objs.td.tdn);
- MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
- MLX5_SET(tirc, tirc, indirect_table, rqtn);
- MLX5_SET(tirc, tirc, tunneled_offload_en,
- priv->channels.params.tunneled_offload_en);
-
- mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);
-}
-
-static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv,
- enum mlx5e_traffic_types tt,
- u32 *tirc)
+static int mlx5e_modify_channels_scatter_fcs(struct mlx5e_channels *chs, bool enable)
{
- mlx5e_build_indir_tir_ctx_common(priv, priv->indir_rqt.rqtn, tirc);
- mlx5e_build_indir_tir_ctx_hash(&priv->rss_params,
- &tirc_default_config[tt], tirc, false);
-}
+ int err = 0;
+ int i;
-static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 rqtn, u32 *tirc)
-{
- mlx5e_build_indir_tir_ctx_common(priv, rqtn, tirc);
- MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
-}
+ for (i = 0; i < chs->num; i++) {
+ err = mlx5e_modify_rq_scatter_fcs(&chs->c[i]->rq, enable);
+ if (err)
+ return err;
+ }
-static void mlx5e_build_inner_indir_tir_ctx(struct mlx5e_priv *priv,
- enum mlx5e_traffic_types tt,
- u32 *tirc)
-{
- mlx5e_build_indir_tir_ctx_common(priv, priv->indir_rqt.rqtn, tirc);
- mlx5e_build_indir_tir_ctx_hash(&priv->rss_params,
- &tirc_default_config[tt], tirc, true);
+ return 0;
}
-int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc)
+static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd)
{
- struct mlx5e_tir *tir;
- void *tirc;
- int inlen;
- int i = 0;
int err;
- u32 *in;
- int tt;
-
- inlen = MLX5_ST_SZ_BYTES(create_tir_in);
- in = kvzalloc(inlen, GFP_KERNEL);
- if (!in)
- return -ENOMEM;
-
- for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
- memset(in, 0, inlen);
- tir = &priv->indir_tir[tt];
- tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
- mlx5e_build_indir_tir_ctx(priv, tt, tirc);
- err = mlx5e_create_tir(priv->mdev, tir, in);
- if (err) {
- mlx5_core_warn(priv->mdev, "create indirect tirs failed, %d\n", err);
- goto err_destroy_inner_tirs;
- }
- }
-
- if (!inner_ttc || !mlx5e_tunnel_inner_ft_supported(priv->mdev))
- goto out;
+ int i;
- for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) {
- memset(in, 0, inlen);
- tir = &priv->inner_indir_tir[i];
- tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
- mlx5e_build_inner_indir_tir_ctx(priv, i, tirc);
- err = mlx5e_create_tir(priv->mdev, tir, in);
- if (err) {
- mlx5_core_warn(priv->mdev, "create inner indirect tirs failed, %d\n", err);
- goto err_destroy_inner_tirs;
- }
+ for (i = 0; i < chs->num; i++) {
+ err = mlx5e_modify_rq_vsd(&chs->c[i]->rq, vsd);
+ if (err)
+ return err;
}
-
-out:
- kvfree(in);
+ if (chs->ptp && test_bit(MLX5E_PTP_STATE_RX, chs->ptp->state))
+ return mlx5e_modify_rq_vsd(&chs->ptp->rq, vsd);
return 0;
-
-err_destroy_inner_tirs:
- for (i--; i >= 0; i--)
- mlx5e_destroy_tir(priv->mdev, &priv->inner_indir_tir[i]);
-
- for (tt--; tt >= 0; tt--)
- mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[tt]);
-
- kvfree(in);
-
- return err;
}
-int mlx5e_create_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs, int n)
+static int mlx5e_setup_tc_mqprio_dcb(struct mlx5e_priv *priv,
+ struct tc_mqprio_qopt *mqprio)
{
- struct mlx5e_tir *tir;
- void *tirc;
- int inlen;
- int err = 0;
- u32 *in;
- int ix;
-
- inlen = MLX5_ST_SZ_BYTES(create_tir_in);
- in = kvzalloc(inlen, GFP_KERNEL);
- if (!in)
- return -ENOMEM;
+ struct mlx5e_params new_params;
+ u8 tc = mqprio->num_tc;
+ int err;
- for (ix = 0; ix < n; ix++) {
- memset(in, 0, inlen);
- tir = &tirs[ix];
- tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
- mlx5e_build_direct_tir_ctx(priv, tir->rqt.rqtn, tirc);
- err = mlx5e_create_tir(priv->mdev, tir, in);
- if (unlikely(err))
- goto err_destroy_ch_tirs;
- }
+ mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
- goto out;
+ if (tc && tc != MLX5E_MAX_NUM_TC)
+ return -EINVAL;
-err_destroy_ch_tirs:
- mlx5_core_warn(priv->mdev, "create tirs failed, %d\n", err);
- for (ix--; ix >= 0; ix--)
- mlx5e_destroy_tir(priv->mdev, &tirs[ix]);
+ new_params = priv->channels.params;
+ new_params.mqprio.mode = TC_MQPRIO_MODE_DCB;
+ new_params.mqprio.num_tc = tc ? tc : 1;
-out:
- kvfree(in);
+ err = mlx5e_safe_switch_params(priv, &new_params,
+ mlx5e_num_channels_changed_ctx, NULL, true);
+ priv->max_opened_tc = max_t(u8, priv->max_opened_tc,
+ mlx5e_get_dcb_num_tc(&priv->channels.params));
return err;
}
-void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv)
+static int mlx5e_mqprio_channel_validate(struct mlx5e_priv *priv,
+ struct tc_mqprio_qopt_offload *mqprio)
{
+ struct net_device *netdev = priv->netdev;
+ int agg_count = 0;
int i;
- for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
- mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[i]);
-
- /* Verify inner tirs resources allocated */
- if (!priv->inner_indir_tir[0].tirn)
- return;
-
- for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
- mlx5e_destroy_tir(priv->mdev, &priv->inner_indir_tir[i]);
-}
-
-void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs, int n)
-{
- int i;
+ if (mqprio->qopt.offset[0] != 0 || mqprio->qopt.num_tc < 1 ||
+ mqprio->qopt.num_tc > MLX5E_MAX_NUM_MQPRIO_CH_TC)
+ return -EINVAL;
- for (i = 0; i < n; i++)
- mlx5e_destroy_tir(priv->mdev, &tirs[i]);
-}
+ for (i = 0; i < mqprio->qopt.num_tc; i++) {
+ if (!mqprio->qopt.count[i]) {
+ netdev_err(netdev, "Zero size for queue-group (%d) is not supported\n", i);
+ return -EINVAL;
+ }
+ if (mqprio->min_rate[i]) {
+ netdev_err(netdev, "Min tx rate is not supported\n");
+ return -EINVAL;
+ }
+ if (mqprio->max_rate[i]) {
+ netdev_err(netdev, "Max tx rate is not supported\n");
+ return -EINVAL;
+ }
-static int mlx5e_modify_channels_scatter_fcs(struct mlx5e_channels *chs, bool enable)
-{
- int err = 0;
- int i;
+ if (mqprio->qopt.offset[i] != agg_count) {
+ netdev_err(netdev, "Discontinuous queues config is not supported\n");
+ return -EINVAL;
+ }
+ agg_count += mqprio->qopt.count[i];
+ }
- for (i = 0; i < chs->num; i++) {
- err = mlx5e_modify_rq_scatter_fcs(&chs->c[i]->rq, enable);
- if (err)
- return err;
+ if (priv->channels.params.num_channels < agg_count) {
+ netdev_err(netdev, "Num of queues (%d) exceeds available (%d)\n",
+ agg_count, priv->channels.params.num_channels);
+ return -EINVAL;
}
return 0;
}
-static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd)
+static int mlx5e_mqprio_channel_set_tcs_ctx(struct mlx5e_priv *priv, void *ctx)
{
- int err;
- int i;
+ struct tc_mqprio_qopt_offload *mqprio = (struct tc_mqprio_qopt_offload *)ctx;
+ struct net_device *netdev = priv->netdev;
+ u8 num_tc;
- for (i = 0; i < chs->num; i++) {
- err = mlx5e_modify_rq_vsd(&chs->c[i]->rq, vsd);
- if (err)
- return err;
- }
- if (chs->ptp && test_bit(MLX5E_PTP_STATE_RX, chs->ptp->state))
- return mlx5e_modify_rq_vsd(&chs->ptp->rq, vsd);
+ if (priv->channels.params.mqprio.mode != TC_MQPRIO_MODE_CHANNEL)
+ return -EINVAL;
+
+ num_tc = priv->channels.params.mqprio.num_tc;
+	mlx5e_netdev_set_tcs(netdev, 0, num_tc, mqprio->qopt.count);
return 0;
}
-static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
- struct tc_mqprio_qopt *mqprio)
+static int mlx5e_setup_tc_mqprio_channel(struct mlx5e_priv *priv,
+ struct tc_mqprio_qopt_offload *mqprio)
{
struct mlx5e_params new_params;
- u8 tc = mqprio->num_tc;
- int err = 0;
+ int err;
- mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+ err = mlx5e_mqprio_channel_validate(priv, mqprio);
+ if (err)
+ return err;
- if (tc && tc != MLX5E_MAX_NUM_TC)
- return -EINVAL;
+ new_params = priv->channels.params;
+ new_params.mqprio.mode = TC_MQPRIO_MODE_CHANNEL;
+ new_params.mqprio.num_tc = mqprio->qopt.num_tc;
+ err = mlx5e_safe_switch_params(priv, &new_params,
+ mlx5e_mqprio_channel_set_tcs_ctx, mqprio, true);
- mutex_lock(&priv->state_lock);
+ return err;
+}
+static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
+ struct tc_mqprio_qopt_offload *mqprio)
+{
/* MQPRIO is another toplevel qdisc that can't be attached
* simultaneously with the offloaded HTB.
*/
- if (WARN_ON(priv->htb.maj_id)) {
- err = -EINVAL;
- goto out;
- }
-
- new_params = priv->channels.params;
- new_params.num_tc = tc ? tc : 1;
-
- err = mlx5e_safe_switch_params(priv, &new_params,
- mlx5e_num_channels_changed_ctx, NULL, true);
+ if (WARN_ON(priv->htb.maj_id))
+ return -EINVAL;
-out:
- priv->max_opened_tc = max_t(u8, priv->max_opened_tc,
- priv->channels.params.num_tc);
- mutex_unlock(&priv->state_lock);
- return err;
+ switch (mqprio->mode) {
+ case TC_MQPRIO_MODE_DCB:
+ return mlx5e_setup_tc_mqprio_dcb(priv, &mqprio->qopt);
+ case TC_MQPRIO_MODE_CHANNEL:
+ return mlx5e_setup_tc_mqprio_channel(priv, mqprio);
+ default:
+ return -EOPNOTSUPP;
+ }
}
static int mlx5e_setup_tc_htb(struct mlx5e_priv *priv, struct tc_htb_qopt_offload *htb)
@@ -3493,7 +3048,10 @@ static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
priv, priv, true);
}
case TC_SETUP_QDISC_MQPRIO:
- return mlx5e_setup_tc_mqprio(priv, type_data);
+ mutex_lock(&priv->state_lock);
+ err = mlx5e_setup_tc_mqprio(priv, type_data);
+ mutex_unlock(&priv->state_lock);
+ return err;
case TC_SETUP_QDISC_HTB:
mutex_lock(&priv->state_lock);
err = mlx5e_setup_tc_htb(priv, type_data);
@@ -4582,7 +4140,7 @@ const struct net_device_ops mlx5e_netdev_ops = {
.ndo_set_features = mlx5e_set_features,
.ndo_fix_features = mlx5e_fix_features,
.ndo_change_mtu = mlx5e_change_nic_mtu,
- .ndo_do_ioctl = mlx5e_ioctl,
+ .ndo_eth_ioctl = mlx5e_ioctl,
.ndo_set_tx_maxrate = mlx5e_set_tx_maxrate,
.ndo_features_check = mlx5e_features_check,
.ndo_tx_timeout = mlx5e_tx_timeout,
@@ -4611,15 +4169,6 @@ const struct net_device_ops mlx5e_netdev_ops = {
.ndo_get_devlink_port = mlx5e_get_devlink_port,
};
-void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
- int num_channels)
-{
- int i;
-
- for (i = 0; i < len; i++)
- indirection_rqt[i] = i % num_channels;
-}
-
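
The removed helper implemented the uniform spreading that mlx5e_rss_params_indir_init_uniform() (used by the hairpin code later in this patch) now provides: slot i of the RSS indirection table simply points at channel i modulo the channel count. A standalone sketch, illustrative only:

#include <stdio.h>

#define INDIR_SIZE 8	/* real tables are larger, e.g. 256 slots */

int main(void)
{
	unsigned int indir[INDIR_SIZE];
	int i, num_channels = 3;

	for (i = 0; i < INDIR_SIZE; i++)
		indir[i] = i % num_channels;	/* round-robin over channels */

	for (i = 0; i < INDIR_SIZE; i++)
		printf("slot %d -> channel %u\n", i, indir[i]);
	return 0;
}
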
static u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
{
int i;
@@ -4632,24 +4181,8 @@ static u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeo
return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]);
}
-void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params,
- u16 num_channels)
-{
- enum mlx5e_traffic_types tt;
-
- rss_params->hfunc = ETH_RSS_HASH_TOP;
- netdev_rss_key_fill(rss_params->toeplitz_hash_key,
- sizeof(rss_params->toeplitz_hash_key));
- mlx5e_build_default_indir_rqt(rss_params->indirection_rqt,
- MLX5E_INDIR_RQT_SIZE, num_channels);
- for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
- rss_params->rx_hash_fields[tt] =
- tirc_default_config[tt].rx_hash_fields;
-}
-
void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 mtu)
{
- struct mlx5e_rss_params *rss_params = &priv->rss_params;
struct mlx5e_params *params = &priv->channels.params;
struct mlx5_core_dev *mdev = priv->mdev;
u8 rx_cq_period_mode;
@@ -4660,12 +4193,12 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16
params->hard_mtu = MLX5E_ETH_HARD_MTU;
params->num_channels = min_t(unsigned int, MLX5E_MAX_NUM_CHANNELS / 2,
priv->max_nch);
- params->num_tc = 1;
+ params->mqprio.num_tc = 1;
/* Set an initial non-zero value, so that mlx5e_select_queue won't
* divide by zero if called before first activating channels.
*/
- priv->num_tc_x_num_ch = params->num_channels * params->num_tc;
+ priv->num_tc_x_num_ch = params->num_channels * params->mqprio.num_tc;
/* SQ */
params->log_sq_size = is_kdump_kernel() ?
@@ -4709,10 +4242,7 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16
/* TX inline */
mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
- /* RSS */
- mlx5e_build_rss_params(rss_params, params->num_channels);
- params->tunneled_offload_en =
- mlx5e_tunnel_inner_ft_supported(mdev);
+ params->tunneled_offload_en = mlx5_tunnel_inner_ft_supported(mdev);
/* AF_XDP */
params->xsk = xsk;
@@ -4772,8 +4302,8 @@ static bool mlx5e_tunnel_any_tx_proto_supported(struct mlx5_core_dev *mdev)
{
int tt;
- for (tt = 0; tt < MLX5E_NUM_TUNNEL_TT; tt++) {
- if (mlx5e_tunnel_proto_supported_tx(mdev, mlx5e_get_proto_by_tunnel_type(tt)))
+ for (tt = 0; tt < MLX5_NUM_TUNNEL_TT; tt++) {
+ if (mlx5e_tunnel_proto_supported_tx(mdev, mlx5_get_proto_by_tunnel_type(tt)))
return true;
}
return (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev));
@@ -4812,7 +4342,14 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->hw_enc_features |= NETIF_F_HW_VLAN_CTAG_TX;
netdev->hw_enc_features |= NETIF_F_HW_VLAN_CTAG_RX;
+ /* Tunneled LRO is not supported in the driver, and the same RQs are
+ * shared between inner and outer TIRs, so the driver can't disable LRO
+ * for inner TIRs while having it enabled for outer TIRs. Due to this,
+ * block LRO altogether if the firmware declares tunneled LRO support.
+ */
if (!!MLX5_CAP_ETH(mdev, lro_cap) &&
+ !MLX5_CAP_ETH(mdev, tunnel_lro_vxlan) &&
+ !MLX5_CAP_ETH(mdev, tunnel_lro_gre) &&
mlx5e_check_fragmented_striding_rq_cap(mdev))
netdev->vlan_features |= NETIF_F_LRO;
@@ -4939,7 +4476,6 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- struct devlink_port *dl_port;
int err;
mlx5e_build_nic_params(priv, &priv->xsk, netdev->mtu);
@@ -4955,19 +4491,13 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
if (err)
mlx5_core_err(mdev, "TLS initialization failed, %d\n", err);
- dl_port = mlx5e_devlink_get_dl_port(priv);
- if (dl_port->registered)
- mlx5e_health_create_reporters(priv);
-
+ mlx5e_health_create_reporters(priv);
return 0;
}
static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
{
- struct devlink_port *dl_port = mlx5e_devlink_get_dl_port(priv);
-
- if (dl_port->registered)
- mlx5e_health_destroy_reporters(priv);
+ mlx5e_health_destroy_reporters(priv);
mlx5e_tls_cleanup(priv);
mlx5e_ipsec_cleanup(priv);
}
@@ -4975,9 +4505,14 @@ static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
- u16 max_nch = priv->max_nch;
+ enum mlx5e_rx_res_features features;
+ struct mlx5e_lro_param lro_param;
int err;
+ priv->rx_res = mlx5e_rx_res_alloc();
+ if (!priv->rx_res)
+ return -ENOMEM;
+
mlx5e_create_q_counters(priv);
err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
@@ -4986,42 +4521,20 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
goto err_destroy_q_counters;
}
- err = mlx5e_create_indirect_rqt(priv);
+ features = MLX5E_RX_RES_FEATURE_XSK | MLX5E_RX_RES_FEATURE_PTP;
+ if (priv->channels.params.tunneled_offload_en)
+ features |= MLX5E_RX_RES_FEATURE_INNER_FT;
+ lro_param = mlx5e_get_lro_param(&priv->channels.params);
+ err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, features,
+ priv->max_nch, priv->drop_rq.rqn, &lro_param,
+ priv->channels.params.num_channels);
if (err)
goto err_close_drop_rq;
- err = mlx5e_create_direct_rqts(priv, priv->direct_tir, max_nch);
- if (err)
- goto err_destroy_indirect_rqts;
-
- err = mlx5e_create_indirect_tirs(priv, true);
- if (err)
- goto err_destroy_direct_rqts;
-
- err = mlx5e_create_direct_tirs(priv, priv->direct_tir, max_nch);
- if (err)
- goto err_destroy_indirect_tirs;
-
- err = mlx5e_create_direct_rqts(priv, priv->xsk_tir, max_nch);
- if (unlikely(err))
- goto err_destroy_direct_tirs;
-
- err = mlx5e_create_direct_tirs(priv, priv->xsk_tir, max_nch);
- if (unlikely(err))
- goto err_destroy_xsk_rqts;
-
- err = mlx5e_create_direct_rqts(priv, &priv->ptp_tir, 1);
- if (err)
- goto err_destroy_xsk_tirs;
-
- err = mlx5e_create_direct_tirs(priv, &priv->ptp_tir, 1);
- if (err)
- goto err_destroy_ptp_rqt;
-
err = mlx5e_create_flow_steering(priv);
if (err) {
mlx5_core_warn(mdev, "create flow steering failed, %d\n", err);
- goto err_destroy_ptp_direct_tir;
+ goto err_destroy_rx_res;
}
err = mlx5e_tc_nic_init(priv);
@@ -5042,46 +4555,27 @@ err_tc_nic_cleanup:
mlx5e_tc_nic_cleanup(priv);
err_destroy_flow_steering:
mlx5e_destroy_flow_steering(priv);
-err_destroy_ptp_direct_tir:
- mlx5e_destroy_direct_tirs(priv, &priv->ptp_tir, 1);
-err_destroy_ptp_rqt:
- mlx5e_destroy_direct_rqts(priv, &priv->ptp_tir, 1);
-err_destroy_xsk_tirs:
- mlx5e_destroy_direct_tirs(priv, priv->xsk_tir, max_nch);
-err_destroy_xsk_rqts:
- mlx5e_destroy_direct_rqts(priv, priv->xsk_tir, max_nch);
-err_destroy_direct_tirs:
- mlx5e_destroy_direct_tirs(priv, priv->direct_tir, max_nch);
-err_destroy_indirect_tirs:
- mlx5e_destroy_indirect_tirs(priv);
-err_destroy_direct_rqts:
- mlx5e_destroy_direct_rqts(priv, priv->direct_tir, max_nch);
-err_destroy_indirect_rqts:
- mlx5e_destroy_rqt(priv, &priv->indir_rqt);
+err_destroy_rx_res:
+ mlx5e_rx_res_destroy(priv->rx_res);
err_close_drop_rq:
mlx5e_close_drop_rq(&priv->drop_rq);
err_destroy_q_counters:
mlx5e_destroy_q_counters(priv);
+ mlx5e_rx_res_free(priv->rx_res);
+ priv->rx_res = NULL;
return err;
}
static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
{
- u16 max_nch = priv->max_nch;
-
mlx5e_accel_cleanup_rx(priv);
mlx5e_tc_nic_cleanup(priv);
mlx5e_destroy_flow_steering(priv);
- mlx5e_destroy_direct_tirs(priv, &priv->ptp_tir, 1);
- mlx5e_destroy_direct_rqts(priv, &priv->ptp_tir, 1);
- mlx5e_destroy_direct_tirs(priv, priv->xsk_tir, max_nch);
- mlx5e_destroy_direct_rqts(priv, priv->xsk_tir, max_nch);
- mlx5e_destroy_direct_tirs(priv, priv->direct_tir, max_nch);
- mlx5e_destroy_indirect_tirs(priv);
- mlx5e_destroy_direct_rqts(priv, priv->direct_tir, max_nch);
- mlx5e_destroy_rqt(priv, &priv->indir_rqt);
+ mlx5e_rx_res_destroy(priv->rx_res);
mlx5e_close_drop_rq(&priv->drop_rq);
mlx5e_destroy_q_counters(priv);
+ mlx5e_rx_res_free(priv->rx_res);
+ priv->rx_res = NULL;
}
static int mlx5e_init_nic_tx(struct mlx5e_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index bf94bcb6fa5d..eb83f27850c7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -49,6 +49,7 @@
#include "en/devlink.h"
#include "fs_core.h"
#include "lib/mlx5.h"
+#include "lib/devcom.h"
#define CREATE_TRACE_POINTS
#include "diag/en_rep_tracepoint.h"
#include "en_accel/ipsec.h"
@@ -310,6 +311,8 @@ static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,
rpriv = mlx5e_rep_to_rep_priv(rep);
list_for_each_entry_safe(rep_sq, tmp, &rpriv->vport_sqs_list, list) {
mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule);
+ if (rep_sq->send_to_vport_rule_peer)
+ mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule_peer);
list_del(&rep_sq->list);
kfree(rep_sq);
}
@@ -319,6 +322,7 @@ static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
struct mlx5_eswitch_rep *rep,
u32 *sqns_array, int sqns_num)
{
+ struct mlx5_eswitch *peer_esw = NULL;
struct mlx5_flow_handle *flow_rule;
struct mlx5e_rep_priv *rpriv;
struct mlx5e_rep_sq *rep_sq;
@@ -329,6 +333,10 @@ static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
return 0;
rpriv = mlx5e_rep_to_rep_priv(rep);
+ if (mlx5_devcom_is_paired(esw->dev->priv.devcom, MLX5_DEVCOM_ESW_OFFLOADS))
+ peer_esw = mlx5_devcom_get_peer_data(esw->dev->priv.devcom,
+ MLX5_DEVCOM_ESW_OFFLOADS);
+
for (i = 0; i < sqns_num; i++) {
rep_sq = kzalloc(sizeof(*rep_sq), GFP_KERNEL);
if (!rep_sq) {
@@ -337,7 +345,7 @@ static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
}
/* Add re-inject rule to the PF/representor sqs */
- flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw, rep,
+ flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw, esw, rep,
sqns_array[i]);
if (IS_ERR(flow_rule)) {
err = PTR_ERR(flow_rule);
@@ -345,12 +353,34 @@ static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
goto out_err;
}
rep_sq->send_to_vport_rule = flow_rule;
+ rep_sq->sqn = sqns_array[i];
+
+ if (peer_esw) {
+ flow_rule = mlx5_eswitch_add_send_to_vport_rule(peer_esw, esw,
+ rep, sqns_array[i]);
+ if (IS_ERR(flow_rule)) {
+ err = PTR_ERR(flow_rule);
+ mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule);
+ kfree(rep_sq);
+ goto out_err;
+ }
+ rep_sq->send_to_vport_rule_peer = flow_rule;
+ }
+
list_add(&rep_sq->list, &rpriv->vport_sqs_list);
}
+
+ if (peer_esw)
+ mlx5_devcom_release_peer_data(esw->dev->priv.devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+
return 0;
out_err:
mlx5e_sqs2vport_stop(esw, rep);
+
+ if (peer_esw)
+ mlx5_devcom_release_peer_data(esw->dev->priv.devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+
return err;
}
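
The devcom bracketing above follows a strict acquire/use/release discipline: the peer eswitch reference taken via mlx5_devcom_get_peer_data() is dropped on the success path and on every error path alike. A small userspace sketch of that shape (all names are stand-ins):

#include <stdio.h>

static int peer_refs;	/* stand-in for the devcom reference count */

static int peer_acquire(void) { peer_refs++; return 1; /* 0 if not paired */ }
static void peer_release(void) { peer_refs--; }

static int do_work(int fail)
{
	int paired = peer_acquire();
	int err = 0;

	if (fail) {		/* e.g. a rule creation failed mid-way */
		err = -1;
		goto out;	/* the unwind must still release the peer */
	}
	/* ... create the local rule, then the peer rule ... */
out:
	if (paired)
		peer_release();
	return err;
}

int main(void)
{
	do_work(0);
	do_work(1);
	printf("outstanding peer refs: %d\n", peer_refs);	/* 0 */
	return 0;
}
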
@@ -364,7 +394,8 @@ int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
int err = -ENOMEM;
u32 *sqs;
- sqs = kcalloc(priv->channels.num * priv->channels.params.num_tc, sizeof(*sqs), GFP_KERNEL);
+ sqs = kcalloc(priv->channels.num * mlx5e_get_dcb_num_tc(&priv->channels.params),
+ sizeof(*sqs), GFP_KERNEL);
if (!sqs)
goto out;
@@ -581,13 +612,10 @@ static void mlx5e_build_rep_params(struct net_device *netdev)
params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
mlx5e_set_rx_cq_mode_params(params, cq_period_mode);
- params->num_tc = 1;
+ params->mqprio.num_tc = 1;
params->tunneled_offload_en = false;
mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
-
- /* RSS */
- mlx5e_build_rss_params(&priv->rss_params, params->num_channels);
}
static void mlx5e_build_rep_netdev(struct net_device *netdev,
@@ -651,25 +679,23 @@ static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_eswitch_rep *rep = rpriv->rep;
struct ttc_params ttc_params = {};
- int tt, err;
+ int err;
priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
MLX5_FLOW_NAMESPACE_KERNEL);
/* The inner_ttc in the ttc params is intentionally not set */
- ttc_params.any_tt_tirn = priv->direct_tir[0].tirn;
- mlx5e_set_ttc_ft_params(&ttc_params);
+ mlx5e_set_ttc_params(priv, &ttc_params, false);
if (rep->vport != MLX5_VPORT_UPLINK)
	/* To give uplink rep TTC a lower level for chaining from root ft */
ttc_params.ft_attr.level = MLX5E_TTC_FT_LEVEL + 1;
- for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
- ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn;
-
- err = mlx5e_create_ttc_table(priv, &ttc_params, &priv->fs.ttc);
- if (err) {
- netdev_err(priv->netdev, "Failed to create rep ttc table, err=%d\n", err);
+ priv->fs.ttc = mlx5_create_ttc_table(priv->mdev, &ttc_params);
+ if (IS_ERR(priv->fs.ttc)) {
+ err = PTR_ERR(priv->fs.ttc);
+ netdev_err(priv->netdev, "Failed to create rep ttc table, err=%d\n",
+ err);
return err;
}
return 0;
@@ -687,7 +713,7 @@ static int mlx5e_create_rep_root_ft(struct mlx5e_priv *priv)
	/* non-uplink reps will skip any bypass tables and go directly to
* their own ttc
*/
- rpriv->root_ft = priv->fs.ttc.ft.t;
+ rpriv->root_ft = mlx5_get_ttc_flow_table(priv->fs.ttc);
return 0;
}
@@ -760,9 +786,13 @@ int mlx5e_rep_bond_update(struct mlx5e_priv *priv, bool cleanup)
static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
- u16 max_nch = priv->max_nch;
+ struct mlx5e_lro_param lro_param;
int err;
+ priv->rx_res = mlx5e_rx_res_alloc();
+ if (!priv->rx_res)
+ return -ENOMEM;
+
mlx5e_init_l2_addr(priv);
err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
@@ -771,25 +801,16 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
return err;
}
- err = mlx5e_create_indirect_rqt(priv);
+ lro_param = mlx5e_get_lro_param(&priv->channels.params);
+ err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, 0,
+ priv->max_nch, priv->drop_rq.rqn, &lro_param,
+ priv->channels.params.num_channels);
if (err)
goto err_close_drop_rq;
- err = mlx5e_create_direct_rqts(priv, priv->direct_tir, max_nch);
- if (err)
- goto err_destroy_indirect_rqts;
-
- err = mlx5e_create_indirect_tirs(priv, false);
- if (err)
- goto err_destroy_direct_rqts;
-
- err = mlx5e_create_direct_tirs(priv, priv->direct_tir, max_nch);
- if (err)
- goto err_destroy_indirect_tirs;
-
err = mlx5e_create_rep_ttc_table(priv);
if (err)
- goto err_destroy_direct_tirs;
+ goto err_destroy_rx_res;
err = mlx5e_create_rep_root_ft(priv);
if (err)
@@ -806,33 +827,26 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
err_destroy_root_ft:
mlx5e_destroy_rep_root_ft(priv);
err_destroy_ttc_table:
- mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
-err_destroy_direct_tirs:
- mlx5e_destroy_direct_tirs(priv, priv->direct_tir, max_nch);
-err_destroy_indirect_tirs:
- mlx5e_destroy_indirect_tirs(priv);
-err_destroy_direct_rqts:
- mlx5e_destroy_direct_rqts(priv, priv->direct_tir, max_nch);
-err_destroy_indirect_rqts:
- mlx5e_destroy_rqt(priv, &priv->indir_rqt);
+ mlx5_destroy_ttc_table(priv->fs.ttc);
+err_destroy_rx_res:
+ mlx5e_rx_res_destroy(priv->rx_res);
err_close_drop_rq:
mlx5e_close_drop_rq(&priv->drop_rq);
+ mlx5e_rx_res_free(priv->rx_res);
+ priv->rx_res = NULL;
return err;
}
static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
{
- u16 max_nch = priv->max_nch;
-
mlx5e_ethtool_cleanup_steering(priv);
rep_vport_rx_rule_destroy(priv);
mlx5e_destroy_rep_root_ft(priv);
- mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
- mlx5e_destroy_direct_tirs(priv, priv->direct_tir, max_nch);
- mlx5e_destroy_indirect_tirs(priv);
- mlx5e_destroy_direct_rqts(priv, priv->direct_tir, max_nch);
- mlx5e_destroy_rqt(priv, &priv->indir_rqt);
+ mlx5_destroy_ttc_table(priv->fs.ttc);
+ mlx5e_rx_res_destroy(priv->rx_res);
mlx5e_close_drop_rq(&priv->drop_rq);
+ mlx5e_rx_res_free(priv->rx_res);
+ priv->rx_res = NULL;
}
static int mlx5e_init_ul_rep_rx(struct mlx5e_priv *priv)
@@ -1264,10 +1278,64 @@ static void *mlx5e_vport_rep_get_proto_dev(struct mlx5_eswitch_rep *rep)
return rpriv->netdev;
}
+static void mlx5e_vport_rep_event_unpair(struct mlx5_eswitch_rep *rep)
+{
+ struct mlx5e_rep_priv *rpriv;
+ struct mlx5e_rep_sq *rep_sq;
+
+ rpriv = mlx5e_rep_to_rep_priv(rep);
+ list_for_each_entry(rep_sq, &rpriv->vport_sqs_list, list) {
+ if (!rep_sq->send_to_vport_rule_peer)
+ continue;
+ mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule_peer);
+ rep_sq->send_to_vport_rule_peer = NULL;
+ }
+}
+
+static int mlx5e_vport_rep_event_pair(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep,
+ struct mlx5_eswitch *peer_esw)
+{
+ struct mlx5_flow_handle *flow_rule;
+ struct mlx5e_rep_priv *rpriv;
+ struct mlx5e_rep_sq *rep_sq;
+
+ rpriv = mlx5e_rep_to_rep_priv(rep);
+ list_for_each_entry(rep_sq, &rpriv->vport_sqs_list, list) {
+ if (rep_sq->send_to_vport_rule_peer)
+ continue;
+ flow_rule = mlx5_eswitch_add_send_to_vport_rule(peer_esw, esw, rep, rep_sq->sqn);
+ if (IS_ERR(flow_rule))
+ goto err_out;
+ rep_sq->send_to_vport_rule_peer = flow_rule;
+ }
+
+ return 0;
+err_out:
+ mlx5e_vport_rep_event_unpair(rep);
+ return PTR_ERR(flow_rule);
+}
+
+static int mlx5e_vport_rep_event(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep,
+ enum mlx5_switchdev_event event,
+ void *data)
+{
+ int err = 0;
+
+ if (event == MLX5_SWITCHDEV_EVENT_PAIR)
+ err = mlx5e_vport_rep_event_pair(esw, rep, data);
+ else if (event == MLX5_SWITCHDEV_EVENT_UNPAIR)
+ mlx5e_vport_rep_event_unpair(rep);
+
+ return err;
+}
+
static const struct mlx5_eswitch_rep_ops rep_ops = {
.load = mlx5e_vport_rep_load,
.unload = mlx5e_vport_rep_unload,
- .get_proto_dev = mlx5e_vport_rep_get_proto_dev
+ .get_proto_dev = mlx5e_vport_rep_get_proto_dev,
+ .event = mlx5e_vport_rep_event,
};
static int mlx5e_rep_probe(struct auxiliary_device *adev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
index 47a2dfb7792a..756f806401d7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
@@ -146,7 +146,7 @@ struct mlx5e_neigh_hash_entry {
*/
refcount_t refcnt;
- /* Save the last reported time offloaded trafic pass over one of the
+	/* Save the last reported time offloaded traffic passed over one of the
* neigh hash entry flows. Use it to periodically update the neigh
* 'used' value and avoid neigh deleting by the kernel.
*/
@@ -207,6 +207,8 @@ struct mlx5e_encap_entry {
struct mlx5e_rep_sq {
struct mlx5_flow_handle *send_to_vport_rule;
+ struct mlx5_flow_handle *send_to_vport_rule_peer;
+ u32 sqn;
struct list_head list;
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index d273758255c3..9465a51b6e66 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -34,19 +34,13 @@
#include <net/flow_offload.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
-#include <net/tc_act/tc_gact.h>
-#include <net/tc_act/tc_skbedit.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <linux/refcount.h>
#include <linux/completion.h>
-#include <net/tc_act/tc_mirred.h>
-#include <net/tc_act/tc_vlan.h>
-#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_csum.h>
-#include <net/tc_act/tc_mpls.h>
#include <net/psample.h>
#include <net/arp.h>
#include <net/ipv6_stubs.h>
@@ -103,7 +97,7 @@ struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
[MARK_TO_REG] = mark_to_reg_ct,
[LABELS_TO_REG] = labels_to_reg_ct,
[FTEID_TO_REG] = fteid_to_reg_ct,
- /* For NIC rules we store the retore metadata directly
+ /* For NIC rules we store the restore metadata directly
* into reg_b that is passed to SW since we don't
* jump between steering domains.
*/
@@ -340,12 +334,12 @@ struct mlx5e_hairpin {
struct mlx5_core_dev *func_mdev;
struct mlx5e_priv *func_priv;
u32 tdn;
- u32 tirn;
+ struct mlx5e_tir direct_tir;
int num_channels;
struct mlx5e_rqt indir_rqt;
- u32 indir_tirn[MLX5E_NUM_INDIR_TIRS];
- struct mlx5e_ttc_table ttc;
+ struct mlx5e_tir indir_tir[MLX5E_NUM_INDIR_TIRS];
+ struct mlx5_ttc_table *ttc;
};
struct mlx5e_hairpin_entry {
@@ -482,126 +476,101 @@ struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex)
static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
{
- u32 in[MLX5_ST_SZ_DW(create_tir_in)] = {};
- void *tirc;
+ struct mlx5e_tir_builder *builder;
int err;
+ builder = mlx5e_tir_builder_alloc(false);
+ if (!builder)
+ return -ENOMEM;
+
err = mlx5_core_alloc_transport_domain(hp->func_mdev, &hp->tdn);
if (err)
- goto alloc_tdn_err;
-
- tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
-
- MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
- MLX5_SET(tirc, tirc, inline_rqn, hp->pair->rqn[0]);
- MLX5_SET(tirc, tirc, transport_domain, hp->tdn);
+ goto out;
- err = mlx5_core_create_tir(hp->func_mdev, in, &hp->tirn);
+ mlx5e_tir_builder_build_inline(builder, hp->tdn, hp->pair->rqn[0]);
+ err = mlx5e_tir_init(&hp->direct_tir, builder, hp->func_mdev, false);
if (err)
goto create_tir_err;
- return 0;
+out:
+ mlx5e_tir_builder_free(builder);
+ return err;
create_tir_err:
mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
-alloc_tdn_err:
- return err;
+
+ goto out;
}
static void mlx5e_hairpin_destroy_transport(struct mlx5e_hairpin *hp)
{
- mlx5_core_destroy_tir(hp->func_mdev, hp->tirn);
+ mlx5e_tir_destroy(&hp->direct_tir);
mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
}
-static int mlx5e_hairpin_fill_rqt_rqns(struct mlx5e_hairpin *hp, void *rqtc)
-{
- struct mlx5e_priv *priv = hp->func_priv;
- int i, ix, sz = MLX5E_INDIR_RQT_SIZE;
- u32 *indirection_rqt, rqn;
-
- indirection_rqt = kcalloc(sz, sizeof(*indirection_rqt), GFP_KERNEL);
- if (!indirection_rqt)
- return -ENOMEM;
-
- mlx5e_build_default_indir_rqt(indirection_rqt, sz,
- hp->num_channels);
-
- for (i = 0; i < sz; i++) {
- ix = i;
- if (priv->rss_params.hfunc == ETH_RSS_HASH_XOR)
- ix = mlx5e_bits_invert(i, ilog2(sz));
- ix = indirection_rqt[ix];
- rqn = hp->pair->rqn[ix];
- MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
- }
-
- kfree(indirection_rqt);
- return 0;
-}
-
static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
{
- int inlen, err, sz = MLX5E_INDIR_RQT_SIZE;
struct mlx5e_priv *priv = hp->func_priv;
struct mlx5_core_dev *mdev = priv->mdev;
- void *rqtc;
- u32 *in;
+ struct mlx5e_rss_params_indir *indir;
+ int err;
- inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
- in = kvzalloc(inlen, GFP_KERNEL);
- if (!in)
+ indir = kvmalloc(sizeof(*indir), GFP_KERNEL);
+ if (!indir)
return -ENOMEM;
- rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
-
- MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
- MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
-
- err = mlx5e_hairpin_fill_rqt_rqns(hp, rqtc);
- if (err)
- goto out;
-
- err = mlx5_core_create_rqt(mdev, in, inlen, &hp->indir_rqt.rqtn);
- if (!err)
- hp->indir_rqt.enabled = true;
+ mlx5e_rss_params_indir_init_uniform(indir, hp->num_channels);
+ err = mlx5e_rqt_init_indir(&hp->indir_rqt, mdev, hp->pair->rqn, hp->num_channels,
+ mlx5e_rx_res_get_current_hash(priv->rx_res).hfunc,
+ indir);
-out:
- kvfree(in);
+ kvfree(indir);
return err;
}
static int mlx5e_hairpin_create_indirect_tirs(struct mlx5e_hairpin *hp)
{
struct mlx5e_priv *priv = hp->func_priv;
- u32 in[MLX5_ST_SZ_DW(create_tir_in)];
- int tt, i, err;
- void *tirc;
+ struct mlx5e_rss_params_hash rss_hash;
+ enum mlx5_traffic_types tt, max_tt;
+ struct mlx5e_tir_builder *builder;
+ int err = 0;
+
+ builder = mlx5e_tir_builder_alloc(false);
+ if (!builder)
+ return -ENOMEM;
+
+ rss_hash = mlx5e_rx_res_get_current_hash(priv->rx_res);
for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
- struct mlx5e_tirc_config ttconfig = mlx5e_tirc_get_default_config(tt);
+ struct mlx5e_rss_params_traffic_type rss_tt;
- memset(in, 0, MLX5_ST_SZ_BYTES(create_tir_in));
- tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
+ rss_tt = mlx5e_rss_get_default_tt_config(tt);
- MLX5_SET(tirc, tirc, transport_domain, hp->tdn);
- MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
- MLX5_SET(tirc, tirc, indirect_table, hp->indir_rqt.rqtn);
- mlx5e_build_indir_tir_ctx_hash(&priv->rss_params, &ttconfig, tirc, false);
+ mlx5e_tir_builder_build_rqt(builder, hp->tdn,
+ mlx5e_rqt_get_rqtn(&hp->indir_rqt),
+ false);
+ mlx5e_tir_builder_build_rss(builder, &rss_hash, &rss_tt, false);
- err = mlx5_core_create_tir(hp->func_mdev, in,
- &hp->indir_tirn[tt]);
+ err = mlx5e_tir_init(&hp->indir_tir[tt], builder, hp->func_mdev, false);
if (err) {
mlx5_core_warn(hp->func_mdev, "create indirect tirs failed, %d\n", err);
goto err_destroy_tirs;
}
+
+ mlx5e_tir_builder_clear(builder);
}
- return 0;
-err_destroy_tirs:
- for (i = 0; i < tt; i++)
- mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[i]);
+out:
+ mlx5e_tir_builder_free(builder);
return err;
+
+err_destroy_tirs:
+ max_tt = tt;
+ for (tt = 0; tt < max_tt; tt++)
+ mlx5e_tir_destroy(&hp->indir_tir[tt]);
+
+ goto out;
}
static void mlx5e_hairpin_destroy_indirect_tirs(struct mlx5e_hairpin *hp)
@@ -609,7 +578,7 @@ static void mlx5e_hairpin_destroy_indirect_tirs(struct mlx5e_hairpin *hp)
int tt;
for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
- mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[tt]);
+ mlx5e_tir_destroy(&hp->indir_tir[tt]);
}
static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp,
@@ -620,12 +589,16 @@ static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp,
memset(ttc_params, 0, sizeof(*ttc_params));
- ttc_params->any_tt_tirn = hp->tirn;
-
- for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
- ttc_params->indir_tirn[tt] = hp->indir_tirn[tt];
+ ttc_params->ns = mlx5_get_flow_namespace(hp->func_mdev,
+ MLX5_FLOW_NAMESPACE_KERNEL);
+ for (tt = 0; tt < MLX5_NUM_TT; tt++) {
+ ttc_params->dests[tt].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+ ttc_params->dests[tt].tir_num =
+ tt == MLX5_TT_ANY ?
+ mlx5e_tir_get_tirn(&hp->direct_tir) :
+ mlx5e_tir_get_tirn(&hp->indir_tir[tt]);
+ }
- ft_attr->max_fte = MLX5E_TTC_TABLE_SIZE;
ft_attr->level = MLX5E_TC_TTC_FT_LEVEL;
ft_attr->prio = MLX5E_TC_PRIO;
}
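
Destination wiring in the function above: every traffic type resolves to its hashed (indirect) TIR, except the catch-all MLX5_TT_ANY, which takes the hairpin's direct TIR. A toy version with made-up TIR numbers:

#include <stdio.h>

enum { TT_IPV4_TCP, TT_IPV6_TCP, TT_ANY, NUM_TT };	/* tiny stand-in for MLX5_NUM_TT */

int main(void)
{
	int direct_tirn = 100;			/* illustrative TIR numbers */
	int indir_tirn[NUM_TT] = { 200, 201, 202 };
	int tt;

	for (tt = 0; tt < NUM_TT; tt++) {
		int tirn = tt == TT_ANY ? direct_tirn : indir_tirn[tt];

		printf("tt %d -> tirn %d\n", tt, tirn);
	}
	return 0;
}
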
@@ -645,30 +618,31 @@ static int mlx5e_hairpin_rss_init(struct mlx5e_hairpin *hp)
goto err_create_indirect_tirs;
mlx5e_hairpin_set_ttc_params(hp, &ttc_params);
- err = mlx5e_create_ttc_table(priv, &ttc_params, &hp->ttc);
- if (err)
+ hp->ttc = mlx5_create_ttc_table(priv->mdev, &ttc_params);
+ if (IS_ERR(hp->ttc)) {
+ err = PTR_ERR(hp->ttc);
goto err_create_ttc_table;
+ }
netdev_dbg(priv->netdev, "add hairpin: using %d channels rss ttc table id %x\n",
- hp->num_channels, hp->ttc.ft.t->id);
+ hp->num_channels,
+		   mlx5_get_ttc_flow_table(hp->ttc)->id);
return 0;
err_create_ttc_table:
mlx5e_hairpin_destroy_indirect_tirs(hp);
err_create_indirect_tirs:
- mlx5e_destroy_rqt(priv, &hp->indir_rqt);
+ mlx5e_rqt_destroy(&hp->indir_rqt);
return err;
}
static void mlx5e_hairpin_rss_cleanup(struct mlx5e_hairpin *hp)
{
- struct mlx5e_priv *priv = hp->func_priv;
-
- mlx5e_destroy_ttc_table(priv, &hp->ttc);
+ mlx5_destroy_ttc_table(hp->ttc);
mlx5e_hairpin_destroy_indirect_tirs(hp);
- mlx5e_destroy_rqt(priv, &hp->indir_rqt);
+ mlx5e_rqt_destroy(&hp->indir_rqt);
}
static struct mlx5e_hairpin *
@@ -903,16 +877,17 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
}
netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x prio %d (log) data %d packets %d\n",
- hp->tirn, hp->pair->rqn[0],
+ mlx5e_tir_get_tirn(&hp->direct_tir), hp->pair->rqn[0],
dev_name(hp->pair->peer_mdev->device),
hp->pair->sqn[0], match_prio, params.log_data_size, params.log_num_packets);
attach_flow:
if (hpe->hp->num_channels > 1) {
flow_flag_set(flow, HAIRPIN_RSS);
- flow->attr->nic_attr->hairpin_ft = hpe->hp->ttc.ft.t;
+ flow->attr->nic_attr->hairpin_ft =
+ mlx5_get_ttc_flow_table(hpe->hp->ttc);
} else {
- flow->attr->nic_attr->hairpin_tirn = hpe->hp->tirn;
+ flow->attr->nic_attr->hairpin_tirn = mlx5e_tir_get_tirn(&hpe->hp->direct_tir);
}
flow->hpe = hpe;
@@ -1056,15 +1031,17 @@ err_ft_get:
static int
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow_parse_attr *parse_attr,
struct mlx5e_tc_flow *flow,
struct netlink_ext_ack *extack)
{
+ struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5_flow_attr *attr = flow->attr;
struct mlx5_core_dev *dev = priv->mdev;
- struct mlx5_fc *counter = NULL;
+ struct mlx5_fc *counter;
int err;
+ parse_attr = attr->parse_attr;
+
if (flow_flag_test(flow, HAIRPIN)) {
err = mlx5e_hairpin_flow_add(priv, flow, parse_attr, extack);
if (err)
@@ -1384,9 +1361,9 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
bool vf_tun = false, encap_valid = true;
struct net_device *encap_dev = NULL;
struct mlx5_esw_flow_attr *esw_attr;
- struct mlx5_fc *counter = NULL;
struct mlx5e_rep_priv *rpriv;
struct mlx5e_priv *out_priv;
+ struct mlx5_fc *counter;
u32 max_prio, max_chain;
int err = 0;
int out_index;
@@ -2471,7 +2448,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_3;
}
}
- /* Currenlty supported only for MPLS over UDP */
+ /* Currently supported only for MPLS over UDP */
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS) &&
!netif_is_bareudp(filter_dev)) {
NL_SET_ERR_MSG_MOD(extack,
@@ -2725,7 +2702,9 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
if (s_mask && a_mask) {
NL_SET_ERR_MSG_MOD(extack,
"can't set and add to the same HW field");
- printk(KERN_WARNING "mlx5: can't set and add to the same HW field (%x)\n", f->field);
+ netdev_warn(priv->netdev,
+ "mlx5: can't set and add to the same HW field (%x)\n",
+ f->field);
return -EOPNOTSUPP;
}
@@ -2764,8 +2743,9 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
if (first < next_z && next_z < last) {
NL_SET_ERR_MSG_MOD(extack,
"rewrite of few sub-fields isn't supported");
- printk(KERN_WARNING "mlx5: rewrite of few sub-fields (mask %lx) isn't offloaded\n",
- mask);
+ netdev_warn(priv->netdev,
+ "mlx5: rewrite of few sub-fields (mask %lx) isn't offloaded\n",
+ mask);
return -EOPNOTSUPP;
}
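
The first/next_z/last test above rejects masks whose set bits are not contiguous, since a hole in the mask would mean rewriting only part of a sub-field. A standalone sketch of the same bit logic, open-coded instead of the kernel's find_*_bit helpers, assuming a non-empty mask:

#include <stdio.h>

static int lowest_set(unsigned long long m)
{
	int i;

	for (i = 0; i < 64; i++)
		if (m >> i & 1)
			return i;
	return -1;
}

static int lowest_zero_from(unsigned long long m, int from)
{
	int i;

	for (i = from; i < 64; i++)
		if (!(m >> i & 1))
			return i;
	return 64;
}

static int highest_set(unsigned long long m)
{
	int i;

	for (i = 63; i >= 0; i--)
		if (m >> i & 1)
			return i;
	return -1;
}

static int offloadable(unsigned long long mask)
{
	int first = lowest_set(mask);
	int next_z = lowest_zero_from(mask, first);
	int last = highest_set(mask);

	/* a zero bit strictly between the first and last set bits is a hole */
	return !(first < next_z && next_z < last);
}

int main(void)
{
	printf("%d\n", offloadable(0xf0));	/* 1: contiguous mask */
	printf("%d\n", offloadable(0xd0));	/* 0: partial sub-field rewrite */
	return 0;
}
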
@@ -3352,10 +3332,10 @@ static int validate_goto_chain(struct mlx5e_priv *priv,
static int parse_tc_nic_actions(struct mlx5e_priv *priv,
struct flow_action *flow_action,
- struct mlx5e_tc_flow_parse_attr *parse_attr,
struct mlx5e_tc_flow *flow,
struct netlink_ext_ack *extack)
{
+ struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5_flow_attr *attr = flow->attr;
struct pedit_headers_action hdrs[2] = {};
const struct flow_action_entry *act;
@@ -3371,8 +3351,8 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
nic_attr = attr->nic_attr;
-
nic_attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
+ parse_attr = attr->parse_attr;
flow_action_for_each(i, act, flow_action) {
switch (act->id) {
@@ -3381,10 +3361,8 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
MLX5_FLOW_CONTEXT_ACTION_COUNT;
break;
case FLOW_ACTION_DROP:
- action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
- if (MLX5_CAP_FLOWTABLE(priv->mdev,
- flow_table_properties_nic_receive.flow_counter))
- action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
break;
case FLOW_ACTION_MANGLE:
case FLOW_ACTION_ADD:
@@ -3425,7 +3403,7 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
"device is not on same HW, can't offload");
netdev_warn(priv->netdev, "device %s not on same HW, can't offload\n",
peer_dev->name);
- return -EINVAL;
+ return -EOPNOTSUPP;
}
}
break;
@@ -3435,7 +3413,7 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
NL_SET_ERR_MSG_MOD(extack,
"Bad flow mark - only 16 bit is supported");
- return -EINVAL;
+ return -EOPNOTSUPP;
}
nic_attr->flow_tag = mark;
@@ -3732,8 +3710,7 @@ static int verify_uplink_forwarding(struct mlx5e_priv *priv,
static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
struct flow_action *flow_action,
struct mlx5e_tc_flow *flow,
- struct netlink_ext_ack *extack,
- struct net_device *filter_dev)
+ struct netlink_ext_ack *extack)
{
struct pedit_headers_action hdrs[2] = {};
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
@@ -3798,7 +3775,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
"mpls pop supported only as first action");
return -EOPNOTSUPP;
}
- if (!netif_is_bareudp(filter_dev)) {
+ if (!netif_is_bareudp(parse_attr->filter_dev)) {
NL_SET_ERR_MSG_MOD(extack,
"mpls pop supported only on bareudp devices");
return -EOPNOTSUPP;
@@ -3947,7 +3924,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
"devices %s %s not on same switch HW, can't offload forwarding\n",
priv->netdev->name,
out_dev->name);
- return -EINVAL;
+ return -EOPNOTSUPP;
}
}
break;
@@ -4300,7 +4277,7 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
if (err)
goto err_free;
- err = parse_tc_fdb_actions(priv, &rule->action, flow, extack, filter_dev);
+ err = parse_tc_fdb_actions(priv, &rule->action, flow, extack);
if (err)
goto err_free;
@@ -4446,11 +4423,11 @@ mlx5e_add_nic_flow(struct mlx5e_priv *priv,
if (err)
goto err_free;
- err = parse_tc_nic_actions(priv, &rule->action, parse_attr, flow, extack);
+ err = parse_tc_nic_actions(priv, &rule->action, flow, extack);
if (err)
goto err_free;
- err = mlx5e_tc_add_nic_flow(priv, parse_attr, flow, extack);
+ err = mlx5e_tc_add_nic_flow(priv, flow, extack);
if (err)
goto err_free;
@@ -4877,6 +4854,7 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
struct mlx5_core_dev *dev = priv->mdev;
struct mapping_ctx *chains_mapping;
struct mlx5_chains_attr attr = {};
+ u64 mapping_id;
int err;
mlx5e_mod_hdr_tbl_init(&tc->mod_hdr);
@@ -4890,8 +4868,12 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
lockdep_set_class(&tc->ht.mutex, &tc_ht_lock_key);
- chains_mapping = mapping_create(sizeof(struct mlx5_mapped_obj),
- MLX5E_TC_TABLE_CHAIN_TAG_MASK, true);
+ mapping_id = mlx5_query_nic_system_image_guid(dev);
+
+ chains_mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_CHAIN,
+ sizeof(struct mlx5_mapped_obj),
+ MLX5E_TC_TABLE_CHAIN_TAG_MASK, true);
+
if (IS_ERR(chains_mapping)) {
err = PTR_ERR(chains_mapping);
goto err_mapping;
@@ -4980,6 +4962,7 @@ int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
struct mapping_ctx *mapping;
struct mlx5_eswitch *esw;
struct mlx5e_priv *priv;
+ u64 mapping_id;
int err = 0;
uplink_priv = container_of(tc_ht, struct mlx5_rep_uplink_priv, tc_ht);
@@ -4996,8 +4979,12 @@ int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
uplink_priv->esw_psample = mlx5_esw_sample_init(netdev_priv(priv->netdev));
#endif
- mapping = mapping_create(sizeof(struct tunnel_match_key),
- TUNNEL_INFO_BITS_MASK, true);
+ mapping_id = mlx5_query_nic_system_image_guid(esw->dev);
+
+ mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_TUNNEL,
+ sizeof(struct tunnel_match_key),
+ TUNNEL_INFO_BITS_MASK, true);
+
if (IS_ERR(mapping)) {
err = PTR_ERR(mapping);
goto err_tun_mapping;
@@ -5005,7 +4992,8 @@ int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
uplink_priv->tunnel_mapping = mapping;
/* 0xFFF is reserved for stack devices slow path table mark */
- mapping = mapping_create(sz_enc_opts, ENC_OPTS_BITS_MASK - 1, true);
+ mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_TUNNEL_ENC_OPTS,
+ sz_enc_opts, ENC_OPTS_BITS_MASK - 1, true);
if (IS_ERR(mapping)) {
err = PTR_ERR(mapping);
goto err_enc_opts_mapping;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c
index 505bf811984a..2e504c7461c6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c
@@ -15,6 +15,15 @@ static void esw_acl_egress_ofld_fwd2vport_destroy(struct mlx5_vport *vport)
vport->egress.offloads.fwd_rule = NULL;
}
+static void esw_acl_egress_ofld_bounce_rule_destroy(struct mlx5_vport *vport)
+{
+ if (!vport->egress.offloads.bounce_rule)
+ return;
+
+ mlx5_del_flow_rules(vport->egress.offloads.bounce_rule);
+ vport->egress.offloads.bounce_rule = NULL;
+}
+
static int esw_acl_egress_ofld_fwd2vport_create(struct mlx5_eswitch *esw,
struct mlx5_vport *vport,
struct mlx5_flow_destination *fwd_dest)
@@ -87,6 +96,7 @@ static void esw_acl_egress_ofld_rules_destroy(struct mlx5_vport *vport)
{
esw_acl_egress_vlan_destroy(vport);
esw_acl_egress_ofld_fwd2vport_destroy(vport);
+ esw_acl_egress_ofld_bounce_rule_destroy(vport);
}
static int esw_acl_egress_ofld_groups_create(struct mlx5_eswitch *esw,
@@ -145,6 +155,12 @@ static void esw_acl_egress_ofld_groups_destroy(struct mlx5_vport *vport)
mlx5_destroy_flow_group(vport->egress.offloads.fwd_grp);
vport->egress.offloads.fwd_grp = NULL;
}
+
+ if (!IS_ERR_OR_NULL(vport->egress.offloads.bounce_grp)) {
+ mlx5_destroy_flow_group(vport->egress.offloads.bounce_grp);
+ vport->egress.offloads.bounce_grp = NULL;
+ }
+
esw_acl_egress_vlan_grp_destroy(vport);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
index 69a3630818d7..7e221038df8d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
@@ -5,6 +5,7 @@
#include <linux/notifier.h>
#include <net/netevent.h>
#include <net/switchdev.h>
+#include "lib/devcom.h"
#include "bridge.h"
#include "eswitch.h"
#include "bridge_priv.h"
@@ -56,7 +57,6 @@ struct mlx5_esw_bridge {
struct list_head fdb_list;
struct rhashtable fdb_ht;
- struct xarray vports;
struct mlx5_flow_table *egress_ft;
struct mlx5_flow_group *egress_vlan_fg;
@@ -77,6 +77,15 @@ mlx5_esw_bridge_fdb_offload_notify(struct net_device *dev, const unsigned char *
call_switchdev_notifiers(val, dev, &send_info.info, NULL);
}
+static void
+mlx5_esw_bridge_fdb_del_notify(struct mlx5_esw_bridge_fdb_entry *entry)
+{
+ if (!(entry->flags & (MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER | MLX5_ESW_BRIDGE_FLAG_PEER)))
+ mlx5_esw_bridge_fdb_offload_notify(entry->dev, entry->key.addr,
+ entry->key.vid,
+ SWITCHDEV_FDB_DEL_TO_BRIDGE);
+}
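
The new helper gates deletion notifications on the entry flags: only entries learned by hardware on the local eswitch, i.e. neither user-added nor mirrored from a peer, report SWITCHDEV_FDB_DEL_TO_BRIDGE. Sketched standalone (flag names mirror the driver's, but the program is illustrative):

#include <stdio.h>

#define FLAG_ADDED_BY_USER	(1u << 0)
#define FLAG_PEER		(1u << 1)

static int should_notify(unsigned int flags)
{
	/* only locally HW-learned entries notify the bridge on deletion */
	return !(flags & (FLAG_ADDED_BY_USER | FLAG_PEER));
}

int main(void)
{
	printf("%d %d %d\n",
	       should_notify(0),			/* 1 */
	       should_notify(FLAG_ADDED_BY_USER),	/* 0 */
	       should_notify(FLAG_PEER));		/* 0 */
	return 0;
}
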
+
static struct mlx5_flow_table *
mlx5_esw_bridge_table_create(int max_fte, u32 level, struct mlx5_eswitch *esw)
{
@@ -400,9 +409,10 @@ mlx5_esw_bridge_egress_table_cleanup(struct mlx5_esw_bridge *bridge)
}
static struct mlx5_flow_handle *
-mlx5_esw_bridge_ingress_flow_create(u16 vport_num, const unsigned char *addr,
- struct mlx5_esw_bridge_vlan *vlan, u32 counter_id,
- struct mlx5_esw_bridge *bridge)
+mlx5_esw_bridge_ingress_flow_with_esw_create(u16 vport_num, const unsigned char *addr,
+ struct mlx5_esw_bridge_vlan *vlan, u32 counter_id,
+ struct mlx5_esw_bridge *bridge,
+ struct mlx5_eswitch *esw)
{
struct mlx5_esw_bridge_offloads *br_offloads = bridge->br_offloads;
struct mlx5_flow_act flow_act = {
@@ -430,7 +440,7 @@ mlx5_esw_bridge_ingress_flow_create(u16 vport_num, const unsigned char *addr,
MLX5_SET(fte_match_param, rule_spec->match_criteria,
misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters_2.metadata_reg_c_0,
- mlx5_eswitch_get_vport_metadata_for_match(br_offloads->esw, vport_num));
+ mlx5_eswitch_get_vport_metadata_for_match(esw, vport_num));
if (vlan && vlan->pkt_reformat_push) {
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
@@ -459,6 +469,35 @@ mlx5_esw_bridge_ingress_flow_create(u16 vport_num, const unsigned char *addr,
}
static struct mlx5_flow_handle *
+mlx5_esw_bridge_ingress_flow_create(u16 vport_num, const unsigned char *addr,
+ struct mlx5_esw_bridge_vlan *vlan, u32 counter_id,
+ struct mlx5_esw_bridge *bridge)
+{
+ return mlx5_esw_bridge_ingress_flow_with_esw_create(vport_num, addr, vlan, counter_id,
+ bridge, bridge->br_offloads->esw);
+}
+
+static struct mlx5_flow_handle *
+mlx5_esw_bridge_ingress_flow_peer_create(u16 vport_num, const unsigned char *addr,
+ struct mlx5_esw_bridge_vlan *vlan, u32 counter_id,
+ struct mlx5_esw_bridge *bridge)
+{
+ struct mlx5_devcom *devcom = bridge->br_offloads->esw->dev->priv.devcom;
+	struct mlx5_flow_handle *handle;
+ struct mlx5_eswitch *peer_esw;
+
+ peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+ if (!peer_esw)
+ return ERR_PTR(-ENODEV);
+
+ handle = mlx5_esw_bridge_ingress_flow_with_esw_create(vport_num, addr, vlan, counter_id,
+ bridge, peer_esw);
+
+ mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+ return handle;
+}
+
+static struct mlx5_flow_handle *
mlx5_esw_bridge_ingress_filter_flow_create(u16 vport_num, const unsigned char *addr,
struct mlx5_esw_bridge *bridge)
{
@@ -505,7 +544,7 @@ mlx5_esw_bridge_ingress_filter_flow_create(u16 vport_num, const unsigned char *a
}
static struct mlx5_flow_handle *
-mlx5_esw_bridge_egress_flow_create(u16 vport_num, const unsigned char *addr,
+mlx5_esw_bridge_egress_flow_create(u16 vport_num, u16 esw_owner_vhca_id, const unsigned char *addr,
struct mlx5_esw_bridge_vlan *vlan,
struct mlx5_esw_bridge *bridge)
{
@@ -550,6 +589,10 @@ mlx5_esw_bridge_egress_flow_create(u16 vport_num, const unsigned char *addr,
vlan->vid);
}
+ if (MLX5_CAP_ESW(bridge->br_offloads->esw->dev, merged_eswitch)) {
+ dest.vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID;
+ dest.vport.vhca_id = esw_owner_vhca_id;
+ }
handle = mlx5_add_flow_rules(bridge->egress_ft, rule_spec, &flow_act, &dest, 1);
kvfree(rule_spec);
@@ -576,7 +619,6 @@ static struct mlx5_esw_bridge *mlx5_esw_bridge_create(int ifindex,
goto err_fdb_ht;
INIT_LIST_HEAD(&bridge->fdb_list);
- xa_init(&bridge->vports);
bridge->ifindex = ifindex;
bridge->refcnt = 1;
bridge->ageing_time = clock_t_to_jiffies(BR_DEFAULT_AGEING_TIME);
@@ -603,7 +645,6 @@ static void mlx5_esw_bridge_put(struct mlx5_esw_bridge_offloads *br_offloads,
return;
mlx5_esw_bridge_egress_table_cleanup(bridge);
- WARN_ON(!xa_empty(&bridge->vports));
list_del(&bridge->list);
rhashtable_destroy(&bridge->fdb_ht);
kvfree(bridge);
@@ -639,30 +680,40 @@ mlx5_esw_bridge_lookup(int ifindex, struct mlx5_esw_bridge_offloads *br_offloads
return bridge;
}
+static unsigned long mlx5_esw_bridge_port_key_from_data(u16 vport_num, u16 esw_owner_vhca_id)
+{
+ return vport_num | (unsigned long)esw_owner_vhca_id << sizeof(vport_num) * BITS_PER_BYTE;
+}
+
+static unsigned long mlx5_esw_bridge_port_key(struct mlx5_esw_bridge_port *port)
+{
+ return mlx5_esw_bridge_port_key_from_data(port->vport_num, port->esw_owner_vhca_id);
+}
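
The xarray key above packs the identifying pair into one scalar: vport_num in the low 16 bits, esw_owner_vhca_id in the next 16, so ports from both eswitches can share br_offloads->ports without colliding. A quick demonstration of the same packing outside the kernel:

#include <stdio.h>

#define BITS_PER_BYTE 8

static unsigned long port_key(unsigned short vport_num,
			      unsigned short esw_owner_vhca_id)
{
	return vport_num |
	       (unsigned long)esw_owner_vhca_id << sizeof(vport_num) * BITS_PER_BYTE;
}

int main(void)
{
	unsigned long key = port_key(5, 0x1234);

	printf("key=%#lx vport=%lu vhca_id=%#lx\n",
	       key, key & 0xffff, key >> 16);	/* key=0x12340005 vport=5 vhca_id=0x1234 */
	return 0;
}
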
+
static int mlx5_esw_bridge_port_insert(struct mlx5_esw_bridge_port *port,
- struct mlx5_esw_bridge *bridge)
+ struct mlx5_esw_bridge_offloads *br_offloads)
{
- return xa_insert(&bridge->vports, port->vport_num, port, GFP_KERNEL);
+ return xa_insert(&br_offloads->ports, mlx5_esw_bridge_port_key(port), port, GFP_KERNEL);
}
static struct mlx5_esw_bridge_port *
-mlx5_esw_bridge_port_lookup(u16 vport_num, struct mlx5_esw_bridge *bridge)
+mlx5_esw_bridge_port_lookup(u16 vport_num, u16 esw_owner_vhca_id,
+ struct mlx5_esw_bridge_offloads *br_offloads)
{
- return xa_load(&bridge->vports, vport_num);
+ return xa_load(&br_offloads->ports, mlx5_esw_bridge_port_key_from_data(vport_num,
+ esw_owner_vhca_id));
}
static void mlx5_esw_bridge_port_erase(struct mlx5_esw_bridge_port *port,
- struct mlx5_esw_bridge *bridge)
+ struct mlx5_esw_bridge_offloads *br_offloads)
{
- xa_erase(&bridge->vports, port->vport_num);
+ xa_erase(&br_offloads->ports, mlx5_esw_bridge_port_key(port));
}
-static void mlx5_esw_bridge_fdb_entry_refresh(unsigned long lastuse,
- struct mlx5_esw_bridge_fdb_entry *entry)
+static void mlx5_esw_bridge_fdb_entry_refresh(struct mlx5_esw_bridge_fdb_entry *entry)
{
trace_mlx5_esw_bridge_fdb_entry_refresh(entry);
- entry->lastuse = lastuse;
mlx5_esw_bridge_fdb_offload_notify(entry->dev, entry->key.addr,
entry->key.vid,
SWITCHDEV_FDB_ADD_TO_BRIDGE);
@@ -690,10 +741,7 @@ static void mlx5_esw_bridge_fdb_flush(struct mlx5_esw_bridge *bridge)
struct mlx5_esw_bridge_fdb_entry *entry, *tmp;
list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list) {
- if (!(entry->flags & MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER))
- mlx5_esw_bridge_fdb_offload_notify(entry->dev, entry->key.addr,
- entry->key.vid,
- SWITCHDEV_FDB_DEL_TO_BRIDGE);
+ mlx5_esw_bridge_fdb_del_notify(entry);
mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge);
}
}
@@ -841,10 +889,7 @@ static void mlx5_esw_bridge_vlan_flush(struct mlx5_esw_bridge_vlan *vlan,
struct mlx5_esw_bridge_fdb_entry *entry, *tmp;
list_for_each_entry_safe(entry, tmp, &vlan->fdb_list, vlan_list) {
- if (!(entry->flags & MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER))
- mlx5_esw_bridge_fdb_offload_notify(entry->dev, entry->key.addr,
- entry->key.vid,
- SWITCHDEV_FDB_DEL_TO_BRIDGE);
+ mlx5_esw_bridge_fdb_del_notify(entry);
mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge);
}
@@ -875,13 +920,13 @@ static void mlx5_esw_bridge_port_vlans_flush(struct mlx5_esw_bridge_port *port,
}
static struct mlx5_esw_bridge_vlan *
-mlx5_esw_bridge_port_vlan_lookup(u16 vid, u16 vport_num, struct mlx5_esw_bridge *bridge,
- struct mlx5_eswitch *esw)
+mlx5_esw_bridge_port_vlan_lookup(u16 vid, u16 vport_num, u16 esw_owner_vhca_id,
+ struct mlx5_esw_bridge *bridge, struct mlx5_eswitch *esw)
{
struct mlx5_esw_bridge_port *port;
struct mlx5_esw_bridge_vlan *vlan;
- port = mlx5_esw_bridge_port_lookup(vport_num, bridge);
+ port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, bridge->br_offloads);
if (!port) {
/* FDB is added asynchronously on wq while port might have been deleted
* concurrently. Report on 'info' logging level and skip the FDB offload.
@@ -904,24 +949,23 @@ mlx5_esw_bridge_port_vlan_lookup(u16 vid, u16 vport_num, struct mlx5_esw_bridge
}
static struct mlx5_esw_bridge_fdb_entry *
-mlx5_esw_bridge_fdb_entry_init(struct net_device *dev, u16 vport_num, const unsigned char *addr,
- u16 vid, bool added_by_user, struct mlx5_eswitch *esw,
- struct mlx5_esw_bridge *bridge)
+mlx5_esw_bridge_fdb_entry_init(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
+ const unsigned char *addr, u16 vid, bool added_by_user, bool peer,
+ struct mlx5_eswitch *esw, struct mlx5_esw_bridge *bridge)
{
struct mlx5_esw_bridge_vlan *vlan = NULL;
struct mlx5_esw_bridge_fdb_entry *entry;
struct mlx5_flow_handle *handle;
struct mlx5_fc *counter;
- struct mlx5e_priv *priv;
int err;
if (bridge->flags & MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG && vid) {
- vlan = mlx5_esw_bridge_port_vlan_lookup(vid, vport_num, bridge, esw);
+ vlan = mlx5_esw_bridge_port_vlan_lookup(vid, vport_num, esw_owner_vhca_id, bridge,
+ esw);
if (IS_ERR(vlan))
return ERR_CAST(vlan);
}
- priv = netdev_priv(dev);
entry = kvzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return ERR_PTR(-ENOMEM);
@@ -930,19 +974,25 @@ mlx5_esw_bridge_fdb_entry_init(struct net_device *dev, u16 vport_num, const unsi
entry->key.vid = vid;
entry->dev = dev;
entry->vport_num = vport_num;
+ entry->esw_owner_vhca_id = esw_owner_vhca_id;
entry->lastuse = jiffies;
if (added_by_user)
entry->flags |= MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER;
+ if (peer)
+ entry->flags |= MLX5_ESW_BRIDGE_FLAG_PEER;
- counter = mlx5_fc_create(priv->mdev, true);
+ counter = mlx5_fc_create(esw->dev, true);
if (IS_ERR(counter)) {
err = PTR_ERR(counter);
goto err_ingress_fc_create;
}
entry->ingress_counter = counter;
- handle = mlx5_esw_bridge_ingress_flow_create(vport_num, addr, vlan, mlx5_fc_id(counter),
- bridge);
+ handle = peer ?
+ mlx5_esw_bridge_ingress_flow_peer_create(vport_num, addr, vlan,
+ mlx5_fc_id(counter), bridge) :
+ mlx5_esw_bridge_ingress_flow_create(vport_num, addr, vlan,
+ mlx5_fc_id(counter), bridge);
if (IS_ERR(handle)) {
err = PTR_ERR(handle);
esw_warn(esw->dev, "Failed to create ingress flow(vport=%u,err=%d)\n",
@@ -962,7 +1012,8 @@ mlx5_esw_bridge_fdb_entry_init(struct net_device *dev, u16 vport_num, const unsi
entry->filter_handle = handle;
}
- handle = mlx5_esw_bridge_egress_flow_create(vport_num, addr, vlan, bridge);
+ handle = mlx5_esw_bridge_egress_flow_create(vport_num, esw_owner_vhca_id, addr, vlan,
+ bridge);
if (IS_ERR(handle)) {
err = PTR_ERR(handle);
esw_warn(esw->dev, "Failed to create egress flow(vport=%u,err=%d)\n",
@@ -994,32 +1045,37 @@ err_egress_flow_create:
err_ingress_filter_flow_create:
mlx5_del_flow_rules(entry->ingress_handle);
err_ingress_flow_create:
- mlx5_fc_destroy(priv->mdev, entry->ingress_counter);
+ mlx5_fc_destroy(esw->dev, entry->ingress_counter);
err_ingress_fc_create:
kvfree(entry);
return ERR_PTR(err);
}
-int mlx5_esw_bridge_ageing_time_set(unsigned long ageing_time, struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
+int mlx5_esw_bridge_ageing_time_set(u16 vport_num, u16 esw_owner_vhca_id, unsigned long ageing_time,
+ struct mlx5_esw_bridge_offloads *br_offloads)
{
- if (!vport->bridge)
+ struct mlx5_esw_bridge_port *port;
+
+ port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
+ if (!port)
return -EINVAL;
- vport->bridge->ageing_time = clock_t_to_jiffies(ageing_time);
+ port->bridge->ageing_time = clock_t_to_jiffies(ageing_time);
return 0;
}
-int mlx5_esw_bridge_vlan_filtering_set(bool enable, struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
+int mlx5_esw_bridge_vlan_filtering_set(u16 vport_num, u16 esw_owner_vhca_id, bool enable,
+ struct mlx5_esw_bridge_offloads *br_offloads)
{
+ struct mlx5_esw_bridge_port *port;
struct mlx5_esw_bridge *bridge;
bool filtering;
- if (!vport->bridge)
+ port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
+ if (!port)
return -EINVAL;
- bridge = vport->bridge;
+ bridge = port->bridge;
filtering = bridge->flags & MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG;
if (filtering == enable)
return 0;
@@ -1033,114 +1089,143 @@ int mlx5_esw_bridge_vlan_filtering_set(bool enable, struct mlx5_eswitch *esw,
return 0;
}
-static int mlx5_esw_bridge_vport_init(struct mlx5_esw_bridge_offloads *br_offloads,
- struct mlx5_esw_bridge *bridge,
- struct mlx5_vport *vport)
+static int mlx5_esw_bridge_vport_init(u16 vport_num, u16 esw_owner_vhca_id, u16 flags,
+ struct mlx5_esw_bridge_offloads *br_offloads,
+ struct mlx5_esw_bridge *bridge)
{
struct mlx5_eswitch *esw = br_offloads->esw;
struct mlx5_esw_bridge_port *port;
int err;
port = kvzalloc(sizeof(*port), GFP_KERNEL);
- if (!port) {
- err = -ENOMEM;
- goto err_port_alloc;
- }
+ if (!port)
+ return -ENOMEM;
- port->vport_num = vport->vport;
+ port->vport_num = vport_num;
+ port->esw_owner_vhca_id = esw_owner_vhca_id;
+ port->bridge = bridge;
+ port->flags |= flags;
xa_init(&port->vlans);
- err = mlx5_esw_bridge_port_insert(port, bridge);
+ err = mlx5_esw_bridge_port_insert(port, br_offloads);
if (err) {
- esw_warn(esw->dev, "Failed to insert port metadata (vport=%u,err=%d)\n",
- vport->vport, err);
+ esw_warn(esw->dev,
+ "Failed to insert port metadata (vport=%u,esw_owner_vhca_id=%u,err=%d)\n",
+ port->vport_num, port->esw_owner_vhca_id, err);
goto err_port_insert;
}
trace_mlx5_esw_bridge_vport_init(port);
- vport->bridge = bridge;
return 0;
err_port_insert:
kvfree(port);
-err_port_alloc:
- mlx5_esw_bridge_put(br_offloads, bridge);
return err;
}
static int mlx5_esw_bridge_vport_cleanup(struct mlx5_esw_bridge_offloads *br_offloads,
- struct mlx5_vport *vport)
+ struct mlx5_esw_bridge_port *port)
{
- struct mlx5_esw_bridge *bridge = vport->bridge;
+ u16 vport_num = port->vport_num, esw_owner_vhca_id = port->esw_owner_vhca_id;
+ struct mlx5_esw_bridge *bridge = port->bridge;
struct mlx5_esw_bridge_fdb_entry *entry, *tmp;
- struct mlx5_esw_bridge_port *port;
list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list)
- if (entry->vport_num == vport->vport)
+ if (entry->vport_num == vport_num && entry->esw_owner_vhca_id == esw_owner_vhca_id)
mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge);
- port = mlx5_esw_bridge_port_lookup(vport->vport, bridge);
- if (!port) {
- WARN(1, "Vport %u metadata not found on bridge", vport->vport);
- return -EINVAL;
- }
-
trace_mlx5_esw_bridge_vport_cleanup(port);
mlx5_esw_bridge_port_vlans_flush(port, bridge);
- mlx5_esw_bridge_port_erase(port, bridge);
+ mlx5_esw_bridge_port_erase(port, br_offloads);
kvfree(port);
mlx5_esw_bridge_put(br_offloads, bridge);
- vport->bridge = NULL;
return 0;
}
-int mlx5_esw_bridge_vport_link(int ifindex, struct mlx5_esw_bridge_offloads *br_offloads,
- struct mlx5_vport *vport, struct netlink_ext_ack *extack)
+static int mlx5_esw_bridge_vport_link_with_flags(int ifindex, u16 vport_num, u16 esw_owner_vhca_id,
+ u16 flags,
+ struct mlx5_esw_bridge_offloads *br_offloads,
+ struct netlink_ext_ack *extack)
{
struct mlx5_esw_bridge *bridge;
int err;
- WARN_ON(vport->bridge);
-
bridge = mlx5_esw_bridge_lookup(ifindex, br_offloads);
if (IS_ERR(bridge)) {
NL_SET_ERR_MSG_MOD(extack, "Error checking for existing bridge with same ifindex");
return PTR_ERR(bridge);
}
- err = mlx5_esw_bridge_vport_init(br_offloads, bridge, vport);
- if (err)
+ err = mlx5_esw_bridge_vport_init(vport_num, esw_owner_vhca_id, flags, br_offloads, bridge);
+ if (err) {
NL_SET_ERR_MSG_MOD(extack, "Error initializing port");
+ goto err_vport;
+ }
+ return 0;
+
+err_vport:
+ mlx5_esw_bridge_put(br_offloads, bridge);
return err;
}
-int mlx5_esw_bridge_vport_unlink(int ifindex, struct mlx5_esw_bridge_offloads *br_offloads,
- struct mlx5_vport *vport, struct netlink_ext_ack *extack)
+int mlx5_esw_bridge_vport_link(int ifindex, u16 vport_num, u16 esw_owner_vhca_id,
+ struct mlx5_esw_bridge_offloads *br_offloads,
+ struct netlink_ext_ack *extack)
{
- struct mlx5_esw_bridge *bridge = vport->bridge;
+ return mlx5_esw_bridge_vport_link_with_flags(ifindex, vport_num, esw_owner_vhca_id, 0,
+ br_offloads, extack);
+}
+
+int mlx5_esw_bridge_vport_unlink(int ifindex, u16 vport_num, u16 esw_owner_vhca_id,
+ struct mlx5_esw_bridge_offloads *br_offloads,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_bridge_port *port;
int err;
- if (!bridge) {
+ port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
+ if (!port) {
NL_SET_ERR_MSG_MOD(extack, "Port is not attached to any bridge");
return -EINVAL;
}
- if (bridge->ifindex != ifindex) {
+ if (port->bridge->ifindex != ifindex) {
NL_SET_ERR_MSG_MOD(extack, "Port is attached to another bridge");
return -EINVAL;
}
- err = mlx5_esw_bridge_vport_cleanup(br_offloads, vport);
+ err = mlx5_esw_bridge_vport_cleanup(br_offloads, port);
if (err)
NL_SET_ERR_MSG_MOD(extack, "Port cleanup failed");
return err;
}
-int mlx5_esw_bridge_port_vlan_add(u16 vid, u16 flags, struct mlx5_eswitch *esw,
- struct mlx5_vport *vport, struct netlink_ext_ack *extack)
+int mlx5_esw_bridge_vport_peer_link(int ifindex, u16 vport_num, u16 esw_owner_vhca_id,
+ struct mlx5_esw_bridge_offloads *br_offloads,
+ struct netlink_ext_ack *extack)
+{
+ if (!MLX5_CAP_ESW(br_offloads->esw->dev, merged_eswitch))
+ return 0;
+
+ return mlx5_esw_bridge_vport_link_with_flags(ifindex, vport_num, esw_owner_vhca_id,
+ MLX5_ESW_BRIDGE_PORT_FLAG_PEER,
+ br_offloads, extack);
+}
+
+int mlx5_esw_bridge_vport_peer_unlink(int ifindex, u16 vport_num, u16 esw_owner_vhca_id,
+ struct mlx5_esw_bridge_offloads *br_offloads,
+ struct netlink_ext_ack *extack)
+{
+ return mlx5_esw_bridge_vport_unlink(ifindex, vport_num, esw_owner_vhca_id, br_offloads,
+ extack);
+}
+
+int mlx5_esw_bridge_port_vlan_add(u16 vport_num, u16 esw_owner_vhca_id, u16 vid, u16 flags,
+ struct mlx5_esw_bridge_offloads *br_offloads,
+ struct netlink_ext_ack *extack)
{
struct mlx5_esw_bridge_port *port;
struct mlx5_esw_bridge_vlan *vlan;
- port = mlx5_esw_bridge_port_lookup(vport->vport, vport->bridge);
+ port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
if (!port)
return -EINVAL;
@@ -1148,10 +1233,10 @@ int mlx5_esw_bridge_port_vlan_add(u16 vid, u16 flags, struct mlx5_eswitch *esw,
if (vlan) {
if (vlan->flags == flags)
return 0;
- mlx5_esw_bridge_vlan_cleanup(port, vlan, vport->bridge);
+ mlx5_esw_bridge_vlan_cleanup(port, vlan, port->bridge);
}
- vlan = mlx5_esw_bridge_vlan_create(vid, flags, port, esw);
+ vlan = mlx5_esw_bridge_vlan_create(vid, flags, port, br_offloads->esw);
if (IS_ERR(vlan)) {
NL_SET_ERR_MSG_MOD(extack, "Failed to create VLAN entry");
return PTR_ERR(vlan);
@@ -1159,62 +1244,93 @@ int mlx5_esw_bridge_port_vlan_add(u16 vid, u16 flags, struct mlx5_eswitch *esw,
return 0;
}
-void mlx5_esw_bridge_port_vlan_del(u16 vid, struct mlx5_eswitch *esw, struct mlx5_vport *vport)
+void mlx5_esw_bridge_port_vlan_del(u16 vport_num, u16 esw_owner_vhca_id, u16 vid,
+ struct mlx5_esw_bridge_offloads *br_offloads)
{
struct mlx5_esw_bridge_port *port;
struct mlx5_esw_bridge_vlan *vlan;
- port = mlx5_esw_bridge_port_lookup(vport->vport, vport->bridge);
+ port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
if (!port)
return;
vlan = mlx5_esw_bridge_vlan_lookup(vid, port);
if (!vlan)
return;
- mlx5_esw_bridge_vlan_cleanup(port, vlan, vport->bridge);
+ mlx5_esw_bridge_vlan_cleanup(port, vlan, port->bridge);
}
-void mlx5_esw_bridge_fdb_create(struct net_device *dev, struct mlx5_eswitch *esw,
- struct mlx5_vport *vport,
- struct switchdev_notifier_fdb_info *fdb_info)
+void mlx5_esw_bridge_fdb_update_used(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
+ struct mlx5_esw_bridge_offloads *br_offloads,
+ struct switchdev_notifier_fdb_info *fdb_info)
{
- struct mlx5_esw_bridge *bridge = vport->bridge;
struct mlx5_esw_bridge_fdb_entry *entry;
- u16 vport_num = vport->vport;
+ struct mlx5_esw_bridge_fdb_key key;
+ struct mlx5_esw_bridge_port *port;
+ struct mlx5_esw_bridge *bridge;
- if (!bridge) {
- esw_info(esw->dev, "Vport is not assigned to bridge (vport=%u)\n", vport_num);
+ port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
+ if (!port || port->flags & MLX5_ESW_BRIDGE_PORT_FLAG_PEER)
+ return;
+
+ bridge = port->bridge;
+ ether_addr_copy(key.addr, fdb_info->addr);
+ key.vid = fdb_info->vid;
+ entry = rhashtable_lookup_fast(&bridge->fdb_ht, &key, fdb_ht_params);
+ if (!entry) {
+ esw_debug(br_offloads->esw->dev,
+ "FDB entry with specified key not found (MAC=%pM,vid=%u,vport=%u)\n",
+ key.addr, key.vid, vport_num);
return;
}
- entry = mlx5_esw_bridge_fdb_entry_init(dev, vport_num, fdb_info->addr, fdb_info->vid,
- fdb_info->added_by_user, esw, bridge);
+ entry->lastuse = jiffies;
+}
+
+void mlx5_esw_bridge_fdb_create(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
+ struct mlx5_esw_bridge_offloads *br_offloads,
+ struct switchdev_notifier_fdb_info *fdb_info)
+{
+ struct mlx5_esw_bridge_fdb_entry *entry;
+ struct mlx5_esw_bridge_port *port;
+ struct mlx5_esw_bridge *bridge;
+
+ port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
+ if (!port)
+ return;
+
+ bridge = port->bridge;
+ entry = mlx5_esw_bridge_fdb_entry_init(dev, vport_num, esw_owner_vhca_id, fdb_info->addr,
+ fdb_info->vid, fdb_info->added_by_user,
+ port->flags & MLX5_ESW_BRIDGE_PORT_FLAG_PEER,
+ br_offloads->esw, bridge);
if (IS_ERR(entry))
return;
if (entry->flags & MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER)
mlx5_esw_bridge_fdb_offload_notify(dev, entry->key.addr, entry->key.vid,
SWITCHDEV_FDB_OFFLOADED);
- else
+ else if (!(entry->flags & MLX5_ESW_BRIDGE_FLAG_PEER))
/* Take over dynamic entries to prevent kernel bridge from aging them out. */
mlx5_esw_bridge_fdb_offload_notify(dev, entry->key.addr, entry->key.vid,
SWITCHDEV_FDB_ADD_TO_BRIDGE);
}
-void mlx5_esw_bridge_fdb_remove(struct net_device *dev, struct mlx5_eswitch *esw,
- struct mlx5_vport *vport,
+void mlx5_esw_bridge_fdb_remove(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
+ struct mlx5_esw_bridge_offloads *br_offloads,
struct switchdev_notifier_fdb_info *fdb_info)
{
- struct mlx5_esw_bridge *bridge = vport->bridge;
+ struct mlx5_eswitch *esw = br_offloads->esw;
struct mlx5_esw_bridge_fdb_entry *entry;
struct mlx5_esw_bridge_fdb_key key;
- u16 vport_num = vport->vport;
+ struct mlx5_esw_bridge_port *port;
+ struct mlx5_esw_bridge *bridge;
- if (!bridge) {
- esw_warn(esw->dev, "Vport is not assigned to bridge (vport=%u)\n", vport_num);
+ port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
+ if (!port)
return;
- }
+ bridge = port->bridge;
ether_addr_copy(key.addr, fdb_info->addr);
key.vid = fdb_info->vid;
entry = rhashtable_lookup_fast(&bridge->fdb_ht, &key, fdb_ht_params);
@@ -1225,9 +1341,7 @@ void mlx5_esw_bridge_fdb_remove(struct net_device *dev, struct mlx5_eswitch *esw
return;
}
- if (!(entry->flags & MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER))
- mlx5_esw_bridge_fdb_offload_notify(dev, entry->key.addr, entry->key.vid,
- SWITCHDEV_FDB_DEL_TO_BRIDGE);
+ mlx5_esw_bridge_fdb_del_notify(entry);
mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge);
}
@@ -1245,11 +1359,10 @@ void mlx5_esw_bridge_update(struct mlx5_esw_bridge_offloads *br_offloads)
continue;
if (time_after(lastuse, entry->lastuse)) {
- mlx5_esw_bridge_fdb_entry_refresh(lastuse, entry);
- } else if (time_is_before_jiffies(entry->lastuse + bridge->ageing_time)) {
- mlx5_esw_bridge_fdb_offload_notify(entry->dev, entry->key.addr,
- entry->key.vid,
- SWITCHDEV_FDB_DEL_TO_BRIDGE);
+ mlx5_esw_bridge_fdb_entry_refresh(entry);
+ } else if (!(entry->flags & MLX5_ESW_BRIDGE_FLAG_PEER) &&
+ time_is_before_jiffies(entry->lastuse + bridge->ageing_time)) {
+ mlx5_esw_bridge_fdb_del_notify(entry);
mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge);
}
}
@@ -1258,13 +1371,11 @@ void mlx5_esw_bridge_update(struct mlx5_esw_bridge_offloads *br_offloads)
static void mlx5_esw_bridge_flush(struct mlx5_esw_bridge_offloads *br_offloads)
{
- struct mlx5_eswitch *esw = br_offloads->esw;
- struct mlx5_vport *vport;
+ struct mlx5_esw_bridge_port *port;
unsigned long i;
- mlx5_esw_for_each_vport(esw, i, vport)
- if (vport->bridge)
- mlx5_esw_bridge_vport_cleanup(br_offloads, vport);
+ xa_for_each(&br_offloads->ports, i, port)
+ mlx5_esw_bridge_vport_cleanup(br_offloads, port);
WARN_ONCE(!list_empty(&br_offloads->bridges),
"Cleaning up bridge offloads while still having bridges attached\n");
@@ -1279,6 +1390,7 @@ struct mlx5_esw_bridge_offloads *mlx5_esw_bridge_init(struct mlx5_eswitch *esw)
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&br_offloads->bridges);
+ xa_init(&br_offloads->ports);
br_offloads->esw = esw;
esw->br_offloads = br_offloads;
@@ -1293,6 +1405,7 @@ void mlx5_esw_bridge_cleanup(struct mlx5_eswitch *esw)
return;
mlx5_esw_bridge_flush(br_offloads);
+ WARN_ON(!xa_empty(&br_offloads->ports));
esw->br_offloads = NULL;
kvfree(br_offloads);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h
index d826942b27fc..efc39975226e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h
@@ -7,6 +7,7 @@
#include <linux/notifier.h>
#include <linux/list.h>
#include <linux/workqueue.h>
+#include <linux/xarray.h>
#include "eswitch.h"
struct mlx5_flow_table;
@@ -15,6 +16,8 @@ struct mlx5_flow_group;
struct mlx5_esw_bridge_offloads {
struct mlx5_eswitch *esw;
struct list_head bridges;
+ struct xarray ports;
+
struct notifier_block netdev_nb;
struct notifier_block nb_blk;
struct notifier_block nb;
@@ -31,23 +34,36 @@ struct mlx5_esw_bridge_offloads {
struct mlx5_esw_bridge_offloads *mlx5_esw_bridge_init(struct mlx5_eswitch *esw);
void mlx5_esw_bridge_cleanup(struct mlx5_eswitch *esw);
-int mlx5_esw_bridge_vport_link(int ifindex, struct mlx5_esw_bridge_offloads *br_offloads,
- struct mlx5_vport *vport, struct netlink_ext_ack *extack);
-int mlx5_esw_bridge_vport_unlink(int ifindex, struct mlx5_esw_bridge_offloads *br_offloads,
- struct mlx5_vport *vport, struct netlink_ext_ack *extack);
-void mlx5_esw_bridge_fdb_create(struct net_device *dev, struct mlx5_eswitch *esw,
- struct mlx5_vport *vport,
+int mlx5_esw_bridge_vport_link(int ifindex, u16 vport_num, u16 esw_owner_vhca_id,
+ struct mlx5_esw_bridge_offloads *br_offloads,
+ struct netlink_ext_ack *extack);
+int mlx5_esw_bridge_vport_unlink(int ifindex, u16 vport_num, u16 esw_owner_vhca_id,
+ struct mlx5_esw_bridge_offloads *br_offloads,
+ struct netlink_ext_ack *extack);
+int mlx5_esw_bridge_vport_peer_link(int ifindex, u16 vport_num, u16 esw_owner_vhca_id,
+ struct mlx5_esw_bridge_offloads *br_offloads,
+ struct netlink_ext_ack *extack);
+int mlx5_esw_bridge_vport_peer_unlink(int ifindex, u16 vport_num, u16 esw_owner_vhca_id,
+ struct mlx5_esw_bridge_offloads *br_offloads,
+ struct netlink_ext_ack *extack);
+void mlx5_esw_bridge_fdb_update_used(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
+ struct mlx5_esw_bridge_offloads *br_offloads,
+ struct switchdev_notifier_fdb_info *fdb_info);
+void mlx5_esw_bridge_fdb_create(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
+ struct mlx5_esw_bridge_offloads *br_offloads,
struct switchdev_notifier_fdb_info *fdb_info);
-void mlx5_esw_bridge_fdb_remove(struct net_device *dev, struct mlx5_eswitch *esw,
- struct mlx5_vport *vport,
+void mlx5_esw_bridge_fdb_remove(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
+ struct mlx5_esw_bridge_offloads *br_offloads,
struct switchdev_notifier_fdb_info *fdb_info);
void mlx5_esw_bridge_update(struct mlx5_esw_bridge_offloads *br_offloads);
-int mlx5_esw_bridge_ageing_time_set(unsigned long ageing_time, struct mlx5_eswitch *esw,
- struct mlx5_vport *vport);
-int mlx5_esw_bridge_vlan_filtering_set(bool enable, struct mlx5_eswitch *esw,
- struct mlx5_vport *vport);
-int mlx5_esw_bridge_port_vlan_add(u16 vid, u16 flags, struct mlx5_eswitch *esw,
- struct mlx5_vport *vport, struct netlink_ext_ack *extack);
-void mlx5_esw_bridge_port_vlan_del(u16 vid, struct mlx5_eswitch *esw, struct mlx5_vport *vport);
+int mlx5_esw_bridge_ageing_time_set(u16 vport_num, u16 esw_owner_vhca_id, unsigned long ageing_time,
+ struct mlx5_esw_bridge_offloads *br_offloads);
+int mlx5_esw_bridge_vlan_filtering_set(u16 vport_num, u16 esw_owner_vhca_id, bool enable,
+ struct mlx5_esw_bridge_offloads *br_offloads);
+int mlx5_esw_bridge_port_vlan_add(u16 vport_num, u16 esw_owner_vhca_id, u16 vid, u16 flags,
+ struct mlx5_esw_bridge_offloads *br_offloads,
+ struct netlink_ext_ack *extack);
+void mlx5_esw_bridge_port_vlan_del(u16 vport_num, u16 esw_owner_vhca_id, u16 vid,
+ struct mlx5_esw_bridge_offloads *br_offloads);
#endif /* __MLX5_ESW_BRIDGE_H__ */
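With struct mlx5_vport dropped from this API, every entry point now identifies a port by the (vport_num, esw_owner_vhca_id) pair, which stays meaningful for peer-eswitch ports under LAG. A minimal sketch, under stated assumptions, of how a caller might dispatch a switchdev FDB add into the reworked interface; example_vport_ids_get() is a hypothetical placeholder for however the caller derives the pair from a representor netdev:

/* Hedged sketch only, not the driver's actual notifier code. */
static void example_fdb_add(struct net_device *dev,
			    struct mlx5_esw_bridge_offloads *br_offloads,
			    struct switchdev_notifier_fdb_info *fdb_info)
{
	u16 vport_num, esw_owner_vhca_id;

	/* example_vport_ids_get() is assumed: it maps a representor
	 * netdev to its vport number and owning eswitch vhca id.
	 */
	if (example_vport_ids_get(dev, &vport_num, &esw_owner_vhca_id))
		return;

	mlx5_esw_bridge_fdb_create(dev, vport_num, esw_owner_vhca_id,
				   br_offloads, fdb_info);
}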
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h
index d9ab2e8bc2cb..52964a82d6a6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h
@@ -19,6 +19,11 @@ struct mlx5_esw_bridge_fdb_key {
enum {
MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER = BIT(0),
+ MLX5_ESW_BRIDGE_FLAG_PEER = BIT(1),
+};
+
+enum {
+ MLX5_ESW_BRIDGE_PORT_FLAG_PEER = BIT(0),
};
struct mlx5_esw_bridge_fdb_entry {
@@ -28,6 +33,7 @@ struct mlx5_esw_bridge_fdb_entry {
struct list_head list;
struct list_head vlan_list;
u16 vport_num;
+ u16 esw_owner_vhca_id;
u16 flags;
struct mlx5_flow_handle *ingress_handle;
@@ -47,6 +53,9 @@ struct mlx5_esw_bridge_vlan {
struct mlx5_esw_bridge_port {
u16 vport_num;
+ u16 esw_owner_vhca_id;
+ u16 flags;
+ struct mlx5_esw_bridge *bridge;
struct xarray vlans;
};
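Because ports from both eswitches of a LAG now live in the single br_offloads->ports xarray, the lookup key must encode both identifiers. One plausible encoding, assumed here purely for illustration, packs the owner vhca id into the upper 16 bits:

/* Assumed key layout; the driver's actual helper may differ. */
static unsigned long example_bridge_port_key(u16 vport_num,
					     u16 esw_owner_vhca_id)
{
	return vport_num | (unsigned long)esw_owner_vhca_id << 16;
}

With such a key, mlx5_esw_bridge_port_lookup() reduces to a plain xa_load(&br_offloads->ports, key), and the flush path can walk every port of every bridge with a single xa_for_each().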
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/bridge_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/bridge_tracepoint.h
index 227964b7d3b9..3401188e0a60 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/bridge_tracepoint.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/bridge_tracepoint.h
@@ -85,11 +85,18 @@ DECLARE_EVENT_CLASS(mlx5_esw_bridge_port_template,
TP_ARGS(port),
TP_STRUCT__entry(
__field(u16, vport_num)
+ __field(u16, esw_owner_vhca_id)
+ __field(u16, flags)
),
TP_fast_assign(
__entry->vport_num = port->vport_num;
+ __entry->esw_owner_vhca_id = port->esw_owner_vhca_id;
+ __entry->flags = port->flags;
),
- TP_printk("vport_num=%hu", __entry->vport_num)
+ TP_printk("vport_num=%hu esw_owner_vhca_id=%hu flags=%hx",
+ __entry->vport_num,
+ __entry->esw_owner_vhca_id,
+ __entry->flags)
);
DEFINE_EVENT(mlx5_esw_bridge_port_template,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 97e6cb6f13c1..2fde9f59e8b4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1458,8 +1458,6 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs)
esw->mode = mode;
- mlx5_lag_update(esw->dev);
-
if (mode == MLX5_ESWITCH_LEGACY) {
err = esw_legacy_enable(esw);
} else {
@@ -1494,7 +1492,7 @@ abort:
/**
* mlx5_eswitch_enable - Enable eswitch
* @esw: Pointer to eswitch
- * @num_vfs: Enable eswitch swich for given number of VFs.
+ * @num_vfs: Enable eswitch for the given number of VFs.
* Caller must pass num_vfs > 0 when enabling eswitch for
* vf vports.
* mlx5_eswitch_enable() returns 0 on success or error code on failure.
@@ -1506,6 +1504,7 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
if (!mlx5_esw_allowed(esw))
return 0;
+ mlx5_lag_disable_change(esw->dev);
down_write(&esw->mode_lock);
if (esw->mode == MLX5_ESWITCH_NONE) {
ret = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY, num_vfs);
@@ -1519,6 +1518,7 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
esw->esw_funcs.num_vfs = num_vfs;
}
up_write(&esw->mode_lock);
+ mlx5_lag_enable_change(esw->dev);
return ret;
}
@@ -1550,8 +1550,6 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf)
old_mode = esw->mode;
esw->mode = MLX5_ESWITCH_NONE;
- mlx5_lag_update(esw->dev);
-
if (old_mode == MLX5_ESWITCH_OFFLOADS)
mlx5_rescan_drivers(esw->dev);
@@ -1567,10 +1565,12 @@ void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf)
if (!mlx5_esw_allowed(esw))
return;
+ mlx5_lag_disable_change(esw->dev);
down_write(&esw->mode_lock);
mlx5_eswitch_disable_locked(esw, clear_vf);
esw->esw_funcs.num_vfs = 0;
up_write(&esw->mode_lock);
+ mlx5_lag_enable_change(esw->dev);
}
static int mlx5_query_hca_cap_host_pf(struct mlx5_core_dev *dev, void *out)
@@ -1759,7 +1759,9 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
ida_init(&esw->offloads.vport_metadata_ida);
xa_init_flags(&esw->offloads.vhca_map, XA_FLAGS_ALLOC);
mutex_init(&esw->state_lock);
+ lockdep_register_key(&esw->mode_lock_key);
init_rwsem(&esw->mode_lock);
+ lockdep_set_class(&esw->mode_lock, &esw->mode_lock_key);
esw->enabled_vports = 0;
esw->mode = MLX5_ESWITCH_NONE;
@@ -1793,6 +1795,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
esw->dev->priv.eswitch = NULL;
destroy_workqueue(esw->work_queue);
+ lockdep_unregister_key(&esw->mode_lock_key);
mutex_destroy(&esw->state_lock);
WARN_ON(!xa_empty(&esw->offloads.vhca_map));
xa_destroy(&esw->offloads.vhca_map);
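Registering a dynamic lock class key per eswitch gives each mode_lock its own lockdep class, so the LAG path can hold both devices' mode locks at once without lockdep reporting recursive acquisition of a single class. A minimal sketch of the pattern on a standalone structure, assuming nothing beyond the generic lockdep API:

#include <linux/lockdep.h>
#include <linux/rwsem.h>

struct example_obj {
	struct rw_semaphore lock;
	struct lock_class_key lock_key;	/* one lockdep class per instance */
};

static void example_obj_init(struct example_obj *obj)
{
	lockdep_register_key(&obj->lock_key);
	init_rwsem(&obj->lock);
	/* Override the per-call-site class so that nesting two different
	 * instances is not mistaken for a self-deadlock.
	 */
	lockdep_set_class(&obj->lock, &obj->lock_key);
}

static void example_obj_cleanup(struct example_obj *obj)
{
	lockdep_unregister_key(&obj->lock_key);
}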
@@ -1889,8 +1892,7 @@ is_port_function_supported(struct mlx5_eswitch *esw, u16 vport_num)
mlx5_esw_is_sf_vport(esw, vport_num);
}
-int mlx5_devlink_port_function_hw_addr_get(struct devlink *devlink,
- struct devlink_port *port,
+int mlx5_devlink_port_function_hw_addr_get(struct devlink_port *port,
u8 *hw_addr, int *hw_addr_len,
struct netlink_ext_ack *extack)
{
@@ -1899,7 +1901,7 @@ int mlx5_devlink_port_function_hw_addr_get(struct devlink *devlink,
int err = -EOPNOTSUPP;
u16 vport_num;
- esw = mlx5_devlink_eswitch_get(devlink);
+ esw = mlx5_devlink_eswitch_get(port->devlink);
if (IS_ERR(esw))
return PTR_ERR(esw);
@@ -1923,8 +1925,7 @@ int mlx5_devlink_port_function_hw_addr_get(struct devlink *devlink,
return err;
}
-int mlx5_devlink_port_function_hw_addr_set(struct devlink *devlink,
- struct devlink_port *port,
+int mlx5_devlink_port_function_hw_addr_set(struct devlink_port *port,
const u8 *hw_addr, int hw_addr_len,
struct netlink_ext_ack *extack)
{
@@ -1933,7 +1934,7 @@ int mlx5_devlink_port_function_hw_addr_set(struct devlink *devlink,
int err = -EOPNOTSUPP;
u16 vport_num;
- esw = mlx5_devlink_eswitch_get(devlink);
+ esw = mlx5_devlink_eswitch_get(port->devlink);
if (IS_ERR(esw)) {
NL_SET_ERR_MSG_MOD(extack, "Eswitch doesn't support set hw_addr");
return PTR_ERR(esw);
@@ -2366,10 +2367,23 @@ int mlx5_esw_try_lock(struct mlx5_eswitch *esw)
*/
void mlx5_esw_unlock(struct mlx5_eswitch *esw)
{
+ if (!mlx5_esw_allowed(esw))
+ return;
up_write(&esw->mode_lock);
}
/**
+ * mlx5_esw_lock() - Take write lock on esw mode lock
+ * @esw: eswitch device.
+ */
+void mlx5_esw_lock(struct mlx5_eswitch *esw)
+{
+ if (!mlx5_esw_allowed(esw))
+ return;
+ down_write(&esw->mode_lock);
+}
+
+/**
* mlx5_eswitch_get_total_vports - Get total vports of the eswitch
*
* @dev: Pointer to core device
@@ -2384,3 +2398,15 @@ u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev)
return mlx5_esw_allowed(esw) ? esw->total_vports : 0;
}
EXPORT_SYMBOL_GPL(mlx5_eswitch_get_total_vports);
+
+/**
+ * mlx5_eswitch_get_core_dev - Get the mdev device
+ * @esw: eswitch device.
+ *
+ * Return the Mellanox core device which manages the eswitch.
+ */
+struct mlx5_core_dev *mlx5_eswitch_get_core_dev(struct mlx5_eswitch *esw)
+{
+ return mlx5_esw_allowed(esw) ? esw->dev : NULL;
+}
+EXPORT_SYMBOL(mlx5_eswitch_get_core_dev);
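The new export lets auxiliary consumers (the RDMA driver, for instance) reach the core device behind an eswitch without poking into mlx5 private structures; NULL is returned when eswitch support is absent or not allowed. A hedged usage sketch:

/* Illustrative consumer; the error policy is an assumption. */
static int example_use_eswitch(struct mlx5_eswitch *esw)
{
	struct mlx5_core_dev *mdev = mlx5_eswitch_get_core_dev(esw);

	if (!mdev)
		return -EOPNOTSUPP;	/* eswitch absent or not allowed */

	/* mdev is now safe to use for caps, commands, etc. */
	return 0;
}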
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index d562edf5b0bc..d3a5ff4f6140 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -86,6 +86,14 @@ struct mlx5_mapped_obj {
#define esw_chains(esw) \
((esw)->fdb_table.offloads.esw_chains_priv)
+enum {
+ MAPPING_TYPE_CHAIN,
+ MAPPING_TYPE_TUNNEL,
+ MAPPING_TYPE_TUNNEL_ENC_OPTS,
+ MAPPING_TYPE_LABELS,
+ MAPPING_TYPE_ZONE,
+};
+
struct vport_ingress {
struct mlx5_flow_table *acl;
struct mlx5_flow_handle *allow_rule;
@@ -124,6 +132,8 @@ struct vport_egress {
struct {
struct mlx5_flow_group *fwd_grp;
struct mlx5_flow_handle *fwd_rule;
+ struct mlx5_flow_handle *bounce_rule;
+ struct mlx5_flow_group *bounce_grp;
} offloads;
};
};
@@ -150,8 +160,6 @@ enum mlx5_eswitch_vport_event {
MLX5_VPORT_PROMISC_CHANGE = BIT(3),
};
-struct mlx5_esw_bridge;
-
struct mlx5_vport {
struct mlx5_core_dev *dev;
struct hlist_head uc_list[MLX5_L2_ADDR_HASH_SIZE];
@@ -180,7 +188,6 @@ struct mlx5_vport {
enum mlx5_eswitch_vport_event enabled_events;
int index;
struct devlink_port *dl_port;
- struct mlx5_esw_bridge *bridge;
};
struct mlx5_esw_indir_table;
@@ -315,6 +322,7 @@ struct mlx5_eswitch {
u32 large_group_num;
} params;
struct blocking_notifier_head n_head;
+ struct lock_class_key mode_lock_key;
};
void esw_offloads_disable(struct mlx5_eswitch *esw);
@@ -475,12 +483,10 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
enum devlink_eswitch_encap_mode *encap);
-int mlx5_devlink_port_function_hw_addr_get(struct devlink *devlink,
- struct devlink_port *port,
+int mlx5_devlink_port_function_hw_addr_get(struct devlink_port *port,
u8 *hw_addr, int *hw_addr_len,
struct netlink_ext_ack *extack);
-int mlx5_devlink_port_function_hw_addr_set(struct devlink *devlink,
- struct devlink_port *port,
+int mlx5_devlink_port_function_hw_addr_set(struct devlink_port *port,
const u8 *hw_addr, int hw_addr_len,
struct netlink_ext_ack *extack);
@@ -699,11 +705,18 @@ void mlx5_esw_get(struct mlx5_core_dev *dev);
void mlx5_esw_put(struct mlx5_core_dev *dev);
int mlx5_esw_try_lock(struct mlx5_eswitch *esw);
void mlx5_esw_unlock(struct mlx5_eswitch *esw);
+void mlx5_esw_lock(struct mlx5_eswitch *esw);
void esw_vport_change_handle_locked(struct mlx5_vport *vport);
bool mlx5_esw_offloads_controller_valid(const struct mlx5_eswitch *esw, u32 controller);
+int mlx5_eswitch_offloads_config_single_fdb(struct mlx5_eswitch *master_esw,
+ struct mlx5_eswitch *slave_esw);
+void mlx5_eswitch_offloads_destroy_single_fdb(struct mlx5_eswitch *master_esw,
+ struct mlx5_eswitch *slave_esw);
+int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw);
+
#else /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
@@ -719,6 +732,9 @@ static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
return ERR_PTR(-EOPNOTSUPP);
}
+static inline void mlx5_esw_unlock(struct mlx5_eswitch *esw) { return; }
+static inline void mlx5_esw_lock(struct mlx5_eswitch *esw) { return; }
+
static inline struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
{
@@ -731,6 +747,23 @@ mlx5_esw_vport_to_devlink_port_index(const struct mlx5_core_dev *dev,
{
return vport_num;
}
+
+static inline int
+mlx5_eswitch_offloads_config_single_fdb(struct mlx5_eswitch *master_esw,
+ struct mlx5_eswitch *slave_esw)
+{
+ return 0;
+}
+
+static inline void
+mlx5_eswitch_offloads_destroy_single_fdb(struct mlx5_eswitch *master_esw,
+ struct mlx5_eswitch *slave_esw) {}
+
+static inline int
+mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw)
+{
+ return 0;
+}
#endif /* CONFIG_MLX5_ESWITCH */
#endif /* __MLX5_ESWITCH_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 3bb71a186004..49c7bf94332c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -927,6 +927,7 @@ out:
struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw,
+ struct mlx5_eswitch *from_esw,
struct mlx5_eswitch_rep *rep,
u32 sqn)
{
@@ -945,10 +946,10 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw,
misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
/* source vport is the esw manager */
- MLX5_SET(fte_match_set_misc, misc, source_port, rep->esw->manager_vport);
+ MLX5_SET(fte_match_set_misc, misc, source_port, from_esw->manager_vport);
if (MLX5_CAP_ESW(on_esw->dev, merged_eswitch))
MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
- MLX5_CAP_GEN(rep->esw->dev, vhca_id));
+ MLX5_CAP_GEN(from_esw->dev, vhca_id));
misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
@@ -964,6 +965,9 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw,
dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ if (rep->vport == MLX5_VPORT_UPLINK)
+ spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
+
flow_rule = mlx5_add_flow_rules(on_esw->fdb_table.offloads.slow_fdb,
spec, &flow_act, &dest, 1);
if (IS_ERR(flow_rule))
@@ -1614,7 +1618,18 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
goto ns_err;
}
- table_size = esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ +
+ /* To be strictly correct:
+ * MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ)
+ * should be:
+ * esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ +
+ * peer_esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ
+ * but as the peer device might not be in switchdev mode it's not
+ * possible. We use the fact that by default FW sets max vfs and max sfs
+ * to the same value on both devices. If this needs to change in the
+ * future, note that the peer miss group should also be created based
+ * on the total vport count of the peer (currently it also uses
+ * esw->total_vports).
+ */
+ table_size = MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ) +
MLX5_ESW_MISS_FLOWS + esw->total_vports + esw->esw_funcs.num_vfs;
/* create the slow path fdb with encap set, so further table instances
@@ -1671,7 +1686,8 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
source_eswitch_owner_vhca_id_valid, 1);
}
- ix = esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ;
+ /* See comment above table_size calculation */
+ ix = MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ);
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);
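A quick sizing sketch of the computation above, with illustrative constants (the values of MAX_SQ_NVPORTS and MAX_PF_SQ here are assumptions, not quoted from the driver): the send-to-vport budget is doubled via MLX5_MAX_PORTS so the peer's SQ rules fit in the shared FDB, then miss flows and per-vport peer-miss entries are added on top:

/* Illustrative only; the EX_* constants are assumptions. */
enum { EX_MAX_PORTS = 2, EX_MAX_SQ_NVPORTS = 32, EX_MAX_PF_SQ = 256 };

static u32 example_fdb_table_size(u32 total_vports, u32 num_vfs,
				  u32 miss_flows)
{
	/* Send-to-vport (SQ) rules for both eswitches of the LAG... */
	u32 sq_rules = EX_MAX_PORTS *
		       (total_vports * EX_MAX_SQ_NVPORTS + EX_MAX_PF_SQ);

	/* ...plus miss flows and per-vport peer miss entries. */
	return sq_rules + miss_flows + total_vports + num_vfs;
}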
@@ -2311,14 +2327,293 @@ void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num)
mlx5_esw_offloads_devlink_port_unregister(esw, vport_num);
}
+static int esw_set_uplink_slave_ingress_root(struct mlx5_core_dev *master,
+ struct mlx5_core_dev *slave)
+{
+ u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {};
+ struct mlx5_eswitch *esw;
+ struct mlx5_flow_root_namespace *root;
+ struct mlx5_flow_namespace *ns;
+ struct mlx5_vport *vport;
+ int err;
+
+ MLX5_SET(set_flow_table_root_in, in, opcode,
+ MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
+ MLX5_SET(set_flow_table_root_in, in, table_type, FS_FT_ESW_INGRESS_ACL);
+ MLX5_SET(set_flow_table_root_in, in, other_vport, 1);
+ MLX5_SET(set_flow_table_root_in, in, vport_number, MLX5_VPORT_UPLINK);
+
+ if (master) {
+ esw = master->priv.eswitch;
+ vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
+ MLX5_SET(set_flow_table_root_in, in, table_of_other_vport, 1);
+ MLX5_SET(set_flow_table_root_in, in, table_vport_number,
+ MLX5_VPORT_UPLINK);
+
+ ns = mlx5_get_flow_vport_acl_namespace(master,
+ MLX5_FLOW_NAMESPACE_ESW_INGRESS,
+ vport->index);
+ root = find_root(&ns->node);
+ mutex_lock(&root->chain_lock);
+
+ MLX5_SET(set_flow_table_root_in, in,
+ table_eswitch_owner_vhca_id_valid, 1);
+ MLX5_SET(set_flow_table_root_in, in,
+ table_eswitch_owner_vhca_id,
+ MLX5_CAP_GEN(master, vhca_id));
+ MLX5_SET(set_flow_table_root_in, in, table_id,
+ root->root_ft->id);
+ } else {
+ esw = slave->priv.eswitch;
+ vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
+ ns = mlx5_get_flow_vport_acl_namespace(slave,
+ MLX5_FLOW_NAMESPACE_ESW_INGRESS,
+ vport->index);
+ root = find_root(&ns->node);
+ mutex_lock(&root->chain_lock);
+ MLX5_SET(set_flow_table_root_in, in, table_id, root->root_ft->id);
+ }
+
+ err = mlx5_cmd_exec(slave, in, sizeof(in), out, sizeof(out));
+ mutex_unlock(&root->chain_lock);
+
+ return err;
+}
+
+static int esw_set_slave_root_fdb(struct mlx5_core_dev *master,
+ struct mlx5_core_dev *slave)
+{
+ u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {};
+ struct mlx5_flow_root_namespace *root;
+ struct mlx5_flow_namespace *ns;
+ int err;
+
+ MLX5_SET(set_flow_table_root_in, in, opcode,
+ MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
+ MLX5_SET(set_flow_table_root_in, in, table_type,
+ FS_FT_FDB);
+
+ if (master) {
+ ns = mlx5_get_flow_namespace(master,
+ MLX5_FLOW_NAMESPACE_FDB);
+ root = find_root(&ns->node);
+ mutex_lock(&root->chain_lock);
+ MLX5_SET(set_flow_table_root_in, in,
+ table_eswitch_owner_vhca_id_valid, 1);
+ MLX5_SET(set_flow_table_root_in, in,
+ table_eswitch_owner_vhca_id,
+ MLX5_CAP_GEN(master, vhca_id));
+ MLX5_SET(set_flow_table_root_in, in, table_id,
+ root->root_ft->id);
+ } else {
+ ns = mlx5_get_flow_namespace(slave,
+ MLX5_FLOW_NAMESPACE_FDB);
+ root = find_root(&ns->node);
+ mutex_lock(&root->chain_lock);
+ MLX5_SET(set_flow_table_root_in, in, table_id,
+ root->root_ft->id);
+ }
+
+ err = mlx5_cmd_exec(slave, in, sizeof(in), out, sizeof(out));
+ mutex_unlock(&root->chain_lock);
+
+ return err;
+}
+
+static int __esw_set_master_egress_rule(struct mlx5_core_dev *master,
+ struct mlx5_core_dev *slave,
+ struct mlx5_vport *vport,
+ struct mlx5_flow_table *acl)
+{
+ struct mlx5_flow_handle *flow_rule = NULL;
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_spec *spec;
+ int err = 0;
+ void *misc;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ misc_parameters);
+ MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_UPLINK);
+ MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
+ MLX5_CAP_GEN(slave, vhca_id));
+
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
+ MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
+ MLX5_SET_TO_ONES(fte_match_set_misc, misc,
+ source_eswitch_owner_vhca_id);
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ dest.vport.num = slave->priv.eswitch->manager_vport;
+ dest.vport.vhca_id = MLX5_CAP_GEN(slave, vhca_id);
+ dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
+
+ flow_rule = mlx5_add_flow_rules(acl, spec, &flow_act,
+ &dest, 1);
+ if (IS_ERR(flow_rule))
+ err = PTR_ERR(flow_rule);
+ else
+ vport->egress.offloads.bounce_rule = flow_rule;
+
+ kvfree(spec);
+ return err;
+}
+
+static int esw_set_master_egress_rule(struct mlx5_core_dev *master,
+ struct mlx5_core_dev *slave)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_eswitch *esw = master->priv.eswitch;
+ struct mlx5_flow_table_attr ft_attr = {
+ .max_fte = 1, .prio = 0, .level = 0,
+ };
+ struct mlx5_flow_namespace *egress_ns;
+ struct mlx5_flow_table *acl;
+ struct mlx5_flow_group *g;
+ struct mlx5_vport *vport;
+ void *match_criteria;
+ u32 *flow_group_in;
+ int err;
+
+ vport = mlx5_eswitch_get_vport(esw, esw->manager_vport);
+ if (IS_ERR(vport))
+ return PTR_ERR(vport);
+
+ egress_ns = mlx5_get_flow_vport_acl_namespace(master,
+ MLX5_FLOW_NAMESPACE_ESW_EGRESS,
+ vport->index);
+ if (!egress_ns)
+ return -EINVAL;
+
+ if (vport->egress.acl)
+ return -EINVAL;
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!flow_group_in)
+ return -ENOMEM;
+
+ acl = mlx5_create_vport_flow_table(egress_ns, &ft_attr, vport->vport);
+ if (IS_ERR(acl)) {
+ err = PTR_ERR(acl);
+ goto out;
+ }
+
+ match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
+ match_criteria);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ misc_parameters.source_port);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ misc_parameters.source_eswitch_owner_vhca_id);
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+ MLX5_MATCH_MISC_PARAMETERS);
+
+ MLX5_SET(create_flow_group_in, flow_group_in,
+ source_eswitch_owner_vhca_id_valid, 1);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
+
+ g = mlx5_create_flow_group(acl, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ goto err_group;
+ }
+
+ err = __esw_set_master_egress_rule(master, slave, vport, acl);
+ if (err)
+ goto err_rule;
+
+ vport->egress.acl = acl;
+ vport->egress.offloads.bounce_grp = g;
+
+ kvfree(flow_group_in);
+
+ return 0;
+
+err_rule:
+ mlx5_destroy_flow_group(g);
+err_group:
+ mlx5_destroy_flow_table(acl);
+out:
+ kvfree(flow_group_in);
+ return err;
+}
+
+static void esw_unset_master_egress_rule(struct mlx5_core_dev *dev)
+{
+ struct mlx5_vport *vport;
+
+ vport = mlx5_eswitch_get_vport(dev->priv.eswitch,
+ dev->priv.eswitch->manager_vport);
+
+ esw_acl_egress_ofld_cleanup(vport);
+}
+
+int mlx5_eswitch_offloads_config_single_fdb(struct mlx5_eswitch *master_esw,
+ struct mlx5_eswitch *slave_esw)
+{
+ int err;
+
+ err = esw_set_uplink_slave_ingress_root(master_esw->dev,
+ slave_esw->dev);
+ if (err)
+ return -EINVAL;
+
+ err = esw_set_slave_root_fdb(master_esw->dev,
+ slave_esw->dev);
+ if (err)
+ goto err_fdb;
+
+ err = esw_set_master_egress_rule(master_esw->dev,
+ slave_esw->dev);
+ if (err)
+ goto err_acl;
+
+ return err;
+
+err_acl:
+ esw_set_slave_root_fdb(NULL, slave_esw->dev);
+
+err_fdb:
+ esw_set_uplink_slave_ingress_root(NULL, slave_esw->dev);
+
+ return err;
+}
+
+void mlx5_eswitch_offloads_destroy_single_fdb(struct mlx5_eswitch *master_esw,
+ struct mlx5_eswitch *slave_esw)
+{
+ esw_unset_master_egress_rule(master_esw->dev);
+ esw_set_slave_root_fdb(NULL, slave_esw->dev);
+ esw_set_uplink_slave_ingress_root(NULL, slave_esw->dev);
+}
+
#define ESW_OFFLOADS_DEVCOM_PAIR (0)
#define ESW_OFFLOADS_DEVCOM_UNPAIR (1)
-static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw,
- struct mlx5_eswitch *peer_esw)
+static void mlx5_esw_offloads_rep_event_unpair(struct mlx5_eswitch *esw)
{
+ const struct mlx5_eswitch_rep_ops *ops;
+ struct mlx5_eswitch_rep *rep;
+ unsigned long i;
+ u8 rep_type;
- return esw_add_fdb_peer_miss_rules(esw, peer_esw->dev);
+ mlx5_esw_for_each_rep(esw, i, rep) {
+ rep_type = NUM_REP_TYPES;
+ while (rep_type--) {
+ ops = esw->offloads.rep_ops[rep_type];
+ if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED &&
+ ops->event)
+ ops->event(esw, rep, MLX5_SWITCHDEV_EVENT_UNPAIR, NULL);
+ }
+ }
}
static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw)
@@ -2326,9 +2621,42 @@ static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw)
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
mlx5e_tc_clean_fdb_peer_flows(esw);
#endif
+ mlx5_esw_offloads_rep_event_unpair(esw);
esw_del_fdb_peer_miss_rules(esw);
}
+static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch *peer_esw)
+{
+ const struct mlx5_eswitch_rep_ops *ops;
+ struct mlx5_eswitch_rep *rep;
+ unsigned long i;
+ u8 rep_type;
+ int err;
+
+ err = esw_add_fdb_peer_miss_rules(esw, peer_esw->dev);
+ if (err)
+ return err;
+
+ mlx5_esw_for_each_rep(esw, i, rep) {
+ for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
+ ops = esw->offloads.rep_ops[rep_type];
+ if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED &&
+ ops->event) {
+ err = ops->event(esw, rep, MLX5_SWITCHDEV_EVENT_PAIR, peer_esw);
+ if (err)
+ goto err_out;
+ }
+ }
+ }
+
+ return 0;
+
+err_out:
+ mlx5_esw_offloads_unpair(esw);
+ return err;
+}
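The pair/unpair loops above invoke an optional per-rep-type event hook. Its shape is inferred from these call sites alone (esw, rep, an event code, and a data pointer that carries the peer eswitch on PAIR); a sketch of a conforming implementation:

/* Signature assumed from the call sites above, not quoted from a header. */
static int example_rep_event(struct mlx5_eswitch *esw,
			     struct mlx5_eswitch_rep *rep,
			     enum mlx5_switchdev_event event, void *data)
{
	switch (event) {
	case MLX5_SWITCHDEV_EVENT_PAIR:
		/* data is the peer eswitch; set up peer resources here.
		 * A non-zero return makes the caller unpair everything.
		 */
		return 0;
	case MLX5_SWITCHDEV_EVENT_UNPAIR:
		/* Tear down whatever PAIR created; no failure path. */
		return 0;
	}
	return 0;
}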
+
static int mlx5_esw_offloads_set_ns_peer(struct mlx5_eswitch *esw,
struct mlx5_eswitch *peer_esw,
bool pair)
@@ -2619,6 +2947,31 @@ static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
esw_vport_destroy_offloads_acl_tables(esw, vport);
}
+int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw)
+{
+ struct mlx5_eswitch_rep *rep;
+ unsigned long i;
+ int ret;
+
+ if (!esw || esw->mode != MLX5_ESWITCH_OFFLOADS)
+ return 0;
+
+ rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
+ if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
+ return 0;
+
+ ret = mlx5_esw_offloads_rep_load(esw, MLX5_VPORT_UPLINK);
+ if (ret)
+ return ret;
+
+ mlx5_esw_for_each_rep(esw, i, rep) {
+ if (atomic_read(&rep->rep_data[REP_ETH].state) == REP_LOADED)
+ mlx5_esw_offloads_rep_load(esw, rep->vport);
+ }
+
+ return 0;
+}
+
static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
{
struct mlx5_esw_indir_table *indir;
@@ -2788,6 +3141,7 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
struct mapping_ctx *reg_c0_obj_pool;
struct mlx5_vport *vport;
unsigned long i;
+ u64 mapping_id;
int err;
if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat) &&
@@ -2811,9 +3165,13 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
if (err)
goto err_vport_metadata;
- reg_c0_obj_pool = mapping_create(sizeof(struct mlx5_mapped_obj),
- ESW_REG_C0_USER_DATA_METADATA_MASK,
- true);
+ mapping_id = mlx5_query_nic_system_image_guid(esw->dev);
+
+ reg_c0_obj_pool = mapping_create_for_id(mapping_id, MAPPING_TYPE_CHAIN,
+ sizeof(struct mlx5_mapped_obj),
+ ESW_REG_C0_USER_DATA_METADATA_MASK,
+ true);
+
if (IS_ERR(reg_c0_obj_pool)) {
err = PTR_ERR(reg_c0_obj_pool);
goto err_pool;
@@ -2991,10 +3349,11 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
if (esw_mode_from_devlink(mode, &mlx5_mode))
return -EINVAL;
+ mlx5_lag_disable_change(esw->dev);
err = mlx5_esw_try_lock(esw);
if (err < 0) {
NL_SET_ERR_MSG_MOD(extack, "Can't change mode, E-Switch is busy");
- return err;
+ goto enable_lag;
}
cur_mlx5_mode = err;
err = 0;
@@ -3018,6 +3377,8 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
unlock:
mlx5_esw_unlock(esw);
+enable_lag:
+ mlx5_lag_enable_change(esw->dev);
return err;
}
@@ -3091,8 +3452,11 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
- if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
+ if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE) {
+ err = 0;
goto out;
+ }
+
fallthrough;
case MLX5_CAP_INLINE_MODE_L2:
NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/events.c b/drivers/net/ethernet/mellanox/mlx5/core/events.c
index d713ae24d6b6..a1ac3a654962 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/events.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/events.c
@@ -27,7 +27,7 @@ static int pcie_core(struct notifier_block *, unsigned long, void *);
static int forward_event(struct notifier_block *, unsigned long, void *);
static struct mlx5_nb events_nbs_ref[] = {
- /* Events to be proccessed by mlx5_core */
+ /* Events to be processed by mlx5_core */
{.nb.notifier_call = any_notifier, .event_type = MLX5_EVENT_TYPE_NOTIFY_ANY },
{.nb.notifier_call = temp_warn, .event_type = MLX5_EVENT_TYPE_TEMP_WARN_EVENT },
{.nb.notifier_call = port_module, .event_type = MLX5_EVENT_TYPE_PORT_MODULE_EVENT },
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
index d5da4ab65766..306279b7f9e7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
@@ -453,7 +453,7 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
MLX5_SET(cqc, cqc, log_cq_size, ilog2(cq_size));
- MLX5_SET(cqc, cqc, c_eqn, eqn);
+ MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
MLX5_SET(cqc, cqc, uar_page, fdev->conn_res.uar->index);
MLX5_SET(cqc, cqc, log_page_size, conn->cq.wq_ctrl.buf.page_shift -
MLX5_ADAPTER_PAGE_SHIFT);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
index 0bba92cf5dc0..8ec148010d62 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
@@ -1516,7 +1516,7 @@ static int mlx5_fpga_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
mutex_lock(&fpga_xfrm->lock);
if (!fpga_xfrm->sa_ctx)
- /* Unbounded xfrm, chane only sw attrs */
+ /* Unbounded xfrm, change only sw attrs */
goto change_sw_xfrm_attrs;
/* copy original hw sa */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 896a6c3dbdb7..7db8df64a60e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -152,17 +152,56 @@ static int mlx5_cmd_stub_destroy_ns(struct mlx5_flow_root_namespace *ns)
return 0;
}
+static int mlx5_cmd_set_slave_root_fdb(struct mlx5_core_dev *master,
+ struct mlx5_core_dev *slave,
+ bool ft_id_valid,
+ u32 ft_id)
+{
+ u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {};
+ struct mlx5_flow_root_namespace *root;
+ struct mlx5_flow_namespace *ns;
+
+ MLX5_SET(set_flow_table_root_in, in, opcode,
+ MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
+ MLX5_SET(set_flow_table_root_in, in, table_type,
+ FS_FT_FDB);
+ if (ft_id_valid) {
+ MLX5_SET(set_flow_table_root_in, in,
+ table_eswitch_owner_vhca_id_valid, 1);
+ MLX5_SET(set_flow_table_root_in, in,
+ table_eswitch_owner_vhca_id,
+ MLX5_CAP_GEN(master, vhca_id));
+ MLX5_SET(set_flow_table_root_in, in, table_id,
+ ft_id);
+ } else {
+ ns = mlx5_get_flow_namespace(slave,
+ MLX5_FLOW_NAMESPACE_FDB);
+ root = find_root(&ns->node);
+ MLX5_SET(set_flow_table_root_in, in, table_id,
+ root->root_ft->id);
+ }
+
+ return mlx5_cmd_exec(slave, in, sizeof(in), out, sizeof(out));
+}
+
static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft, u32 underlay_qpn,
bool disconnect)
{
u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {};
struct mlx5_core_dev *dev = ns->dev;
+ int err;
if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
underlay_qpn == 0)
return 0;
+ if (ft->type == FS_FT_FDB &&
+ mlx5_lag_is_shared_fdb(dev) &&
+ !mlx5_lag_is_master(dev))
+ return 0;
+
MLX5_SET(set_flow_table_root_in, in, opcode,
MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
MLX5_SET(set_flow_table_root_in, in, table_type, ft->type);
@@ -177,7 +216,24 @@ static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns,
MLX5_SET(set_flow_table_root_in, in, other_vport,
!!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
- return mlx5_cmd_exec_in(dev, set_flow_table_root, in);
+ err = mlx5_cmd_exec_in(dev, set_flow_table_root, in);
+ if (!err &&
+ ft->type == FS_FT_FDB &&
+ mlx5_lag_is_shared_fdb(dev) &&
+ mlx5_lag_is_master(dev)) {
+ err = mlx5_cmd_set_slave_root_fdb(dev,
+ mlx5_lag_get_peer_mdev(dev),
+ !disconnect, (!disconnect) ?
+ ft->id : 0);
+ if (err && !disconnect) {
+ MLX5_SET(set_flow_table_root_in, in, op_mod, 0);
+ MLX5_SET(set_flow_table_root_in, in, table_id,
+ ns->root_ft->id);
+ mlx5_cmd_exec_in(dev, set_flow_table_root, in);
+ }
+ }
+
+ return err;
}
static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index c0697e1b7118..9fe8e3c204d6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -413,7 +413,7 @@ static bool check_valid_spec(const struct mlx5_flow_spec *spec)
return true;
}
-static struct mlx5_flow_root_namespace *find_root(struct fs_node *node)
+struct mlx5_flow_root_namespace *find_root(struct fs_node *node)
{
struct fs_node *root;
struct mlx5_flow_namespace *ns;
@@ -2343,7 +2343,7 @@ static int create_leaf_prios(struct mlx5_flow_namespace *ns, int prio,
#define FLOW_TABLE_BIT_SZ 1
#define GET_FLOW_TABLE_CAP(dev, offset) \
- ((be32_to_cpu(*((__be32 *)(dev->caps.hca_cur[MLX5_CAP_FLOW_TABLE]) + \
+ ((be32_to_cpu(*((__be32 *)(dev->caps.hca[MLX5_CAP_FLOW_TABLE]->cur) + \
offset / 32)) >> \
(32 - FLOW_TABLE_BIT_SZ - (offset & 0x1f))) & FLOW_TABLE_BIT_SZ)
static bool has_required_caps(struct mlx5_core_dev *dev, struct node_caps *caps)
@@ -2493,7 +2493,7 @@ static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level)
acc_level_ns = set_prio_attrs_in_ns(ns, acc_level);
/* If this is a prio with chains, and we can jump from one chain
- * (namepsace) to another, so we accumulate the levels
+ * (namespace) to another, so we accumulate the levels
*/
if (prio->node.type == FS_TYPE_PRIO_CHAINS)
acc_level = acc_level_ns;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 7317cdeab661..98240badc342 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -294,6 +294,8 @@ void mlx5_fs_egress_acls_cleanup(struct mlx5_core_dev *dev);
int mlx5_fs_ingress_acls_init(struct mlx5_core_dev *dev, int total_vports);
void mlx5_fs_ingress_acls_cleanup(struct mlx5_core_dev *dev);
+struct mlx5_flow_root_namespace *find_root(struct fs_node *node);
+
#define fs_get_obj(v, _node) {v = container_of((_node), typeof(*v), node); }
#define fs_list_for_each_entry(pos, root) \
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 9abeb80ffa31..037e18dd4be0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -170,7 +170,7 @@ static bool reset_fw_if_needed(struct mlx5_core_dev *dev)
/* The reset only needs to be issued by one PF. The health buffer is
* shared between all functions, and will be cleared during a reset.
- * Check again to avoid a redundant 2nd reset. If the fatal erros was
+ * Check again to avoid a redundant 2nd reset. If the fatal error was
* PCI related a reset won't help.
*/
fatal_error = mlx5_health_check_fatal_sensors(dev);
@@ -213,10 +213,6 @@ void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force)
mutex_lock(&dev->intf_state_mutex);
if (!err_detected && dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
goto unlock;/* a previous error is still being handled */
- if (dev->state == MLX5_DEVICE_STATE_UNINITIALIZED) {
- dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
- goto unlock;
- }
enter_error_state(dev, force);
unlock:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 7d7ed025db0d..67571e5040d6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -50,7 +50,7 @@ static const struct net_device_ops mlx5i_netdev_ops = {
.ndo_init = mlx5i_dev_init,
.ndo_uninit = mlx5i_dev_cleanup,
.ndo_change_mtu = mlx5i_change_mtu,
- .ndo_do_ioctl = mlx5i_ioctl,
+ .ndo_eth_ioctl = mlx5i_ioctl,
};
/* IPoIB mlx5 netdev profile */
@@ -314,8 +314,7 @@ static void mlx5i_cleanup_tx(struct mlx5e_priv *priv)
static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
{
- struct ttc_params ttc_params = {};
- int tt, err;
+ int err;
priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
MLX5_FLOW_NAMESPACE_KERNEL);
@@ -330,33 +329,15 @@ static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
}
- mlx5e_set_ttc_basic_params(priv, &ttc_params);
- mlx5e_set_inner_ttc_ft_params(&ttc_params);
- for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
- ttc_params.indir_tirn[tt] = priv->inner_indir_tir[tt].tirn;
-
- err = mlx5e_create_inner_ttc_table(priv, &ttc_params, &priv->fs.inner_ttc);
- if (err) {
- netdev_err(priv->netdev, "Failed to create inner ttc table, err=%d\n",
- err);
- goto err_destroy_arfs_tables;
- }
-
- mlx5e_set_ttc_ft_params(&ttc_params);
- for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
- ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn;
-
- err = mlx5e_create_ttc_table(priv, &ttc_params, &priv->fs.ttc);
+ err = mlx5e_create_ttc_table(priv);
if (err) {
netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
err);
- goto err_destroy_inner_ttc_table;
+ goto err_destroy_arfs_tables;
}
return 0;
-err_destroy_inner_ttc_table:
- mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
err_destroy_arfs_tables:
mlx5e_arfs_destroy_tables(priv);
@@ -365,17 +346,20 @@ err_destroy_arfs_tables:
static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv)
{
- mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
- mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
+ mlx5e_destroy_ttc_table(priv);
mlx5e_arfs_destroy_tables(priv);
}
static int mlx5i_init_rx(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
- u16 max_nch = priv->max_nch;
+ struct mlx5e_lro_param lro_param;
int err;
+ priv->rx_res = mlx5e_rx_res_alloc();
+ if (!priv->rx_res)
+ return -ENOMEM;
+
mlx5e_create_q_counters(priv);
err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
@@ -384,54 +368,38 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
goto err_destroy_q_counters;
}
- err = mlx5e_create_indirect_rqt(priv);
+ lro_param = mlx5e_get_lro_param(&priv->channels.params);
+ err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, 0,
+ priv->max_nch, priv->drop_rq.rqn, &lro_param,
+ priv->channels.params.num_channels);
if (err)
goto err_close_drop_rq;
- err = mlx5e_create_direct_rqts(priv, priv->direct_tir, max_nch);
- if (err)
- goto err_destroy_indirect_rqts;
-
- err = mlx5e_create_indirect_tirs(priv, true);
- if (err)
- goto err_destroy_direct_rqts;
-
- err = mlx5e_create_direct_tirs(priv, priv->direct_tir, max_nch);
- if (err)
- goto err_destroy_indirect_tirs;
-
err = mlx5i_create_flow_steering(priv);
if (err)
- goto err_destroy_direct_tirs;
+ goto err_destroy_rx_res;
return 0;
-err_destroy_direct_tirs:
- mlx5e_destroy_direct_tirs(priv, priv->direct_tir, max_nch);
-err_destroy_indirect_tirs:
- mlx5e_destroy_indirect_tirs(priv);
-err_destroy_direct_rqts:
- mlx5e_destroy_direct_rqts(priv, priv->direct_tir, max_nch);
-err_destroy_indirect_rqts:
- mlx5e_destroy_rqt(priv, &priv->indir_rqt);
+err_destroy_rx_res:
+ mlx5e_rx_res_destroy(priv->rx_res);
err_close_drop_rq:
mlx5e_close_drop_rq(&priv->drop_rq);
err_destroy_q_counters:
mlx5e_destroy_q_counters(priv);
+ mlx5e_rx_res_free(priv->rx_res);
+ priv->rx_res = NULL;
return err;
}
static void mlx5i_cleanup_rx(struct mlx5e_priv *priv)
{
- u16 max_nch = priv->max_nch;
-
mlx5i_destroy_flow_steering(priv);
- mlx5e_destroy_direct_tirs(priv, priv->direct_tir, max_nch);
- mlx5e_destroy_indirect_tirs(priv);
- mlx5e_destroy_direct_rqts(priv, priv->direct_tir, max_nch);
- mlx5e_destroy_rqt(priv, &priv->indir_rqt);
+ mlx5e_rx_res_destroy(priv->rx_res);
mlx5e_close_drop_rq(&priv->drop_rq);
mlx5e_destroy_q_counters(priv);
+ mlx5e_rx_res_free(priv->rx_res);
+ priv->rx_res = NULL;
}
/* The stats groups order is opposite to the update_stats() order calls */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
index 18ee21b06a00..5308f23702bc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
@@ -149,7 +149,7 @@ static const struct net_device_ops mlx5i_pkey_netdev_ops = {
.ndo_get_stats64 = mlx5i_get_stats,
.ndo_uninit = mlx5i_pkey_dev_cleanup,
.ndo_change_mtu = mlx5i_pkey_change_mtu,
- .ndo_do_ioctl = mlx5i_pkey_ioctl,
+ .ndo_eth_ioctl = mlx5i_pkey_ioctl,
};
/* Child NDOs */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
index 5c043c5cc403..f4dfa55c8c7e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
@@ -32,7 +32,9 @@
#include <linux/netdevice.h>
#include <linux/mlx5/driver.h>
+#include <linux/mlx5/eswitch.h>
#include <linux/mlx5/vport.h>
+#include "lib/devcom.h"
#include "mlx5_core.h"
#include "eswitch.h"
#include "lag.h"
@@ -45,7 +47,7 @@
static DEFINE_SPINLOCK(lag_lock);
static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 remap_port1,
- u8 remap_port2)
+ u8 remap_port2, bool shared_fdb)
{
u32 in[MLX5_ST_SZ_DW(create_lag_in)] = {};
void *lag_ctx = MLX5_ADDR_OF(create_lag_in, in, ctx);
@@ -54,6 +56,7 @@ static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 remap_port1,
MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, remap_port1);
MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, remap_port2);
+ MLX5_SET(lagc, lag_ctx, fdb_selection_mode, shared_fdb);
return mlx5_cmd_exec_in(dev, create_lag, in);
}
@@ -224,35 +227,59 @@ void mlx5_modify_lag(struct mlx5_lag *ldev,
}
static int mlx5_create_lag(struct mlx5_lag *ldev,
- struct lag_tracker *tracker)
+ struct lag_tracker *tracker,
+ bool shared_fdb)
{
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
+ u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {};
int err;
mlx5_infer_tx_affinity_mapping(tracker, &ldev->v2p_map[MLX5_LAG_P1],
&ldev->v2p_map[MLX5_LAG_P2]);
- mlx5_core_info(dev0, "lag map port 1:%d port 2:%d",
- ldev->v2p_map[MLX5_LAG_P1], ldev->v2p_map[MLX5_LAG_P2]);
+ mlx5_core_info(dev0, "lag map port 1:%d port 2:%d shared_fdb:%d",
+ ldev->v2p_map[MLX5_LAG_P1], ldev->v2p_map[MLX5_LAG_P2],
+ shared_fdb);
err = mlx5_cmd_create_lag(dev0, ldev->v2p_map[MLX5_LAG_P1],
- ldev->v2p_map[MLX5_LAG_P2]);
- if (err)
+ ldev->v2p_map[MLX5_LAG_P2], shared_fdb);
+ if (err) {
mlx5_core_err(dev0,
"Failed to create LAG (%d)\n",
err);
+ return err;
+ }
+
+ if (shared_fdb) {
+ err = mlx5_eswitch_offloads_config_single_fdb(dev0->priv.eswitch,
+ dev1->priv.eswitch);
+ if (err)
+ mlx5_core_err(dev0, "Can't enable single FDB mode\n");
+ else
+ mlx5_core_info(dev0, "Operation mode is single FDB\n");
+ }
+
+ if (err) {
+ MLX5_SET(destroy_lag_in, in, opcode, MLX5_CMD_OP_DESTROY_LAG);
+ if (mlx5_cmd_exec_in(dev0, destroy_lag, in))
+ mlx5_core_err(dev0,
+ "Failed to deactivate RoCE LAG; driver restart required\n");
+ }
+
return err;
}
int mlx5_activate_lag(struct mlx5_lag *ldev,
struct lag_tracker *tracker,
- u8 flags)
+ u8 flags,
+ bool shared_fdb)
{
bool roce_lag = !!(flags & MLX5_LAG_FLAG_ROCE);
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
int err;
- err = mlx5_create_lag(ldev, tracker);
+ err = mlx5_create_lag(ldev, tracker, shared_fdb);
if (err) {
if (roce_lag) {
mlx5_core_err(dev0,
@@ -266,6 +293,7 @@ int mlx5_activate_lag(struct mlx5_lag *ldev,
}
ldev->flags |= flags;
+ ldev->shared_fdb = shared_fdb;
return 0;
}
@@ -278,6 +306,12 @@ static int mlx5_deactivate_lag(struct mlx5_lag *ldev)
ldev->flags &= ~MLX5_LAG_MODE_FLAGS;
+ if (ldev->shared_fdb) {
+ mlx5_eswitch_offloads_destroy_single_fdb(ldev->pf[MLX5_LAG_P1].dev->priv.eswitch,
+ ldev->pf[MLX5_LAG_P2].dev->priv.eswitch);
+ ldev->shared_fdb = false;
+ }
+
MLX5_SET(destroy_lag_in, in, opcode, MLX5_CMD_OP_DESTROY_LAG);
err = mlx5_cmd_exec_in(dev0, destroy_lag, in);
if (err) {
@@ -333,6 +367,10 @@ static void mlx5_lag_remove_devices(struct mlx5_lag *ldev)
if (!ldev->pf[i].dev)
continue;
+ if (ldev->pf[i].dev->priv.flags &
+ MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)
+ continue;
+
ldev->pf[i].dev->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
mlx5_rescan_drivers_locked(ldev->pf[i].dev);
}
@@ -342,12 +380,15 @@ static void mlx5_disable_lag(struct mlx5_lag *ldev)
{
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
+ bool shared_fdb = ldev->shared_fdb;
bool roce_lag;
int err;
roce_lag = __mlx5_lag_is_roce(ldev);
- if (roce_lag) {
+ if (shared_fdb) {
+ mlx5_lag_remove_devices(ldev);
+ } else if (roce_lag) {
if (!(dev0->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)) {
dev0->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
mlx5_rescan_drivers_locked(dev0);
@@ -359,8 +400,34 @@ static void mlx5_disable_lag(struct mlx5_lag *ldev)
if (err)
return;
- if (roce_lag)
+ if (shared_fdb || roce_lag)
mlx5_lag_add_devices(ldev);
+
+ if (shared_fdb) {
+ if (!(dev0->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV))
+ mlx5_eswitch_reload_reps(dev0->priv.eswitch);
+ if (!(dev1->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV))
+ mlx5_eswitch_reload_reps(dev1->priv.eswitch);
+ }
+}
+
+static bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev)
+{
+ struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
+
+ if (is_mdev_switchdev_mode(dev0) &&
+ is_mdev_switchdev_mode(dev1) &&
+ mlx5_eswitch_vport_match_metadata_enabled(dev0->priv.eswitch) &&
+ mlx5_eswitch_vport_match_metadata_enabled(dev1->priv.eswitch) &&
+ mlx5_devcom_is_paired(dev0->priv.devcom,
+ MLX5_DEVCOM_ESW_OFFLOADS) &&
+ MLX5_CAP_GEN(dev1, lag_native_fdb_selection) &&
+ MLX5_CAP_ESW(dev1, root_ft_on_other_esw) &&
+ MLX5_CAP_ESW(dev0, esw_shared_ingress_acl))
+ return true;
+
+ return false;
}
static void mlx5_do_bond(struct mlx5_lag *ldev)
@@ -371,14 +438,17 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
bool do_bond, roce_lag;
int err;
- if (!mlx5_lag_is_ready(ldev))
- return;
-
- tracker = ldev->tracker;
+ if (!mlx5_lag_is_ready(ldev)) {
+ do_bond = false;
+ } else {
+ tracker = ldev->tracker;
- do_bond = tracker.is_bonded && mlx5_lag_check_prereq(ldev);
+ do_bond = tracker.is_bonded && mlx5_lag_check_prereq(ldev);
+ }
if (do_bond && !__mlx5_lag_is_active(ldev)) {
+ bool shared_fdb = mlx5_shared_fdb_supported(ldev);
+
roce_lag = !mlx5_sriov_is_enabled(dev0) &&
!mlx5_sriov_is_enabled(dev1);
@@ -388,23 +458,40 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
dev1->priv.eswitch->mode == MLX5_ESWITCH_NONE;
#endif
- if (roce_lag)
+ if (shared_fdb || roce_lag)
mlx5_lag_remove_devices(ldev);
err = mlx5_activate_lag(ldev, &tracker,
roce_lag ? MLX5_LAG_FLAG_ROCE :
- MLX5_LAG_FLAG_SRIOV);
+ MLX5_LAG_FLAG_SRIOV,
+ shared_fdb);
if (err) {
- if (roce_lag)
+ if (shared_fdb || roce_lag)
mlx5_lag_add_devices(ldev);
return;
- }
-
- if (roce_lag) {
+ } else if (roce_lag) {
dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
mlx5_rescan_drivers_locked(dev0);
mlx5_nic_vport_enable_roce(dev1);
+ } else if (shared_fdb) {
+ dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
+ mlx5_rescan_drivers_locked(dev0);
+
+ err = mlx5_eswitch_reload_reps(dev0->priv.eswitch);
+ if (!err)
+ err = mlx5_eswitch_reload_reps(dev1->priv.eswitch);
+
+ if (err) {
+ dev0->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
+ mlx5_rescan_drivers_locked(dev0);
+ mlx5_deactivate_lag(ldev);
+ mlx5_lag_add_devices(ldev);
+ mlx5_eswitch_reload_reps(dev0->priv.eswitch);
+ mlx5_eswitch_reload_reps(dev1->priv.eswitch);
+ mlx5_core_err(dev0, "Failed to enable lag\n");
+ return;
+ }
}
} else if (do_bond && __mlx5_lag_is_active(ldev)) {
mlx5_modify_lag(ldev, &tracker);
@@ -418,21 +505,48 @@ static void mlx5_queue_bond_work(struct mlx5_lag *ldev, unsigned long delay)
queue_delayed_work(ldev->wq, &ldev->bond_work, delay);
}
+static void mlx5_lag_lock_eswitches(struct mlx5_core_dev *dev0,
+ struct mlx5_core_dev *dev1)
+{
+ if (dev0)
+ mlx5_esw_lock(dev0->priv.eswitch);
+ if (dev1)
+ mlx5_esw_lock(dev1->priv.eswitch);
+}
+
+static void mlx5_lag_unlock_eswitches(struct mlx5_core_dev *dev0,
+ struct mlx5_core_dev *dev1)
+{
+ if (dev1)
+ mlx5_esw_unlock(dev1->priv.eswitch);
+ if (dev0)
+ mlx5_esw_unlock(dev0->priv.eswitch);
+}
+
static void mlx5_do_bond_work(struct work_struct *work)
{
struct delayed_work *delayed_work = to_delayed_work(work);
struct mlx5_lag *ldev = container_of(delayed_work, struct mlx5_lag,
bond_work);
+ struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
int status;
status = mlx5_dev_list_trylock();
if (!status) {
- /* 1 sec delay. */
mlx5_queue_bond_work(ldev, HZ);
return;
}
+ if (ldev->mode_changes_in_progress) {
+ mlx5_dev_list_unlock();
+ mlx5_queue_bond_work(ldev, HZ);
+ return;
+ }
+
+ mlx5_lag_lock_eswitches(dev0, dev1);
mlx5_do_bond(ldev);
+ mlx5_lag_unlock_eswitches(dev0, dev1);
mlx5_dev_list_unlock();
}
@@ -630,7 +744,7 @@ static void mlx5_ldev_remove_mdev(struct mlx5_lag *ldev,
}
/* Must be called with intf_mutex held */
-static void __mlx5_lag_dev_add_mdev(struct mlx5_core_dev *dev)
+static int __mlx5_lag_dev_add_mdev(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev = NULL;
struct mlx5_core_dev *tmp_dev;
@@ -638,7 +752,7 @@ static void __mlx5_lag_dev_add_mdev(struct mlx5_core_dev *dev)
if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
!MLX5_CAP_GEN(dev, lag_master) ||
MLX5_CAP_GEN(dev, num_lag_ports) != MLX5_MAX_PORTS)
- return;
+ return 0;
tmp_dev = mlx5_get_next_phys_dev(dev);
if (tmp_dev)
@@ -648,15 +762,17 @@ static void __mlx5_lag_dev_add_mdev(struct mlx5_core_dev *dev)
ldev = mlx5_lag_dev_alloc(dev);
if (!ldev) {
mlx5_core_err(dev, "Failed to alloc lag dev\n");
- return;
+ return 0;
}
} else {
+ if (ldev->mode_changes_in_progress)
+ return -EAGAIN;
mlx5_ldev_get(ldev);
}
mlx5_ldev_add_mdev(ldev, dev);
- return;
+ return 0;
}
void mlx5_lag_remove_mdev(struct mlx5_core_dev *dev)
@@ -667,7 +783,13 @@ void mlx5_lag_remove_mdev(struct mlx5_core_dev *dev)
if (!ldev)
return;
+recheck:
mlx5_dev_list_lock();
+ if (ldev->mode_changes_in_progress) {
+ mlx5_dev_list_unlock();
+ msleep(100);
+ goto recheck;
+ }
mlx5_ldev_remove_mdev(ldev, dev);
mlx5_dev_list_unlock();
mlx5_ldev_put(ldev);
@@ -675,8 +797,16 @@ void mlx5_lag_remove_mdev(struct mlx5_core_dev *dev)
void mlx5_lag_add_mdev(struct mlx5_core_dev *dev)
{
+ int err;
+
+recheck:
mlx5_dev_list_lock();
- __mlx5_lag_dev_add_mdev(dev);
+ err = __mlx5_lag_dev_add_mdev(dev);
+ if (err) {
+ mlx5_dev_list_unlock();
+ msleep(100);
+ goto recheck;
+ }
mlx5_dev_list_unlock();
}
@@ -690,11 +820,11 @@ void mlx5_lag_remove_netdev(struct mlx5_core_dev *dev,
if (!ldev)
return;
- if (__mlx5_lag_is_active(ldev))
- mlx5_disable_lag(ldev);
-
mlx5_ldev_remove_netdev(ldev, netdev);
ldev->flags &= ~MLX5_LAG_FLAG_READY;
+
+ if (__mlx5_lag_is_active(ldev))
+ mlx5_queue_bond_work(ldev, 0);
}
/* Must be called with intf_mutex held */
@@ -716,6 +846,7 @@ void mlx5_lag_add_netdev(struct mlx5_core_dev *dev,
if (i >= MLX5_MAX_PORTS)
ldev->flags |= MLX5_LAG_FLAG_READY;
+ mlx5_queue_bond_work(ldev, 0);
}
bool mlx5_lag_is_roce(struct mlx5_core_dev *dev)
@@ -746,6 +877,21 @@ bool mlx5_lag_is_active(struct mlx5_core_dev *dev)
}
EXPORT_SYMBOL(mlx5_lag_is_active);
+bool mlx5_lag_is_master(struct mlx5_core_dev *dev)
+{
+ struct mlx5_lag *ldev;
+ bool res;
+
+ spin_lock(&lag_lock);
+ ldev = mlx5_lag_dev(dev);
+ res = ldev && __mlx5_lag_is_active(ldev) &&
+ dev == ldev->pf[MLX5_LAG_P1].dev;
+ spin_unlock(&lag_lock);
+
+ return res;
+}
+EXPORT_SYMBOL(mlx5_lag_is_master);
+
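A hedged usage sketch for the new predicate (editorial, not part of the patch; the function name is hypothetical): a consumer such as an RDMA driver can combine it with the existing mlx5_lag_is_active() to decide whether this PF should expose the bonded device.

/* Hypothetical consumer-side check; not from this patch. */
static bool example_should_register_ib(struct mlx5_core_dev *mdev)
{
	/* With an active LAG, only the bond master registers an IB device. */
	return !mlx5_lag_is_active(mdev) || mlx5_lag_is_master(mdev);
}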
bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev;
@@ -760,19 +906,50 @@ bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev)
}
EXPORT_SYMBOL(mlx5_lag_is_sriov);
-void mlx5_lag_update(struct mlx5_core_dev *dev)
+bool mlx5_lag_is_shared_fdb(struct mlx5_core_dev *dev)
+{
+ struct mlx5_lag *ldev;
+ bool res;
+
+ spin_lock(&lag_lock);
+ ldev = mlx5_lag_dev(dev);
+ res = ldev && __mlx5_lag_is_sriov(ldev) && ldev->shared_fdb;
+ spin_unlock(&lag_lock);
+
+ return res;
+}
+EXPORT_SYMBOL(mlx5_lag_is_shared_fdb);
+
+void mlx5_lag_disable_change(struct mlx5_core_dev *dev)
{
+ struct mlx5_core_dev *dev0;
+ struct mlx5_core_dev *dev1;
struct mlx5_lag *ldev;
mlx5_dev_list_lock();
+
ldev = mlx5_lag_dev(dev);
- if (!ldev)
- goto unlock;
+ dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ dev1 = ldev->pf[MLX5_LAG_P2].dev;
- mlx5_do_bond(ldev);
+ ldev->mode_changes_in_progress++;
+ if (__mlx5_lag_is_active(ldev)) {
+ mlx5_lag_lock_eswitches(dev0, dev1);
+ mlx5_disable_lag(ldev);
+ mlx5_lag_unlock_eswitches(dev0, dev1);
+ }
+ mlx5_dev_list_unlock();
+}
-unlock:
+void mlx5_lag_enable_change(struct mlx5_core_dev *dev)
+{
+ struct mlx5_lag *ldev;
+
+ mlx5_dev_list_lock();
+ ldev = mlx5_lag_dev(dev);
+ ldev->mode_changes_in_progress--;
mlx5_dev_list_unlock();
+ mlx5_queue_bond_work(ldev, 0);
}
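The intended pairing of the two helpers above, as an editorial sketch (the caller and the mode-change operation are hypothetical placeholders): eswitch mode changes are bracketed so bond rework is held off until the change completes.

static int example_do_mode_change(struct mlx5_core_dev *dev); /* placeholder */

/* Hypothetical eswitch-mode-change caller; not part of the patch. */
static int example_change_eswitch_mode(struct mlx5_core_dev *dev)
{
	int err;

	mlx5_lag_disable_change(dev);		/* also tears down an active LAG */
	err = example_do_mode_change(dev);
	mlx5_lag_enable_change(dev);		/* re-queues the bond work */

	return err;
}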
struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
@@ -827,6 +1004,26 @@ unlock:
}
EXPORT_SYMBOL(mlx5_lag_get_slave_port);
+struct mlx5_core_dev *mlx5_lag_get_peer_mdev(struct mlx5_core_dev *dev)
+{
+ struct mlx5_core_dev *peer_dev = NULL;
+ struct mlx5_lag *ldev;
+
+ spin_lock(&lag_lock);
+ ldev = mlx5_lag_dev(dev);
+ if (!ldev)
+ goto unlock;
+
+ peer_dev = ldev->pf[MLX5_LAG_P1].dev == dev ?
+ ldev->pf[MLX5_LAG_P2].dev :
+ ldev->pf[MLX5_LAG_P1].dev;
+
+unlock:
+ spin_unlock(&lag_lock);
+ return peer_dev;
+}
+EXPORT_SYMBOL(mlx5_lag_get_peer_mdev);
+
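Another hedged sketch (hypothetical name): walking from one PF to its LAG peer with the new mlx5_lag_get_peer_mdev(), e.g. to probe a peer capability. The helper returns NULL when the device is not in a lag, so callers must check.

/* Hypothetical capability probe on the LAG peer; not from this patch. */
static bool example_peer_has_native_fdb(struct mlx5_core_dev *dev)
{
	struct mlx5_core_dev *peer = mlx5_lag_get_peer_mdev(dev);

	return peer && MLX5_CAP_GEN(peer, lag_native_fdb_selection);
}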
int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
u64 *values,
int num_counters,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag.h
index 191392c37558..d4bae528954e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.h
@@ -39,6 +39,8 @@ struct lag_tracker {
*/
struct mlx5_lag {
u8 flags;
+ int mode_changes_in_progress;
+ bool shared_fdb;
u8 v2p_map[MLX5_MAX_PORTS];
struct kref ref;
struct lag_func pf[MLX5_MAX_PORTS];
@@ -71,7 +73,8 @@ void mlx5_modify_lag(struct mlx5_lag *ldev,
struct lag_tracker *tracker);
int mlx5_activate_lag(struct mlx5_lag *ldev,
struct lag_tracker *tracker,
- u8 flags);
+ u8 flags,
+ bool shared_fdb);
int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev,
struct net_device *ndev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
index c4bf8b679541..011b639b29bf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
@@ -161,7 +161,7 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev,
struct lag_tracker tracker;
tracker = ldev->tracker;
- mlx5_activate_lag(ldev, &tracker, MLX5_LAG_FLAG_MULTIPATH);
+ mlx5_activate_lag(ldev, &tracker, MLX5_LAG_FLAG_MULTIPATH, false);
}
mlx5_lag_set_port_affinity(ldev, MLX5_LAG_NORMAL_AFFINITY);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index ce696d523493..ffac8a0e7a23 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -749,7 +749,7 @@ static int mlx5_pps_event(struct notifier_block *nb,
} else {
ptp_event.type = PTP_CLOCK_EXTTS;
}
- /* TODOL clock->ptp can be NULL if ptp_clock_register failes */
+ /* TODO: clock->ptp can be NULL if ptp_clock_register fails */
ptp_clock_event(clock->ptp, &ptp_event);
break;
case PTP_PF_PEROUT:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c
new file mode 100644
index 000000000000..749d17c0057d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c
@@ -0,0 +1,602 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES.
+
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/mlx5/fs.h>
+#include <linux/mlx5/driver.h>
+#include "mlx5_core.h"
+#include "lib/fs_ttc.h"
+
+#define MLX5_TTC_NUM_GROUPS 3
+#define MLX5_TTC_GROUP1_SIZE (BIT(3) + MLX5_NUM_TUNNEL_TT)
+#define MLX5_TTC_GROUP2_SIZE BIT(1)
+#define MLX5_TTC_GROUP3_SIZE BIT(0)
+#define MLX5_TTC_TABLE_SIZE (MLX5_TTC_GROUP1_SIZE +\
+ MLX5_TTC_GROUP2_SIZE +\
+ MLX5_TTC_GROUP3_SIZE)
+
+#define MLX5_INNER_TTC_NUM_GROUPS 3
+#define MLX5_INNER_TTC_GROUP1_SIZE BIT(3)
+#define MLX5_INNER_TTC_GROUP2_SIZE BIT(1)
+#define MLX5_INNER_TTC_GROUP3_SIZE BIT(0)
+#define MLX5_INNER_TTC_TABLE_SIZE (MLX5_INNER_TTC_GROUP1_SIZE +\
+ MLX5_INNER_TTC_GROUP2_SIZE +\
+ MLX5_INNER_TTC_GROUP3_SIZE)
+
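The sizing arithmetic, spelled out (assuming MLX5_NUM_TUNNEL_TT is 6, per the fs_ttc.h hunk later in this patch): the outer table reserves BIT(3) + 6 = 14 entries in group 1 for the eight L4 rules plus the six tunnel rules, BIT(1) = 2 entries in group 2 for the IPv4/IPv6 L3 rules, and BIT(0) = 1 entry in group 3 for the catch-all, so MLX5_TTC_TABLE_SIZE is 17. The inner table carries no tunnel rules, giving 8 + 2 + 1 = 11 entries, matching MLX5_NUM_TT.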
+/* L3/L4 traffic type classifier */
+struct mlx5_ttc_table {
+ int num_groups;
+ struct mlx5_flow_table *t;
+ struct mlx5_flow_group **g;
+ struct mlx5_ttc_rule rules[MLX5_NUM_TT];
+ struct mlx5_flow_handle *tunnel_rules[MLX5_NUM_TUNNEL_TT];
+};
+
+struct mlx5_flow_table *mlx5_get_ttc_flow_table(struct mlx5_ttc_table *ttc)
+{
+ return ttc->t;
+}
+
+static void mlx5_cleanup_ttc_rules(struct mlx5_ttc_table *ttc)
+{
+ int i;
+
+ for (i = 0; i < MLX5_NUM_TT; i++) {
+ if (!IS_ERR_OR_NULL(ttc->rules[i].rule)) {
+ mlx5_del_flow_rules(ttc->rules[i].rule);
+ ttc->rules[i].rule = NULL;
+ }
+ }
+
+ for (i = 0; i < MLX5_NUM_TUNNEL_TT; i++) {
+ if (!IS_ERR_OR_NULL(ttc->tunnel_rules[i])) {
+ mlx5_del_flow_rules(ttc->tunnel_rules[i]);
+ ttc->tunnel_rules[i] = NULL;
+ }
+ }
+}
+
+struct mlx5_etype_proto {
+ u16 etype;
+ u8 proto;
+};
+
+static struct mlx5_etype_proto ttc_rules[] = {
+ [MLX5_TT_IPV4_TCP] = {
+ .etype = ETH_P_IP,
+ .proto = IPPROTO_TCP,
+ },
+ [MLX5_TT_IPV6_TCP] = {
+ .etype = ETH_P_IPV6,
+ .proto = IPPROTO_TCP,
+ },
+ [MLX5_TT_IPV4_UDP] = {
+ .etype = ETH_P_IP,
+ .proto = IPPROTO_UDP,
+ },
+ [MLX5_TT_IPV6_UDP] = {
+ .etype = ETH_P_IPV6,
+ .proto = IPPROTO_UDP,
+ },
+ [MLX5_TT_IPV4_IPSEC_AH] = {
+ .etype = ETH_P_IP,
+ .proto = IPPROTO_AH,
+ },
+ [MLX5_TT_IPV6_IPSEC_AH] = {
+ .etype = ETH_P_IPV6,
+ .proto = IPPROTO_AH,
+ },
+ [MLX5_TT_IPV4_IPSEC_ESP] = {
+ .etype = ETH_P_IP,
+ .proto = IPPROTO_ESP,
+ },
+ [MLX5_TT_IPV6_IPSEC_ESP] = {
+ .etype = ETH_P_IPV6,
+ .proto = IPPROTO_ESP,
+ },
+ [MLX5_TT_IPV4] = {
+ .etype = ETH_P_IP,
+ .proto = 0,
+ },
+ [MLX5_TT_IPV6] = {
+ .etype = ETH_P_IPV6,
+ .proto = 0,
+ },
+ [MLX5_TT_ANY] = {
+ .etype = 0,
+ .proto = 0,
+ },
+};
+
+static struct mlx5_etype_proto ttc_tunnel_rules[] = {
+ [MLX5_TT_IPV4_GRE] = {
+ .etype = ETH_P_IP,
+ .proto = IPPROTO_GRE,
+ },
+ [MLX5_TT_IPV6_GRE] = {
+ .etype = ETH_P_IPV6,
+ .proto = IPPROTO_GRE,
+ },
+ [MLX5_TT_IPV4_IPIP] = {
+ .etype = ETH_P_IP,
+ .proto = IPPROTO_IPIP,
+ },
+ [MLX5_TT_IPV6_IPIP] = {
+ .etype = ETH_P_IPV6,
+ .proto = IPPROTO_IPIP,
+ },
+ [MLX5_TT_IPV4_IPV6] = {
+ .etype = ETH_P_IP,
+ .proto = IPPROTO_IPV6,
+ },
+ [MLX5_TT_IPV6_IPV6] = {
+ .etype = ETH_P_IPV6,
+ .proto = IPPROTO_IPV6,
+ },
+};
+
+u8 mlx5_get_proto_by_tunnel_type(enum mlx5_tunnel_types tt)
+{
+ return ttc_tunnel_rules[tt].proto;
+}
+
+static bool mlx5_tunnel_proto_supported_rx(struct mlx5_core_dev *mdev,
+ u8 proto_type)
+{
+ switch (proto_type) {
+ case IPPROTO_GRE:
+ return MLX5_CAP_ETH(mdev, tunnel_stateless_gre);
+ case IPPROTO_IPIP:
+ case IPPROTO_IPV6:
+ return (MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip) ||
+ MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip_rx));
+ default:
+ return false;
+ }
+}
+
+static bool mlx5_tunnel_any_rx_proto_supported(struct mlx5_core_dev *mdev)
+{
+ int tt;
+
+ for (tt = 0; tt < MLX5_NUM_TUNNEL_TT; tt++) {
+ if (mlx5_tunnel_proto_supported_rx(mdev,
+ ttc_tunnel_rules[tt].proto))
+ return true;
+ }
+ return false;
+}
+
+bool mlx5_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev)
+{
+ return (mlx5_tunnel_any_rx_proto_supported(mdev) &&
+ MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
+ ft_field_support.inner_ip_version));
+}
+
+static u8 mlx5_etype_to_ipv(u16 ethertype)
+{
+ if (ethertype == ETH_P_IP)
+ return 4;
+
+ if (ethertype == ETH_P_IPV6)
+ return 6;
+
+ return 0;
+}
+
+static struct mlx5_flow_handle *
+mlx5_generate_ttc_rule(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
+ struct mlx5_flow_destination *dest, u16 etype, u8 proto)
+{
+ int match_ipv_outer =
+ MLX5_CAP_FLOWTABLE_NIC_RX(dev,
+ ft_field_support.outer_ip_version);
+ MLX5_DECLARE_FLOW_ACT(flow_act);
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ int err = 0;
+ u8 ipv;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return ERR_PTR(-ENOMEM);
+
+ if (proto) {
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, proto);
+ }
+
+ ipv = mlx5_etype_to_ipv(etype);
+ if (match_ipv_outer && ipv) {
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, ipv);
+ } else if (etype) {
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, etype);
+ }
+
+ rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(dev, "%s: add rule failed\n", __func__);
+ }
+
+ kvfree(spec);
+ return err ? ERR_PTR(err) : rule;
+}
+
+static int mlx5_generate_ttc_table_rules(struct mlx5_core_dev *dev,
+ struct ttc_params *params,
+ struct mlx5_ttc_table *ttc)
+{
+ struct mlx5_flow_handle **trules;
+ struct mlx5_ttc_rule *rules;
+ struct mlx5_flow_table *ft;
+ int tt;
+ int err;
+
+ ft = ttc->t;
+ rules = ttc->rules;
+ for (tt = 0; tt < MLX5_NUM_TT; tt++) {
+ struct mlx5_ttc_rule *rule = &rules[tt];
+
+ rule->rule = mlx5_generate_ttc_rule(dev, ft, &params->dests[tt],
+ ttc_rules[tt].etype,
+ ttc_rules[tt].proto);
+ if (IS_ERR(rule->rule)) {
+ err = PTR_ERR(rule->rule);
+ rule->rule = NULL;
+ goto del_rules;
+ }
+ rule->default_dest = params->dests[tt];
+ }
+
+ if (!params->inner_ttc || !mlx5_tunnel_inner_ft_supported(dev))
+ return 0;
+
+ trules = ttc->tunnel_rules;
+ for (tt = 0; tt < MLX5_NUM_TUNNEL_TT; tt++) {
+ if (!mlx5_tunnel_proto_supported_rx(dev,
+ ttc_tunnel_rules[tt].proto))
+ continue;
+ trules[tt] = mlx5_generate_ttc_rule(dev, ft,
+ &params->tunnel_dests[tt],
+ ttc_tunnel_rules[tt].etype,
+ ttc_tunnel_rules[tt].proto);
+ if (IS_ERR(trules[tt])) {
+ err = PTR_ERR(trules[tt]);
+ trules[tt] = NULL;
+ goto del_rules;
+ }
+ }
+
+ return 0;
+
+del_rules:
+ mlx5_cleanup_ttc_rules(ttc);
+ return err;
+}
+
+static int mlx5_create_ttc_table_groups(struct mlx5_ttc_table *ttc,
+ bool use_ipv)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ int ix = 0;
+ u32 *in;
+ int err;
+ u8 *mc;
+
+ ttc->g = kcalloc(MLX5_TTC_NUM_GROUPS, sizeof(*ttc->g), GFP_KERNEL);
+ if (!ttc->g)
+ return -ENOMEM;
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in) {
+ kfree(ttc->g);
+ ttc->g = NULL;
+ return -ENOMEM;
+ }
+
+ /* L4 Group */
+ mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
+ if (use_ipv)
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_version);
+ else
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += MLX5_TTC_GROUP1_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
+ if (IS_ERR(ttc->g[ttc->num_groups]))
+ goto err;
+ ttc->num_groups++;
+
+ /* L3 Group */
+ MLX5_SET(fte_match_param, mc, outer_headers.ip_protocol, 0);
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += MLX5_TTC_GROUP2_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
+ if (IS_ERR(ttc->g[ttc->num_groups]))
+ goto err;
+ ttc->num_groups++;
+
+ /* Any Group */
+ memset(in, 0, inlen);
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += MLX5_TTC_GROUP3_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
+ if (IS_ERR(ttc->g[ttc->num_groups]))
+ goto err;
+ ttc->num_groups++;
+
+ kvfree(in);
+ return 0;
+
+err:
+ err = PTR_ERR(ttc->g[ttc->num_groups]);
+ ttc->g[ttc->num_groups] = NULL;
+ kvfree(in);
+
+ return err;
+}
+
+static struct mlx5_flow_handle *
+mlx5_generate_inner_ttc_rule(struct mlx5_core_dev *dev,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_destination *dest,
+ u16 etype, u8 proto)
+{
+ MLX5_DECLARE_FLOW_ACT(flow_act);
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ int err = 0;
+ u8 ipv;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return ERR_PTR(-ENOMEM);
+
+ ipv = mlx5_etype_to_ipv(etype);
+ if (etype && ipv) {
+ spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, inner_headers.ip_version);
+ MLX5_SET(fte_match_param, spec->match_value, inner_headers.ip_version, ipv);
+ }
+
+ if (proto) {
+ spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, inner_headers.ip_protocol);
+ MLX5_SET(fte_match_param, spec->match_value, inner_headers.ip_protocol, proto);
+ }
+
+ rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(dev, "%s: add inner TTC rule failed\n", __func__);
+ }
+
+ kvfree(spec);
+ return err ? ERR_PTR(err) : rule;
+}
+
+static int mlx5_generate_inner_ttc_table_rules(struct mlx5_core_dev *dev,
+ struct ttc_params *params,
+ struct mlx5_ttc_table *ttc)
+{
+ struct mlx5_ttc_rule *rules;
+ struct mlx5_flow_table *ft;
+ int err;
+ int tt;
+
+ ft = ttc->t;
+ rules = ttc->rules;
+
+ for (tt = 0; tt < MLX5_NUM_TT; tt++) {
+ struct mlx5_ttc_rule *rule = &rules[tt];
+
+ rule->rule = mlx5_generate_inner_ttc_rule(dev, ft,
+ &params->dests[tt],
+ ttc_rules[tt].etype,
+ ttc_rules[tt].proto);
+ if (IS_ERR(rule->rule)) {
+ err = PTR_ERR(rule->rule);
+ rule->rule = NULL;
+ goto del_rules;
+ }
+ rule->default_dest = params->dests[tt];
+ }
+
+ return 0;
+
+del_rules:
+
+ mlx5_cleanup_ttc_rules(ttc);
+ return err;
+}
+
+static int mlx5_create_inner_ttc_table_groups(struct mlx5_ttc_table *ttc)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ int ix = 0;
+ u32 *in;
+ int err;
+ u8 *mc;
+
+ ttc->g = kcalloc(MLX5_INNER_TTC_NUM_GROUPS, sizeof(*ttc->g),
+ GFP_KERNEL);
+ if (!ttc->g)
+ return -ENOMEM;
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in) {
+ kfree(ttc->g);
+ ttc->g = NULL;
+ return -ENOMEM;
+ }
+
+ /* L4 Group */
+ mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+ MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol);
+ MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_version);
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += MLX5_INNER_TTC_GROUP1_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
+ if (IS_ERR(ttc->g[ttc->num_groups]))
+ goto err;
+ ttc->num_groups++;
+
+ /* L3 Group */
+ MLX5_SET(fte_match_param, mc, inner_headers.ip_protocol, 0);
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += MLX5_INNER_TTC_GROUP2_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
+ if (IS_ERR(ttc->g[ttc->num_groups]))
+ goto err;
+ ttc->num_groups++;
+
+ /* Any Group */
+ memset(in, 0, inlen);
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += MLX5_INNER_TTC_GROUP3_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
+ if (IS_ERR(ttc->g[ttc->num_groups]))
+ goto err;
+ ttc->num_groups++;
+
+ kvfree(in);
+ return 0;
+
+err:
+ err = PTR_ERR(ttc->g[ttc->num_groups]);
+ ttc->g[ttc->num_groups] = NULL;
+ kvfree(in);
+
+ return err;
+}
+
+struct mlx5_ttc_table *mlx5_create_inner_ttc_table(struct mlx5_core_dev *dev,
+ struct ttc_params *params)
+{
+ struct mlx5_ttc_table *ttc;
+ int err;
+
+ ttc = kvzalloc(sizeof(*ttc), GFP_KERNEL);
+ if (!ttc)
+ return ERR_PTR(-ENOMEM);
+
+ WARN_ON_ONCE(params->ft_attr.max_fte);
+ params->ft_attr.max_fte = MLX5_INNER_TTC_TABLE_SIZE;
+ ttc->t = mlx5_create_flow_table(params->ns, &params->ft_attr);
+ if (IS_ERR(ttc->t)) {
+ err = PTR_ERR(ttc->t);
+ kvfree(ttc);
+ return ERR_PTR(err);
+ }
+
+ err = mlx5_create_inner_ttc_table_groups(ttc);
+ if (err)
+ goto destroy_ft;
+
+ err = mlx5_generate_inner_ttc_table_rules(dev, params, ttc);
+ if (err)
+ goto destroy_ft;
+
+ return ttc;
+
+destroy_ft:
+ mlx5_destroy_ttc_table(ttc);
+ return ERR_PTR(err);
+}
+
+void mlx5_destroy_ttc_table(struct mlx5_ttc_table *ttc)
+{
+ int i;
+
+ mlx5_cleanup_ttc_rules(ttc);
+ for (i = ttc->num_groups - 1; i >= 0; i--) {
+ if (!IS_ERR_OR_NULL(ttc->g[i]))
+ mlx5_destroy_flow_group(ttc->g[i]);
+ ttc->g[i] = NULL;
+ }
+
+ kfree(ttc->g);
+ mlx5_destroy_flow_table(ttc->t);
+ kvfree(ttc);
+}
+
+struct mlx5_ttc_table *mlx5_create_ttc_table(struct mlx5_core_dev *dev,
+ struct ttc_params *params)
+{
+ bool match_ipv_outer =
+ MLX5_CAP_FLOWTABLE_NIC_RX(dev,
+ ft_field_support.outer_ip_version);
+ struct mlx5_ttc_table *ttc;
+ int err;
+
+ ttc = kvzalloc(sizeof(*ttc), GFP_KERNEL);
+ if (!ttc)
+ return ERR_PTR(-ENOMEM);
+
+ WARN_ON_ONCE(params->ft_attr.max_fte);
+ params->ft_attr.max_fte = MLX5_TTC_TABLE_SIZE;
+ ttc->t = mlx5_create_flow_table(params->ns, &params->ft_attr);
+ if (IS_ERR(ttc->t)) {
+ err = PTR_ERR(ttc->t);
+ kvfree(ttc);
+ return ERR_PTR(err);
+ }
+
+ err = mlx5_create_ttc_table_groups(ttc, match_ipv_outer);
+ if (err)
+ goto destroy_ft;
+
+ err = mlx5_generate_ttc_table_rules(dev, params, ttc);
+ if (err)
+ goto destroy_ft;
+
+ return ttc;
+
+destroy_ft:
+ mlx5_destroy_ttc_table(ttc);
+ return ERR_PTR(err);
+}
+
+int mlx5_ttc_fwd_dest(struct mlx5_ttc_table *ttc, enum mlx5_traffic_types type,
+ struct mlx5_flow_destination *new_dest)
+{
+ return mlx5_modify_rule_destination(ttc->rules[type].rule, new_dest,
+ NULL);
+}
+
+struct mlx5_flow_destination
+mlx5_ttc_get_default_dest(struct mlx5_ttc_table *ttc,
+ enum mlx5_traffic_types type)
+{
+ struct mlx5_flow_destination *dest = &ttc->rules[type].default_dest;
+
+ WARN_ONCE(dest->type != MLX5_FLOW_DESTINATION_TYPE_TIR,
+ "TTC[%d] default dest is not setup yet", type);
+
+ return *dest;
+}
+
+int mlx5_ttc_fwd_default_dest(struct mlx5_ttc_table *ttc,
+ enum mlx5_traffic_types type)
+{
+ struct mlx5_flow_destination dest = mlx5_ttc_get_default_dest(ttc, type);
+
+ return mlx5_ttc_fwd_dest(ttc, type, &dest);
+}
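Closing the new file, a hedged sketch of how the exported API composes (editorial; the namespace, destination array and all example_* names are placeholders):

/* Hypothetical consumer of the new TTC library; not part of the patch. */
static struct mlx5_ttc_table *
example_setup_ttc(struct mlx5_core_dev *dev, struct mlx5_flow_namespace *ns,
		  const struct mlx5_flow_destination *dests)
{
	struct ttc_params params = {};
	int tt;

	params.ns = ns;
	for (tt = 0; tt < MLX5_NUM_TT; tt++)
		params.dests[tt] = dests[tt];

	/* Returns ERR_PTR() on failure, like the rest of the API. */
	return mlx5_create_ttc_table(dev, &params);
}

A caller can later retarget one type with mlx5_ttc_fwd_dest(ttc, MLX5_TT_IPV4_TCP, &new_dest) and restore it with mlx5_ttc_fwd_default_dest(ttc, MLX5_TT_IPV4_TCP), without rebuilding the table.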
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h
new file mode 100644
index 000000000000..ce95be8f8382
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020 Mellanox Technologies. */
+
+#ifndef __MLX5_FS_TTC_H__
+#define __MLX5_FS_TTC_H__
+
+#include <linux/mlx5/fs.h>
+
+enum mlx5_traffic_types {
+ MLX5_TT_IPV4_TCP,
+ MLX5_TT_IPV6_TCP,
+ MLX5_TT_IPV4_UDP,
+ MLX5_TT_IPV6_UDP,
+ MLX5_TT_IPV4_IPSEC_AH,
+ MLX5_TT_IPV6_IPSEC_AH,
+ MLX5_TT_IPV4_IPSEC_ESP,
+ MLX5_TT_IPV6_IPSEC_ESP,
+ MLX5_TT_IPV4,
+ MLX5_TT_IPV6,
+ MLX5_TT_ANY,
+ MLX5_NUM_TT,
+ MLX5_NUM_INDIR_TIRS = MLX5_TT_ANY,
+};
+
+enum mlx5_tunnel_types {
+ MLX5_TT_IPV4_GRE,
+ MLX5_TT_IPV6_GRE,
+ MLX5_TT_IPV4_IPIP,
+ MLX5_TT_IPV6_IPIP,
+ MLX5_TT_IPV4_IPV6,
+ MLX5_TT_IPV6_IPV6,
+ MLX5_NUM_TUNNEL_TT,
+};
+
+struct mlx5_ttc_rule {
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_destination default_dest;
+};
+
+struct mlx5_ttc_table;
+
+struct ttc_params {
+ struct mlx5_flow_namespace *ns;
+ struct mlx5_flow_table_attr ft_attr;
+ struct mlx5_flow_destination dests[MLX5_NUM_TT];
+ bool inner_ttc;
+ struct mlx5_flow_destination tunnel_dests[MLX5_NUM_TUNNEL_TT];
+};
+
+struct mlx5_flow_table *mlx5_get_ttc_flow_table(struct mlx5_ttc_table *ttc);
+
+struct mlx5_ttc_table *mlx5_create_ttc_table(struct mlx5_core_dev *dev,
+ struct ttc_params *params);
+void mlx5_destroy_ttc_table(struct mlx5_ttc_table *ttc);
+
+struct mlx5_ttc_table *mlx5_create_inner_ttc_table(struct mlx5_core_dev *dev,
+ struct ttc_params *params);
+
+int mlx5_ttc_fwd_dest(struct mlx5_ttc_table *ttc, enum mlx5_traffic_types type,
+ struct mlx5_flow_destination *new_dest);
+struct mlx5_flow_destination
+mlx5_ttc_get_default_dest(struct mlx5_ttc_table *ttc,
+ enum mlx5_traffic_types type);
+int mlx5_ttc_fwd_default_dest(struct mlx5_ttc_table *ttc,
+ enum mlx5_traffic_types type);
+
+bool mlx5_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev);
+u8 mlx5_get_proto_by_tunnel_type(enum mlx5_tunnel_types tt);
+
+#endif /* __MLX5_FS_TTC_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
index 38084400ee8f..e3b0a131c3e1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
@@ -40,7 +40,7 @@
struct mlx5_vxlan {
struct mlx5_core_dev *mdev;
- /* max_num_ports is usuallly 4, 16 buckets is more than enough */
+ /* max_num_ports is usually 4, 16 buckets is more than enough */
DECLARE_HASHTABLE(htable, 4);
struct mutex sync_lock; /* sync add/del port HW operations */
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index c84ad87c99bb..80cabf9b1787 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -389,11 +389,11 @@ static int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev,
switch (cap_mode) {
case HCA_CAP_OPMOD_GET_MAX:
- memcpy(dev->caps.hca_max[cap_type], hca_caps,
+ memcpy(dev->caps.hca[cap_type]->max, hca_caps,
MLX5_UN_SZ_BYTES(hca_cap_union));
break;
case HCA_CAP_OPMOD_GET_CUR:
- memcpy(dev->caps.hca_cur[cap_type], hca_caps,
+ memcpy(dev->caps.hca[cap_type]->cur, hca_caps,
MLX5_UN_SZ_BYTES(hca_cap_union));
break;
default:
@@ -469,7 +469,7 @@ static int handle_hca_cap_odp(struct mlx5_core_dev *dev, void *set_ctx)
return err;
set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);
- memcpy(set_hca_cap, dev->caps.hca_cur[MLX5_CAP_ODP],
+ memcpy(set_hca_cap, dev->caps.hca[MLX5_CAP_ODP]->cur,
MLX5_ST_SZ_BYTES(odp_cap));
#define ODP_CAP_SET_MAX(dev, field) \
@@ -514,7 +514,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx,
capability);
- memcpy(set_hca_cap, dev->caps.hca_cur[MLX5_CAP_GENERAL],
+ memcpy(set_hca_cap, dev->caps.hca[MLX5_CAP_GENERAL]->cur,
MLX5_ST_SZ_BYTES(cmd_hca_cap));
mlx5_core_dbg(dev, "Current Pkey table size %d Setting new size %d\n",
@@ -596,7 +596,7 @@ static int handle_hca_cap_roce(struct mlx5_core_dev *dev, void *set_ctx)
return 0;
set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);
- memcpy(set_hca_cap, dev->caps.hca_cur[MLX5_CAP_ROCE],
+ memcpy(set_hca_cap, dev->caps.hca[MLX5_CAP_ROCE]->cur,
MLX5_ST_SZ_BYTES(roce_cap));
MLX5_SET(roce_cap, set_hca_cap, sw_r_roce_src_udp_port, 1);
@@ -748,14 +748,12 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
static int mlx5_pci_init(struct mlx5_core_dev *dev, struct pci_dev *pdev,
const struct pci_device_id *id)
{
- struct mlx5_priv *priv = &dev->priv;
int err = 0;
mutex_init(&dev->pci_status_mutex);
pci_set_drvdata(dev->pdev, dev);
dev->bar_addr = pci_resource_start(pdev, 0);
- priv->numa_node = dev_to_node(mlx5_core_dma_dev(dev));
err = mlx5_pci_enable_device(dev);
if (err) {
@@ -1179,6 +1177,7 @@ static int mlx5_load(struct mlx5_core_dev *dev)
goto err_ec;
}
+ mlx5_lag_add_mdev(dev);
err = mlx5_sriov_attach(dev);
if (err) {
mlx5_core_err(dev, "sriov init failed %d\n", err);
@@ -1186,11 +1185,11 @@ static int mlx5_load(struct mlx5_core_dev *dev)
}
mlx5_sf_dev_table_create(dev);
- mlx5_lag_add_mdev(dev);
return 0;
err_sriov:
+ mlx5_lag_remove_mdev(dev);
mlx5_ec_cleanup(dev);
err_ec:
mlx5_sf_hw_table_destroy(dev);
@@ -1222,9 +1221,9 @@ err_irq_table:
static void mlx5_unload(struct mlx5_core_dev *dev)
{
- mlx5_lag_remove_mdev(dev);
mlx5_sf_dev_table_destroy(dev);
mlx5_sriov_detach(dev);
+ mlx5_lag_remove_mdev(dev);
mlx5_ec_cleanup(dev);
mlx5_sf_hw_table_destroy(dev);
mlx5_vhca_event_stop(dev);
@@ -1248,11 +1247,6 @@ int mlx5_init_one(struct mlx5_core_dev *dev)
int err = 0;
mutex_lock(&dev->intf_state_mutex);
- if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
- mlx5_core_warn(dev, "interface is up, NOP\n");
- goto out;
- }
- /* remove any previous indication of internal error */
dev->state = MLX5_DEVICE_STATE_UP;
err = mlx5_function_setup(dev, true);
@@ -1271,7 +1265,7 @@ int mlx5_init_one(struct mlx5_core_dev *dev)
set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
- err = mlx5_devlink_register(priv_to_devlink(dev), dev->device);
+ err = mlx5_devlink_register(priv_to_devlink(dev));
if (err)
goto err_devlink_reg;
@@ -1293,7 +1287,6 @@ function_teardown:
mlx5_function_teardown(dev, true);
err_function:
dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
-out:
mutex_unlock(&dev->intf_state_mutex);
return err;
}
@@ -1380,6 +1373,60 @@ out:
mutex_unlock(&dev->intf_state_mutex);
}
+static const int types[] = {
+ MLX5_CAP_GENERAL,
+ MLX5_CAP_GENERAL_2,
+ MLX5_CAP_ETHERNET_OFFLOADS,
+ MLX5_CAP_IPOIB_ENHANCED_OFFLOADS,
+ MLX5_CAP_ODP,
+ MLX5_CAP_ATOMIC,
+ MLX5_CAP_ROCE,
+ MLX5_CAP_IPOIB_OFFLOADS,
+ MLX5_CAP_FLOW_TABLE,
+ MLX5_CAP_ESWITCH_FLOW_TABLE,
+ MLX5_CAP_ESWITCH,
+ MLX5_CAP_VECTOR_CALC,
+ MLX5_CAP_QOS,
+ MLX5_CAP_DEBUG,
+ MLX5_CAP_DEV_MEM,
+ MLX5_CAP_DEV_EVENT,
+ MLX5_CAP_TLS,
+ MLX5_CAP_VDPA_EMULATION,
+ MLX5_CAP_IPSEC,
+};
+
+static void mlx5_hca_caps_free(struct mlx5_core_dev *dev)
+{
+ int type;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(types); i++) {
+ type = types[i];
+ kfree(dev->caps.hca[type]);
+ }
+}
+
+static int mlx5_hca_caps_alloc(struct mlx5_core_dev *dev)
+{
+ struct mlx5_hca_cap *cap;
+ int type;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(types); i++) {
+ cap = kzalloc(sizeof(*cap), GFP_KERNEL);
+ if (!cap)
+ goto err;
+ type = types[i];
+ dev->caps.hca[type] = cap;
+ }
+
+ return 0;
+
+err:
+ mlx5_hca_caps_free(dev);
+ return -ENOMEM;
+}
+
int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
{
struct mlx5_priv *priv = &dev->priv;
@@ -1399,6 +1446,7 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
mutex_init(&priv->pgdir_mutex);
INIT_LIST_HEAD(&priv->pgdir_list);
+ priv->numa_node = dev_to_node(mlx5_core_dma_dev(dev));
priv->dbg_root = debugfs_create_dir(dev_name(dev->device),
mlx5_debugfs_root);
INIT_LIST_HEAD(&priv->traps);
@@ -1415,8 +1463,14 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
if (err)
goto err_adev_init;
+ err = mlx5_hca_caps_alloc(dev);
+ if (err)
+ goto err_hca_caps;
+
return 0;
+err_hca_caps:
+ mlx5_adev_cleanup(dev);
err_adev_init:
mlx5_pagealloc_cleanup(dev);
err_pagealloc_init:
@@ -1435,6 +1489,7 @@ void mlx5_mdev_uninit(struct mlx5_core_dev *dev)
{
struct mlx5_priv *priv = &dev->priv;
+ mlx5_hca_caps_free(dev);
mlx5_adev_cleanup(dev);
mlx5_pagealloc_cleanup(dev);
mlx5_health_cleanup(dev);
@@ -1452,7 +1507,7 @@ static int probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
struct devlink *devlink;
int err;
- devlink = mlx5_devlink_alloc();
+ devlink = mlx5_devlink_alloc(&pdev->dev);
if (!devlink) {
dev_err(&pdev->dev, "devlink alloc failed\n");
return -ENOMEM;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index da365b8f0141..230eab7e3bc9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -168,6 +168,8 @@ void mlx5_lag_add_netdev(struct mlx5_core_dev *dev, struct net_device *netdev);
void mlx5_lag_remove_netdev(struct mlx5_core_dev *dev, struct net_device *netdev);
void mlx5_lag_add_mdev(struct mlx5_core_dev *dev);
void mlx5_lag_remove_mdev(struct mlx5_core_dev *dev);
+void mlx5_lag_disable_change(struct mlx5_core_dev *dev);
+void mlx5_lag_enable_change(struct mlx5_core_dev *dev);
int mlx5_events_init(struct mlx5_core_dev *dev);
void mlx5_events_cleanup(struct mlx5_core_dev *dev);
@@ -275,4 +277,9 @@ static inline u32 mlx5_sriov_get_vf_total_msix(struct pci_dev *pdev)
return MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix);
}
+
+bool mlx5_eth_supported(struct mlx5_core_dev *dev);
+bool mlx5_rdma_supported(struct mlx5_core_dev *dev);
+bool mlx5_vnet_supported(struct mlx5_core_dev *dev);
+
#endif /* __MLX5_CORE_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index 3465b363fc2f..c79a10b3454d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -18,7 +18,7 @@
#define MLX5_SFS_PER_CTRL_IRQ 64
#define MLX5_IRQ_CTRL_SF_MAX 8
-/* min num of vectores for SFs to be enabled */
+/* min num of vectors for SFs to be enabled */
#define MLX5_IRQ_VEC_COMP_BASE_SF 2
#define MLX5_EQ_SHARE_IRQ_MAX_COMP (8)
@@ -28,13 +28,13 @@
#define MLX5_EQ_REFS_PER_IRQ (2)
struct mlx5_irq {
- u32 index;
struct atomic_notifier_head nh;
cpumask_var_t mask;
char name[MLX5_MAX_IRQ_NAME];
- struct kref kref;
- int irqn;
struct mlx5_irq_pool *pool;
+ int refcount;
+ u32 index;
+ int irqn;
};
struct mlx5_irq_pool {
@@ -138,9 +138,8 @@ out:
return ret;
}
-static void irq_release(struct kref *kref)
+static void irq_release(struct mlx5_irq *irq)
{
- struct mlx5_irq *irq = container_of(kref, struct mlx5_irq, kref);
struct mlx5_irq_pool *pool = irq->pool;
xa_erase(&pool->irqs, irq->index);
@@ -159,10 +158,31 @@ static void irq_put(struct mlx5_irq *irq)
struct mlx5_irq_pool *pool = irq->pool;
mutex_lock(&pool->lock);
- kref_put(&irq->kref, irq_release);
+ irq->refcount--;
+ if (!irq->refcount)
+ irq_release(irq);
mutex_unlock(&pool->lock);
}
+static int irq_get_locked(struct mlx5_irq *irq)
+{
+ lockdep_assert_held(&irq->pool->lock);
+ if (WARN_ON_ONCE(!irq->refcount))
+ return 0;
+ irq->refcount++;
+ return 1;
+}
+
+static int irq_get(struct mlx5_irq *irq)
+{
+ int err;
+
+ mutex_lock(&irq->pool->lock);
+ err = irq_get_locked(irq);
+ mutex_unlock(&irq->pool->lock);
+ return err;
+}
+
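An editorial aside on the kref-to-plain-int conversion in this hunk: every get and put is serialized by pool->lock, so atomic reference counting buys nothing. A minimal distillation of the adopted pattern, with hypothetical names:

/* Sketch only, not from the patch: a refcount touched exclusively
 * under a mutex can be a plain int; WARN_ON_ONCE stands in for
 * kref_get_unless_zero() to catch resurrecting a released object.
 */
struct example_obj {
	struct mutex *lock;	/* stands in for &pool->lock */
	int refcount;
};

static int example_get_locked(struct example_obj *obj)
{
	lockdep_assert_held(obj->lock);
	if (WARN_ON_ONCE(!obj->refcount))
		return 0;
	obj->refcount++;
	return 1;
}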
static irqreturn_t irq_int_handler(int irq, void *nh)
{
atomic_notifier_call_chain(nh, 0, NULL);
@@ -215,7 +235,7 @@ static struct mlx5_irq *irq_request(struct mlx5_irq_pool *pool, int i)
goto err_cpumask;
}
irq->pool = pool;
- kref_init(&irq->kref);
+ irq->refcount = 1;
irq->index = i;
err = xa_err(xa_store(&pool->irqs, irq->index, irq, GFP_KERNEL));
if (err) {
@@ -235,18 +255,18 @@ err_req_irq:
int mlx5_irq_attach_nb(struct mlx5_irq *irq, struct notifier_block *nb)
{
- int err;
+ int ret;
- err = kref_get_unless_zero(&irq->kref);
- if (WARN_ON_ONCE(!err))
+ ret = irq_get(irq);
+ if (!ret)
/* Something has gone very wrong here: we are enabling an EQ
* on a non-existing IRQ.
*/
return -ENOENT;
- err = atomic_notifier_chain_register(&irq->nh, nb);
- if (err)
+ ret = atomic_notifier_chain_register(&irq->nh, nb);
+ if (ret)
irq_put(irq);
- return err;
+ return ret;
}
int mlx5_irq_detach_nb(struct mlx5_irq *irq, struct notifier_block *nb)
@@ -304,10 +324,9 @@ static struct mlx5_irq *irq_pool_find_least_loaded(struct mlx5_irq_pool *pool,
xa_for_each_range(&pool->irqs, index, iter, start, end) {
if (!cpumask_equal(iter->mask, affinity))
continue;
- if (kref_read(&iter->kref) < pool->min_threshold)
+ if (iter->refcount < pool->min_threshold)
return iter;
- if (!irq || kref_read(&iter->kref) <
- kref_read(&irq->kref))
+ if (!irq || iter->refcount < irq->refcount)
irq = iter;
}
return irq;
@@ -322,7 +341,7 @@ static struct mlx5_irq *irq_pool_request_affinity(struct mlx5_irq_pool *pool,
mutex_lock(&pool->lock);
least_loaded_irq = irq_pool_find_least_loaded(pool, affinity);
if (least_loaded_irq &&
- kref_read(&least_loaded_irq->kref) < pool->min_threshold)
+ least_loaded_irq->refcount < pool->min_threshold)
goto out;
new_irq = irq_pool_create_irq(pool, affinity);
if (IS_ERR(new_irq)) {
@@ -340,11 +359,11 @@ static struct mlx5_irq *irq_pool_request_affinity(struct mlx5_irq_pool *pool,
least_loaded_irq = new_irq;
goto unlock;
out:
- kref_get(&least_loaded_irq->kref);
- if (kref_read(&least_loaded_irq->kref) > pool->max_threshold)
+ irq_get_locked(least_loaded_irq);
+ if (least_loaded_irq->refcount > pool->max_threshold)
mlx5_core_dbg(pool->dev, "IRQ %u overloaded, pool_name: %s, %u EQs on this irq\n",
least_loaded_irq->irqn, pool->name,
- kref_read(&least_loaded_irq->kref) / MLX5_EQ_REFS_PER_IRQ);
+ least_loaded_irq->refcount / MLX5_EQ_REFS_PER_IRQ);
unlock:
mutex_unlock(&pool->lock);
return least_loaded_irq;
@@ -360,7 +379,7 @@ irq_pool_request_vector(struct mlx5_irq_pool *pool, int vecidx,
mutex_lock(&pool->lock);
irq = xa_load(&pool->irqs, vecidx);
if (irq) {
- kref_get(&irq->kref);
+ irq_get_locked(irq);
goto unlock;
}
irq = irq_request(pool, vecidx);
@@ -427,7 +446,7 @@ out:
return irq;
mlx5_core_dbg(dev, "irq %u mapped to cpu %*pbl, %u EQs on this irq\n",
irq->irqn, cpumask_pr_args(affinity),
- kref_read(&irq->kref) / MLX5_EQ_REFS_PER_IRQ);
+ irq->refcount / MLX5_EQ_REFS_PER_IRQ);
return irq;
}
@@ -459,8 +478,12 @@ static void irq_pool_free(struct mlx5_irq_pool *pool)
struct mlx5_irq *irq;
unsigned long index;
+ /* There are cases in which we are destroying the irq_table before
+ * freeing all the IRQs, fast teardown for example. Hence, free the irqs
+ * which might not have been freed.
+ */
xa_for_each(&pool->irqs, index, irq)
- irq_release(&irq->kref);
+ irq_release(irq);
xa_destroy(&pool->irqs);
mutex_destroy(&pool->lock);
kvfree(pool);
@@ -483,7 +506,7 @@ static int irq_pools_init(struct mlx5_core_dev *dev, int sf_vec, int pf_vec)
if (!mlx5_sf_max_functions(dev))
return 0;
if (sf_vec < MLX5_IRQ_VEC_COMP_BASE_SF) {
- mlx5_core_err(dev, "Not enough IRQs for SFs. SF may run at lower performance\n");
+ mlx5_core_dbg(dev, "Not enough IRQs for SFs. SF may run at lower performance\n");
return 0;
}
@@ -601,7 +624,7 @@ void mlx5_irq_table_destroy(struct mlx5_core_dev *dev)
return;
/* There are cases where IRQs will still be in use when we get
- * to here. Hence, making sure all the irqs are realeased.
+ * to here. Hence, make sure all the irqs are released.
*/
irq_pools_destroy(table);
pci_free_irq_vectors(dev->pdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
index fa0288afc0dd..871c2fbe18d3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
@@ -39,7 +39,7 @@ static ssize_t sfnum_show(struct device *dev, struct device_attribute *attr, cha
struct auxiliary_device *adev = container_of(dev, struct auxiliary_device, dev);
struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev);
- return scnprintf(buf, PAGE_SIZE, "%u\n", sf_dev->sfnum);
+ return sysfs_emit(buf, "%u\n", sf_dev->sfnum);
}
static DEVICE_ATTR_RO(sfnum);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
index 42c8ee03fe3e..052f48068dc1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
@@ -14,7 +14,7 @@ static int mlx5_sf_dev_probe(struct auxiliary_device *adev, const struct auxilia
struct devlink *devlink;
int err;
- devlink = mlx5_devlink_alloc();
+ devlink = mlx5_devlink_alloc(&adev->dev);
if (!devlink)
return -ENOMEM;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
index 1be048769309..13891fdc607e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
@@ -164,12 +164,12 @@ static bool mlx5_sf_is_active(const struct mlx5_sf *sf)
return sf->hw_state == MLX5_VHCA_STATE_ACTIVE || sf->hw_state == MLX5_VHCA_STATE_IN_USE;
}
-int mlx5_devlink_sf_port_fn_state_get(struct devlink *devlink, struct devlink_port *dl_port,
+int mlx5_devlink_sf_port_fn_state_get(struct devlink_port *dl_port,
enum devlink_port_fn_state *state,
enum devlink_port_fn_opstate *opstate,
struct netlink_ext_ack *extack)
{
- struct mlx5_core_dev *dev = devlink_priv(devlink);
+ struct mlx5_core_dev *dev = devlink_priv(dl_port->devlink);
struct mlx5_sf_table *table;
struct mlx5_sf *sf;
int err = 0;
@@ -248,11 +248,11 @@ out:
return err;
}
-int mlx5_devlink_sf_port_fn_state_set(struct devlink *devlink, struct devlink_port *dl_port,
+int mlx5_devlink_sf_port_fn_state_set(struct devlink_port *dl_port,
enum devlink_port_fn_state state,
struct netlink_ext_ack *extack)
{
- struct mlx5_core_dev *dev = devlink_priv(devlink);
+ struct mlx5_core_dev *dev = devlink_priv(dl_port->devlink);
struct mlx5_sf_table *table;
struct mlx5_sf *sf;
int err;
@@ -476,7 +476,7 @@ static void mlx5_sf_table_disable(struct mlx5_sf_table *table)
return;
/* Balances with refcount_set; drop the reference so that new user cmd cannot start
- * and new vhca event handler cannnot run.
+ * and new vhca event handler cannot run.
*/
mlx5_sf_table_put(table);
wait_for_completion(&table->disable_complete);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h
index 81ce13b19ee8..3a480e06ecc0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h
@@ -24,11 +24,11 @@ int mlx5_devlink_sf_port_new(struct devlink *devlink,
unsigned int *new_port_index);
int mlx5_devlink_sf_port_del(struct devlink *devlink, unsigned int port_index,
struct netlink_ext_ack *extack);
-int mlx5_devlink_sf_port_fn_state_get(struct devlink *devlink, struct devlink_port *dl_port,
+int mlx5_devlink_sf_port_fn_state_get(struct devlink_port *dl_port,
enum devlink_port_fn_state *state,
enum devlink_port_fn_opstate *opstate,
struct netlink_ext_ack *extack);
-int mlx5_devlink_sf_port_fn_state_set(struct devlink *devlink, struct devlink_port *dl_port,
+int mlx5_devlink_sf_port_fn_state_set(struct devlink_port *dl_port,
enum devlink_port_fn_state state,
struct netlink_ext_ack *extack);
#else
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
index 9df0e73d1c35..8a1623a4d8bc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
@@ -789,7 +789,7 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
MLX5_SET(cqc, cqc, log_cq_size, ilog2(ncqe));
- MLX5_SET(cqc, cqc, c_eqn, eqn);
+ MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
MLX5_SET(cqc, cqc, uar_page, uar->index);
MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
MLX5_ADAPTER_PAGE_SHIFT);
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
index a0a059e0154f..d22219613719 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
@@ -199,7 +199,7 @@ static int mlxbf_gige_stop(struct net_device *netdev)
return 0;
}
-static int mlxbf_gige_do_ioctl(struct net_device *netdev,
+static int mlxbf_gige_eth_ioctl(struct net_device *netdev,
struct ifreq *ifr, int cmd)
{
if (!(netif_running(netdev)))
@@ -253,7 +253,7 @@ static const struct net_device_ops mlxbf_gige_netdev_ops = {
.ndo_start_xmit = mlxbf_gige_start_xmit,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = mlxbf_gige_do_ioctl,
+ .ndo_eth_ioctl = mlxbf_gige_eth_ioctl,
.ndo_set_rx_mode = mlxbf_gige_set_rx_mode,
.ndo_get_stats64 = mlxbf_gige_get_stats64,
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
index 12871c8dc7c1..d1ae248e125c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
@@ -58,10 +58,10 @@ config MLXSW_SPECTRUM
depends on NET_IPGRE || NET_IPGRE=n
depends on IPV6_GRE || IPV6_GRE=n
depends on VXLAN || VXLAN=n
+ depends on PTP_1588_CLOCK_OPTIONAL
select GENERIC_ALLOCATOR
select PARMAN
select OBJAGG
- imply PTP_1588_CLOCK
select NET_PTP_CLASSIFY if PTP_1588_CLOCK
default m
help
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index e775f08fb464..f080fab3de2b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -1927,7 +1927,8 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
if (!reload) {
alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size;
- devlink = devlink_alloc(&mlxsw_devlink_ops, alloc_size);
+ devlink = devlink_alloc(&mlxsw_devlink_ops, alloc_size,
+ mlxsw_bus_info->dev);
if (!devlink) {
err = -ENOMEM;
goto err_devlink_alloc;
@@ -1974,7 +1975,7 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
goto err_emad_init;
if (!reload) {
- err = devlink_register(devlink, mlxsw_bus_info->dev);
+ err = devlink_register(devlink);
if (err)
goto err_devlink_register;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 88699e678544..081408e892d5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -1207,7 +1207,7 @@ static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
.ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid,
.ndo_set_features = mlxsw_sp_set_features,
.ndo_get_devlink_port = mlxsw_sp_port_get_devlink_port,
- .ndo_do_ioctl = mlxsw_sp_port_ioctl,
+ .ndo_eth_ioctl = mlxsw_sp_port_ioctl,
};
static int
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 8f90cd323d5f..22fede5cb32c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -335,14 +335,16 @@ mlxsw_sp_bridge_port_find(struct mlxsw_sp_bridge *bridge,
static struct mlxsw_sp_bridge_port *
mlxsw_sp_bridge_port_create(struct mlxsw_sp_bridge_device *bridge_device,
- struct net_device *brport_dev)
+ struct net_device *brport_dev,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp_bridge_port *bridge_port;
struct mlxsw_sp_port *mlxsw_sp_port;
+ int err;
bridge_port = kzalloc(sizeof(*bridge_port), GFP_KERNEL);
if (!bridge_port)
- return NULL;
+ return ERR_PTR(-ENOMEM);
mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(brport_dev);
bridge_port->lagged = mlxsw_sp_port->lagged;
@@ -359,12 +361,23 @@ mlxsw_sp_bridge_port_create(struct mlxsw_sp_bridge_device *bridge_device,
list_add(&bridge_port->list, &bridge_device->ports_list);
bridge_port->ref_count = 1;
+ err = switchdev_bridge_port_offload(brport_dev, mlxsw_sp_port->dev,
+ NULL, NULL, NULL, false, extack);
+ if (err)
+ goto err_switchdev_offload;
+
return bridge_port;
+
+err_switchdev_offload:
+ list_del(&bridge_port->list);
+ kfree(bridge_port);
+ return ERR_PTR(err);
}
static void
mlxsw_sp_bridge_port_destroy(struct mlxsw_sp_bridge_port *bridge_port)
{
+ switchdev_bridge_port_unoffload(bridge_port->dev, NULL, NULL, NULL);
list_del(&bridge_port->list);
WARN_ON(!list_empty(&bridge_port->vlans_list));
kfree(bridge_port);
@@ -390,9 +403,10 @@ mlxsw_sp_bridge_port_get(struct mlxsw_sp_bridge *bridge,
if (IS_ERR(bridge_device))
return ERR_CAST(bridge_device);
- bridge_port = mlxsw_sp_bridge_port_create(bridge_device, brport_dev);
- if (!bridge_port) {
- err = -ENOMEM;
+ bridge_port = mlxsw_sp_bridge_port_create(bridge_device, brport_dev,
+ extack);
+ if (IS_ERR(bridge_port)) {
+ err = PTR_ERR(bridge_port);
goto err_bridge_port_create;
}
@@ -1569,7 +1583,6 @@ mlxsw_sp_mc_write_mdb_entry(struct mlxsw_sp *mlxsw_sp,
{
long *flood_bitmap;
int num_of_ports;
- int alloc_size;
u16 mid_idx;
int err;
@@ -1579,18 +1592,17 @@ mlxsw_sp_mc_write_mdb_entry(struct mlxsw_sp *mlxsw_sp,
return false;
num_of_ports = mlxsw_core_max_ports(mlxsw_sp->core);
- alloc_size = sizeof(long) * BITS_TO_LONGS(num_of_ports);
- flood_bitmap = kzalloc(alloc_size, GFP_KERNEL);
+ flood_bitmap = bitmap_alloc(num_of_ports, GFP_KERNEL);
if (!flood_bitmap)
return false;
- bitmap_copy(flood_bitmap, mid->ports_in_mid, num_of_ports);
+ bitmap_copy(flood_bitmap, mid->ports_in_mid, num_of_ports);
mlxsw_sp_mc_get_mrouters_bitmap(flood_bitmap, bridge_device, mlxsw_sp);
mid->mid = mid_idx;
err = mlxsw_sp_port_smid_full_entry(mlxsw_sp, mid_idx, flood_bitmap,
bridge_device->mrouter);
- kfree(flood_bitmap);
+ bitmap_free(flood_bitmap);
if (err)
return false;
diff --git a/drivers/net/ethernet/micrel/ks8851_common.c b/drivers/net/ethernet/micrel/ks8851_common.c
index 831518466de2..3f69bb59ba49 100644
--- a/drivers/net/ethernet/micrel/ks8851_common.c
+++ b/drivers/net/ethernet/micrel/ks8851_common.c
@@ -689,7 +689,7 @@ static int ks8851_net_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
static const struct net_device_ops ks8851_netdev_ops = {
.ndo_open = ks8851_net_open,
.ndo_stop = ks8851_net_stop,
- .ndo_do_ioctl = ks8851_net_ioctl,
+ .ndo_eth_ioctl = ks8851_net_ioctl,
.ndo_start_xmit = ks8851_start_xmit,
.ndo_set_mac_address = ks8851_set_mac_address,
.ndo_set_rx_mode = ks8851_set_rx_mode,
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index 7945eb5e2fe8..a0ee155f9f51 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -6738,7 +6738,7 @@ static const struct net_device_ops netdev_ops = {
.ndo_set_features = netdev_set_features,
.ndo_set_mac_address = netdev_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = netdev_ioctl,
+ .ndo_eth_ioctl = netdev_ioctl,
.ndo_set_rx_mode = netdev_set_rx_mode,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = netdev_netpoll,
diff --git a/drivers/net/ethernet/microchip/Kconfig b/drivers/net/ethernet/microchip/Kconfig
index d54aa164c4e9..735eea1dacf1 100644
--- a/drivers/net/ethernet/microchip/Kconfig
+++ b/drivers/net/ethernet/microchip/Kconfig
@@ -45,6 +45,7 @@ config ENCX24J600
config LAN743X
tristate "LAN743x support"
depends on PCI
+ depends on PTP_1588_CLOCK_OPTIONAL
select PHYLIB
select CRC16
select CRC32
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index dae10328c6cf..9e8561cdc32a 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -2655,7 +2655,7 @@ static const struct net_device_ops lan743x_netdev_ops = {
.ndo_open = lan743x_netdev_open,
.ndo_stop = lan743x_netdev_close,
.ndo_start_xmit = lan743x_netdev_xmit_frame,
- .ndo_do_ioctl = lan743x_netdev_ioctl,
+ .ndo_eth_ioctl = lan743x_netdev_ioctl,
.ndo_set_rx_mode = lan743x_netdev_set_multicast,
.ndo_change_mtu = lan743x_netdev_change_mtu,
.ndo_get_stats64 = lan743x_netdev_get_stats64,
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c b/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
index a72e3b3b596e..649ca609884a 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
@@ -93,9 +93,12 @@ static int sparx5_port_attr_set(struct net_device *dev, const void *ctx,
}
static int sparx5_port_bridge_join(struct sparx5_port *port,
- struct net_device *bridge)
+ struct net_device *bridge,
+ struct netlink_ext_ack *extack)
{
struct sparx5 *sparx5 = port->sparx5;
+ struct net_device *ndev = port->ndev;
+ int err;
if (bitmap_empty(sparx5->bridge_mask, SPX5_PORTS))
/* First bridged port */
@@ -109,12 +112,21 @@ static int sparx5_port_bridge_join(struct sparx5_port *port,
set_bit(port->portno, sparx5->bridge_mask);
+ err = switchdev_bridge_port_offload(ndev, ndev, NULL, NULL, NULL,
+ false, extack);
+ if (err)
+ goto err_switchdev_offload;
+
/* Port enters bridge mode, so we no longer need to copy multicast
* frames to the CPU unless the bridge requests them
*/
- __dev_mc_unsync(port->ndev, sparx5_mc_unsync);
+ __dev_mc_unsync(ndev, sparx5_mc_unsync);
return 0;
+
+err_switchdev_offload:
+ clear_bit(port->portno, sparx5->bridge_mask);
+ return err;
}
static void sparx5_port_bridge_leave(struct sparx5_port *port,
@@ -122,6 +134,8 @@ static void sparx5_port_bridge_leave(struct sparx5_port *port,
{
struct sparx5 *sparx5 = port->sparx5;
+ switchdev_bridge_port_unoffload(port->ndev, NULL, NULL, NULL);
+
clear_bit(port->portno, sparx5->bridge_mask);
if (bitmap_empty(sparx5->bridge_mask, SPX5_PORTS))
sparx5->hw_bridge_dev = NULL;
@@ -139,11 +153,15 @@ static int sparx5_port_changeupper(struct net_device *dev,
struct netdev_notifier_changeupper_info *info)
{
struct sparx5_port *port = netdev_priv(dev);
+ struct netlink_ext_ack *extack;
int err = 0;
+ extack = netdev_notifier_info_to_extack(&info->info);
+
if (netif_is_bridge_master(info->upper_dev)) {
if (info->linking)
- err = sparx5_port_bridge_join(port, info->upper_dev);
+ err = sparx5_port_bridge_join(port, info->upper_dev,
+ extack);
else
sparx5_port_bridge_leave(port, info->upper_dev);
diff --git a/drivers/net/ethernet/mscc/Kconfig b/drivers/net/ethernet/mscc/Kconfig
index 2d3157e4d081..b6a73d151dec 100644
--- a/drivers/net/ethernet/mscc/Kconfig
+++ b/drivers/net/ethernet/mscc/Kconfig
@@ -16,7 +16,7 @@ config MSCC_OCELOT_SWITCH_LIB
select NET_DEVLINK
select REGMAP_MMIO
select PACKING
- select PHYLIB
+ select PHYLINK
tristate
help
This is a hardware support library for Ocelot network switches. It is
@@ -24,6 +24,7 @@ config MSCC_OCELOT_SWITCH_LIB
config MSCC_OCELOT_SWITCH
tristate "Ocelot switch driver"
+ depends on PTP_1588_CLOCK_OPTIONAL
depends on BRIDGE || BRIDGE=n
depends on NET_SWITCHDEV
depends on HAS_IOMEM
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 2948d731a1c1..8ec194178aa2 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -377,7 +377,7 @@ static u32 ocelot_read_eq_avail(struct ocelot *ocelot, int port)
return ocelot_read_rix(ocelot, QSYS_SW_STATUS, port);
}
-int ocelot_port_flush(struct ocelot *ocelot, int port)
+static int ocelot_port_flush(struct ocelot *ocelot, int port)
{
unsigned int pause_ena;
int err, val;
@@ -429,63 +429,118 @@ int ocelot_port_flush(struct ocelot *ocelot, int port)
return err;
}
-EXPORT_SYMBOL(ocelot_port_flush);
-void ocelot_adjust_link(struct ocelot *ocelot, int port,
- struct phy_device *phydev)
+void ocelot_phylink_mac_link_down(struct ocelot *ocelot, int port,
+ unsigned int link_an_mode,
+ phy_interface_t interface,
+ unsigned long quirks)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
- int speed, mode = 0;
+ int err;
+
+ ocelot_port_rmwl(ocelot_port, 0, DEV_MAC_ENA_CFG_RX_ENA,
+ DEV_MAC_ENA_CFG);
+
+ ocelot_fields_write(ocelot, port, QSYS_SWITCH_PORT_MODE_PORT_ENA, 0);
+
+ err = ocelot_port_flush(ocelot, port);
+ if (err)
+ dev_err(ocelot->dev, "failed to flush port %d: %d\n",
+ port, err);
+
+ /* Put the port in reset. */
+ if (interface != PHY_INTERFACE_MODE_QSGMII ||
+ !(quirks & OCELOT_QUIRK_QSGMII_PORTS_MUST_BE_UP))
+ ocelot_port_rmwl(ocelot_port,
+				 DEV_CLOCK_CFG_MAC_TX_RST |
+				 DEV_CLOCK_CFG_MAC_RX_RST,
+				 DEV_CLOCK_CFG_MAC_TX_RST |
+				 DEV_CLOCK_CFG_MAC_RX_RST,
+ DEV_CLOCK_CFG);
+}
+EXPORT_SYMBOL_GPL(ocelot_phylink_mac_link_down);
+
+void ocelot_phylink_mac_link_up(struct ocelot *ocelot, int port,
+ struct phy_device *phydev,
+ unsigned int link_an_mode,
+ phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause,
+ unsigned long quirks)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ int mac_speed, mode = 0;
+ u32 mac_fc_cfg;
+
+ /* The MAC might be integrated in systems where the MAC speed is fixed
+	 * and it's the PCS that performs the rate adaptation, so we have
+ * to write "1000Mbps" into the LINK_SPEED field of DEV_CLOCK_CFG
+ * (which is also its default value).
+ */
+ if ((quirks & OCELOT_QUIRK_PCS_PERFORMS_RATE_ADAPTATION) ||
+ speed == SPEED_1000) {
+ mac_speed = OCELOT_SPEED_1000;
+ mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA;
+ } else if (speed == SPEED_2500) {
+ mac_speed = OCELOT_SPEED_2500;
+ mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA;
+ } else if (speed == SPEED_100) {
+ mac_speed = OCELOT_SPEED_100;
+ } else {
+ mac_speed = OCELOT_SPEED_10;
+ }
+
+ if (duplex == DUPLEX_FULL)
+ mode |= DEV_MAC_MODE_CFG_FDX_ENA;
- switch (phydev->speed) {
+ ocelot_port_writel(ocelot_port, mode, DEV_MAC_MODE_CFG);
+
+ /* Take port out of reset by clearing the MAC_TX_RST, MAC_RX_RST and
+ * PORT_RST bits in DEV_CLOCK_CFG.
+ */
+ ocelot_port_writel(ocelot_port, DEV_CLOCK_CFG_LINK_SPEED(mac_speed),
+ DEV_CLOCK_CFG);
+
+ switch (speed) {
case SPEED_10:
- speed = OCELOT_SPEED_10;
+ mac_fc_cfg = SYS_MAC_FC_CFG_FC_LINK_SPEED(OCELOT_SPEED_10);
break;
case SPEED_100:
- speed = OCELOT_SPEED_100;
+ mac_fc_cfg = SYS_MAC_FC_CFG_FC_LINK_SPEED(OCELOT_SPEED_100);
break;
case SPEED_1000:
- speed = OCELOT_SPEED_1000;
- mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA;
- break;
case SPEED_2500:
- speed = OCELOT_SPEED_2500;
- mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA;
+ mac_fc_cfg = SYS_MAC_FC_CFG_FC_LINK_SPEED(OCELOT_SPEED_1000);
break;
default:
- dev_err(ocelot->dev, "Unsupported PHY speed on port %d: %d\n",
- port, phydev->speed);
+ dev_err(ocelot->dev, "Unsupported speed on port %d: %d\n",
+ port, speed);
return;
}
- phy_print_status(phydev);
-
- if (!phydev->link)
- return;
-
- /* Only full duplex supported for now */
- ocelot_port_writel(ocelot_port, DEV_MAC_MODE_CFG_FDX_ENA |
- mode, DEV_MAC_MODE_CFG);
-
- /* Disable HDX fast control */
- ocelot_port_writel(ocelot_port, DEV_PORT_MISC_HDX_FAST_DIS,
- DEV_PORT_MISC);
+	/* Handle RX pause in all cases; with 2500base-X this is used for rate
+	 * adaptation.
+	 */
+ mac_fc_cfg |= SYS_MAC_FC_CFG_RX_FC_ENA;
- /* SGMII only for now */
- ocelot_port_writel(ocelot_port, PCS1G_MODE_CFG_SGMII_MODE_ENA,
- PCS1G_MODE_CFG);
- ocelot_port_writel(ocelot_port, PCS1G_SD_CFG_SD_SEL, PCS1G_SD_CFG);
+ if (tx_pause)
+ mac_fc_cfg |= SYS_MAC_FC_CFG_TX_FC_ENA |
+ SYS_MAC_FC_CFG_PAUSE_VAL_CFG(0xffff) |
+ SYS_MAC_FC_CFG_FC_LATENCY_CFG(0x7) |
+ SYS_MAC_FC_CFG_ZERO_PAUSE_ENA;
- /* Enable PCS */
- ocelot_port_writel(ocelot_port, PCS1G_CFG_PCS_ENA, PCS1G_CFG);
+ /* Flow control. Link speed is only used here to evaluate the time
+ * specification in incoming pause frames.
+ */
+ ocelot_write_rix(ocelot, mac_fc_cfg, SYS_MAC_FC_CFG, port);
- /* No aneg on SGMII */
- ocelot_port_writel(ocelot_port, 0, PCS1G_ANEG_CFG);
+ ocelot_write_rix(ocelot, 0, ANA_POL_FLOWC, port);
- /* No loopback */
- ocelot_port_writel(ocelot_port, 0, PCS1G_LB_CFG);
+ ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, tx_pause);
- /* Enable MAC module */
+ /* Undo the effects of ocelot_phylink_mac_link_down:
+ * enable MAC module
+ */
ocelot_port_writel(ocelot_port, DEV_MAC_ENA_CFG_RX_ENA |
DEV_MAC_ENA_CFG_TX_ENA, DEV_MAC_ENA_CFG);
@@ -502,39 +557,8 @@ void ocelot_adjust_link(struct ocelot *ocelot, int port,
/* Core: Enable port for frame transfer */
ocelot_fields_write(ocelot, port,
QSYS_SWITCH_PORT_MODE_PORT_ENA, 1);
-
- /* Flow control */
- ocelot_write_rix(ocelot, SYS_MAC_FC_CFG_PAUSE_VAL_CFG(0xffff) |
- SYS_MAC_FC_CFG_RX_FC_ENA | SYS_MAC_FC_CFG_TX_FC_ENA |
- SYS_MAC_FC_CFG_ZERO_PAUSE_ENA |
- SYS_MAC_FC_CFG_FC_LATENCY_CFG(0x7) |
- SYS_MAC_FC_CFG_FC_LINK_SPEED(speed),
- SYS_MAC_FC_CFG, port);
- ocelot_write_rix(ocelot, 0, ANA_POL_FLOWC, port);
-}
-EXPORT_SYMBOL(ocelot_adjust_link);
-
-void ocelot_port_enable(struct ocelot *ocelot, int port,
- struct phy_device *phy)
-{
- /* Enable receiving frames on the port, and activate auto-learning of
- * MAC addresses.
- */
- ocelot_write_gix(ocelot, ANA_PORT_PORT_CFG_LEARNAUTO |
- ANA_PORT_PORT_CFG_RECV_ENA |
- ANA_PORT_PORT_CFG_PORTID_VAL(port),
- ANA_PORT_PORT_CFG, port);
-}
-EXPORT_SYMBOL(ocelot_port_enable);
-
-void ocelot_port_disable(struct ocelot *ocelot, int port)
-{
- struct ocelot_port *ocelot_port = ocelot->ports[port];
-
- ocelot_port_writel(ocelot_port, 0, DEV_MAC_ENA_CFG);
- ocelot_fields_write(ocelot, port, QSYS_SWITCH_PORT_MODE_PORT_ENA, 0);
}
-EXPORT_SYMBOL(ocelot_port_disable);
+EXPORT_SYMBOL_GPL(ocelot_phylink_mac_link_up);
static void ocelot_port_add_txtstamp_skb(struct ocelot *ocelot, int port,
struct sk_buff *clone)
@@ -1957,6 +1981,15 @@ void ocelot_init_port(struct ocelot *ocelot, int port)
/* Disable source address learning for standalone mode */
ocelot_port_set_learning(ocelot, port, false);
+ /* Set the port's initial logical port ID value, enable receiving
+ * frames on it, and configure the MAC address learning type to
+ * automatic.
+ */
+ ocelot_write_gix(ocelot, ANA_PORT_PORT_CFG_LEARNAUTO |
+ ANA_PORT_PORT_CFG_RECV_ENA |
+ ANA_PORT_PORT_CFG_PORTID_VAL(port),
+ ANA_PORT_PORT_CFG, port);
+
/* Enable vcap lookups */
ocelot_vcap_enable(ocelot, port);
}
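In ocelot_phylink_mac_link_up() above, the resolved speed is encoded twice: into DEV_CLOCK_CFG for the MAC clock, and into SYS_MAC_FC_CFG, where it only scales the timing of incoming pause frames and where 2500base-X reuses the 1000 Mbps encoding. A condensed sketch of the flow-control mapping, assuming the driver's OCELOT_SPEED_* constants:

	/* Illustrative only; mirrors the switch statement in the hunk above.
	 * The driver itself rejects speeds outside this set.
	 */
	static u32 example_fc_speed(int speed)
	{
		switch (speed) {
		case SPEED_10:
			return OCELOT_SPEED_10;
		case SPEED_100:
			return OCELOT_SPEED_100;
		case SPEED_1000:
		case SPEED_2500:	/* pause timing reuses the 1G encoding */
		default:
			return OCELOT_SPEED_1000;
		}
	}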
diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h
index db6b1a4c3926..1952d6a1b98a 100644
--- a/drivers/net/ethernet/mscc/ocelot.h
+++ b/drivers/net/ethernet/mscc/ocelot.h
@@ -12,8 +12,7 @@
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/net_tstamp.h>
-#include <linux/phy.h>
-#include <linux/phy/phy.h>
+#include <linux/phylink.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
@@ -42,11 +41,9 @@ struct ocelot_port_tc {
struct ocelot_port_private {
struct ocelot_port port;
struct net_device *dev;
- struct phy_device *phy;
+ struct phylink *phylink;
+ struct phylink_config phylink_config;
u8 chip_port;
-
- struct phy *serdes;
-
struct ocelot_port_tc tc;
};
@@ -107,7 +104,7 @@ u32 ocelot_port_readl(struct ocelot_port *port, u32 reg);
void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg);
int ocelot_probe_port(struct ocelot *ocelot, int port, struct regmap *target,
- struct phy_device *phy);
+ struct device_node *portnp);
void ocelot_release_port(struct ocelot_port *ocelot_port);
int ocelot_devlink_init(struct ocelot *ocelot);
void ocelot_devlink_teardown(struct ocelot *ocelot);
diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c
index e9d260d84bf3..5e8965be968a 100644
--- a/drivers/net/ethernet/mscc/ocelot_net.c
+++ b/drivers/net/ethernet/mscc/ocelot_net.c
@@ -9,10 +9,14 @@
*/
#include <linux/if_bridge.h>
+#include <linux/of_net.h>
+#include <linux/phy/phy.h>
#include <net/pkt_cls.h>
#include "ocelot.h"
#include "ocelot_vcap.h"
+#define OCELOT_MAC_QUIRKS OCELOT_QUIRK_QSGMII_PORTS_MUST_BE_UP
+
static struct ocelot *devlink_port_to_ocelot(struct devlink_port *dlp)
{
return devlink_priv(dlp->devlink);
@@ -381,15 +385,6 @@ static int ocelot_setup_tc(struct net_device *dev, enum tc_setup_type type,
return 0;
}
-static void ocelot_port_adjust_link(struct net_device *dev)
-{
- struct ocelot_port_private *priv = netdev_priv(dev);
- struct ocelot *ocelot = priv->port.ocelot;
- int port = priv->chip_port;
-
- ocelot_adjust_link(ocelot, port, dev->phydev);
-}
-
static int ocelot_vlan_vid_prepare(struct net_device *dev, u16 vid, bool pvid,
bool untagged)
{
@@ -448,33 +443,8 @@ static int ocelot_vlan_vid_del(struct net_device *dev, u16 vid)
static int ocelot_port_open(struct net_device *dev)
{
struct ocelot_port_private *priv = netdev_priv(dev);
- struct ocelot_port *ocelot_port = &priv->port;
- struct ocelot *ocelot = ocelot_port->ocelot;
- int port = priv->chip_port;
- int err;
-
- if (priv->serdes) {
- err = phy_set_mode_ext(priv->serdes, PHY_MODE_ETHERNET,
- ocelot_port->phy_mode);
- if (err) {
- netdev_err(dev, "Could not set mode of SerDes\n");
- return err;
- }
- }
-
- err = phy_connect_direct(dev, priv->phy, &ocelot_port_adjust_link,
- ocelot_port->phy_mode);
- if (err) {
- netdev_err(dev, "Could not attach to PHY\n");
- return err;
- }
- dev->phydev = priv->phy;
-
- phy_attached_info(priv->phy);
- phy_start(priv->phy);
-
- ocelot_port_enable(ocelot, port, priv->phy);
+ phylink_start(priv->phylink);
return 0;
}
@@ -482,14 +452,8 @@ static int ocelot_port_open(struct net_device *dev)
static int ocelot_port_stop(struct net_device *dev)
{
struct ocelot_port_private *priv = netdev_priv(dev);
- struct ocelot *ocelot = priv->port.ocelot;
- int port = priv->chip_port;
-
- phy_disconnect(priv->phy);
-
- dev->phydev = NULL;
- ocelot_port_disable(ocelot, port);
+ phylink_stop(priv->phylink);
return 0;
}
@@ -823,7 +787,7 @@ static const struct net_device_ops ocelot_port_netdev_ops = {
.ndo_vlan_rx_kill_vid = ocelot_vlan_rx_kill_vid,
.ndo_set_features = ocelot_set_features,
.ndo_setup_tc = ocelot_setup_tc,
- .ndo_do_ioctl = ocelot_ioctl,
+ .ndo_eth_ioctl = ocelot_ioctl,
.ndo_get_devlink_port = ocelot_get_devlink_port,
};
@@ -1154,38 +1118,19 @@ static int ocelot_switchdev_sync(struct ocelot *ocelot, int port,
struct net_device *bridge_dev,
struct netlink_ext_ack *extack)
{
- struct ocelot_port *ocelot_port = ocelot->ports[port];
- struct ocelot_port_private *priv;
clock_t ageing_time;
u8 stp_state;
- int err;
-
- priv = container_of(ocelot_port, struct ocelot_port_private, port);
ocelot_inherit_brport_flags(ocelot, port, brport_dev);
stp_state = br_port_get_stp_state(brport_dev);
ocelot_bridge_stp_state_set(ocelot, port, stp_state);
- err = ocelot_port_vlan_filtering(ocelot, port,
- br_vlan_enabled(bridge_dev));
- if (err)
- return err;
-
ageing_time = br_get_ageing_time(bridge_dev);
ocelot_port_attr_ageing_set(ocelot, port, ageing_time);
- err = br_mdb_replay(bridge_dev, brport_dev, priv, true,
- &ocelot_switchdev_blocking_nb, extack);
- if (err && err != -EOPNOTSUPP)
- return err;
-
- err = br_vlan_replay(bridge_dev, brport_dev, priv, true,
- &ocelot_switchdev_blocking_nb, extack);
- if (err && err != -EOPNOTSUPP)
- return err;
-
- return 0;
+ return ocelot_port_vlan_filtering(ocelot, port,
+ br_vlan_enabled(bridge_dev));
}
static int ocelot_switchdev_unsync(struct ocelot *ocelot, int port)
@@ -1216,6 +1161,13 @@ static int ocelot_netdevice_bridge_join(struct net_device *dev,
ocelot_port_bridge_join(ocelot, port, bridge);
+ err = switchdev_bridge_port_offload(brport_dev, dev, priv,
+ &ocelot_netdevice_nb,
+ &ocelot_switchdev_blocking_nb,
+ false, extack);
+ if (err)
+ goto err_switchdev_offload;
+
err = ocelot_switchdev_sync(ocelot, port, brport_dev, bridge, extack);
if (err)
goto err_switchdev_sync;
@@ -1223,10 +1175,24 @@ static int ocelot_netdevice_bridge_join(struct net_device *dev,
return 0;
err_switchdev_sync:
+ switchdev_bridge_port_unoffload(brport_dev, priv,
+ &ocelot_netdevice_nb,
+ &ocelot_switchdev_blocking_nb);
+err_switchdev_offload:
ocelot_port_bridge_leave(ocelot, port, bridge);
return err;
}
+static void ocelot_netdevice_pre_bridge_leave(struct net_device *dev,
+ struct net_device *brport_dev)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+
+ switchdev_bridge_port_unoffload(brport_dev, priv,
+ &ocelot_netdevice_nb,
+ &ocelot_switchdev_blocking_nb);
+}
+
static int ocelot_netdevice_bridge_leave(struct net_device *dev,
struct net_device *brport_dev,
struct net_device *bridge)
@@ -1279,6 +1245,18 @@ err_bridge_join:
return err;
}
+static void ocelot_netdevice_pre_lag_leave(struct net_device *dev,
+ struct net_device *bond)
+{
+ struct net_device *bridge_dev;
+
+ bridge_dev = netdev_master_upper_dev_get(bond);
+ if (!bridge_dev || !netif_is_bridge_master(bridge_dev))
+ return;
+
+ ocelot_netdevice_pre_bridge_leave(dev, bond);
+}
+
static int ocelot_netdevice_lag_leave(struct net_device *dev,
struct net_device *bond)
{
@@ -1356,6 +1334,43 @@ ocelot_netdevice_lag_changeupper(struct net_device *dev,
}
static int
+ocelot_netdevice_prechangeupper(struct net_device *dev,
+ struct net_device *brport_dev,
+ struct netdev_notifier_changeupper_info *info)
+{
+ if (netif_is_bridge_master(info->upper_dev) && !info->linking)
+ ocelot_netdevice_pre_bridge_leave(dev, brport_dev);
+
+ if (netif_is_lag_master(info->upper_dev) && !info->linking)
+ ocelot_netdevice_pre_lag_leave(dev, info->upper_dev);
+
+ return NOTIFY_DONE;
+}
+
+static int
+ocelot_netdevice_lag_prechangeupper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info)
+{
+ struct net_device *lower;
+ struct list_head *iter;
+ int err = NOTIFY_DONE;
+
+ netdev_for_each_lower_dev(dev, lower, iter) {
+ struct ocelot_port_private *priv = netdev_priv(lower);
+ struct ocelot_port *ocelot_port = &priv->port;
+
+ if (ocelot_port->bond != dev)
+ return NOTIFY_OK;
+
+ err = ocelot_netdevice_prechangeupper(dev, lower, info);
+ if (err)
+ return err;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static int
ocelot_netdevice_changelowerstate(struct net_device *dev,
struct netdev_lag_lower_state_info *info)
{
@@ -1382,6 +1397,17 @@ static int ocelot_netdevice_event(struct notifier_block *unused,
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
switch (event) {
+ case NETDEV_PRECHANGEUPPER: {
+ struct netdev_notifier_changeupper_info *info = ptr;
+
+ if (ocelot_netdevice_dev_check(dev))
+ return ocelot_netdevice_prechangeupper(dev, dev, info);
+
+ if (netif_is_lag_master(dev))
+ return ocelot_netdevice_lag_prechangeupper(dev, info);
+
+ break;
+ }
case NETDEV_CHANGEUPPER: {
struct netdev_notifier_changeupper_info *info = ptr;
@@ -1466,8 +1492,188 @@ struct notifier_block ocelot_switchdev_blocking_nb __read_mostly = {
.notifier_call = ocelot_switchdev_blocking_event,
};
+static void vsc7514_phylink_validate(struct phylink_config *config,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct ocelot_port_private *priv = netdev_priv(ndev);
+ struct ocelot_port *ocelot_port = &priv->port;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = {};
+
+ if (state->interface != PHY_INTERFACE_MODE_NA &&
+ state->interface != ocelot_port->phy_mode) {
+ bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ return;
+ }
+
+ phylink_set_port_modes(mask);
+
+ phylink_set(mask, Pause);
+ phylink_set(mask, Autoneg);
+ phylink_set(mask, Asym_Pause);
+ phylink_set(mask, 10baseT_Half);
+ phylink_set(mask, 10baseT_Full);
+ phylink_set(mask, 100baseT_Half);
+ phylink_set(mask, 100baseT_Full);
+ phylink_set(mask, 1000baseT_Half);
+ phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseX_Full);
+ phylink_set(mask, 2500baseT_Full);
+ phylink_set(mask, 2500baseX_Full);
+
+ bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ bitmap_and(state->advertising, state->advertising, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static void vsc7514_phylink_mac_config(struct phylink_config *config,
+ unsigned int link_an_mode,
+ const struct phylink_link_state *state)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct ocelot_port_private *priv = netdev_priv(ndev);
+ struct ocelot_port *ocelot_port = &priv->port;
+
+ /* Disable HDX fast control */
+ ocelot_port_writel(ocelot_port, DEV_PORT_MISC_HDX_FAST_DIS,
+ DEV_PORT_MISC);
+
+ /* SGMII only for now */
+ ocelot_port_writel(ocelot_port, PCS1G_MODE_CFG_SGMII_MODE_ENA,
+ PCS1G_MODE_CFG);
+ ocelot_port_writel(ocelot_port, PCS1G_SD_CFG_SD_SEL, PCS1G_SD_CFG);
+
+ /* Enable PCS */
+ ocelot_port_writel(ocelot_port, PCS1G_CFG_PCS_ENA, PCS1G_CFG);
+
+ /* No aneg on SGMII */
+ ocelot_port_writel(ocelot_port, 0, PCS1G_ANEG_CFG);
+
+ /* No loopback */
+ ocelot_port_writel(ocelot_port, 0, PCS1G_LB_CFG);
+}
+
+static void vsc7514_phylink_mac_link_down(struct phylink_config *config,
+ unsigned int link_an_mode,
+ phy_interface_t interface)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct ocelot_port_private *priv = netdev_priv(ndev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->chip_port;
+
+ ocelot_phylink_mac_link_down(ocelot, port, link_an_mode, interface,
+ OCELOT_MAC_QUIRKS);
+}
+
+static void vsc7514_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev,
+ unsigned int link_an_mode,
+ phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct ocelot_port_private *priv = netdev_priv(ndev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->chip_port;
+
+ ocelot_phylink_mac_link_up(ocelot, port, phydev, link_an_mode,
+ interface, speed, duplex,
+ tx_pause, rx_pause, OCELOT_MAC_QUIRKS);
+}
+
+static const struct phylink_mac_ops ocelot_phylink_ops = {
+ .validate = vsc7514_phylink_validate,
+ .mac_config = vsc7514_phylink_mac_config,
+ .mac_link_down = vsc7514_phylink_mac_link_down,
+ .mac_link_up = vsc7514_phylink_mac_link_up,
+};
+
+static int ocelot_port_phylink_create(struct ocelot *ocelot, int port,
+ struct device_node *portnp)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct ocelot_port_private *priv;
+ struct device *dev = ocelot->dev;
+ phy_interface_t phy_mode;
+ struct phylink *phylink;
+ int err;
+
+ of_get_phy_mode(portnp, &phy_mode);
+ /* DT bindings of internal PHY ports are broken and don't
+ * specify a phy-mode
+ */
+ if (phy_mode == PHY_INTERFACE_MODE_NA)
+ phy_mode = PHY_INTERFACE_MODE_INTERNAL;
+
+ if (phy_mode != PHY_INTERFACE_MODE_SGMII &&
+ phy_mode != PHY_INTERFACE_MODE_QSGMII &&
+ phy_mode != PHY_INTERFACE_MODE_INTERNAL) {
+ dev_err(dev, "unsupported phy mode %s for port %d\n",
+ phy_modes(phy_mode), port);
+ return -EINVAL;
+ }
+
+ /* Ensure clock signals and speed are set on all QSGMII links */
+ if (phy_mode == PHY_INTERFACE_MODE_QSGMII)
+ ocelot_port_rmwl(ocelot_port, 0,
+ DEV_CLOCK_CFG_MAC_TX_RST |
+				 DEV_CLOCK_CFG_MAC_RX_RST,
+ DEV_CLOCK_CFG);
+
+ ocelot_port->phy_mode = phy_mode;
+
+ if (phy_mode != PHY_INTERFACE_MODE_INTERNAL) {
+ struct phy *serdes = of_phy_get(portnp, NULL);
+
+ if (IS_ERR(serdes)) {
+ err = PTR_ERR(serdes);
+ dev_err_probe(dev, err,
+ "missing SerDes phys for port %d\n",
+ port);
+ return err;
+ }
+
+ err = phy_set_mode_ext(serdes, PHY_MODE_ETHERNET, phy_mode);
+ of_phy_put(serdes);
+ if (err) {
+			dev_err(dev, "Could not set SerDes mode on port %d: %pe\n",
+ port, ERR_PTR(err));
+ return err;
+ }
+ }
+
+ priv = container_of(ocelot_port, struct ocelot_port_private, port);
+
+ priv->phylink_config.dev = &priv->dev->dev;
+ priv->phylink_config.type = PHYLINK_NETDEV;
+
+ phylink = phylink_create(&priv->phylink_config,
+ of_fwnode_handle(portnp),
+ phy_mode, &ocelot_phylink_ops);
+ if (IS_ERR(phylink)) {
+ err = PTR_ERR(phylink);
+ dev_err(dev, "Could not create phylink (%pe)\n", phylink);
+ return err;
+ }
+
+ priv->phylink = phylink;
+
+ err = phylink_of_phy_connect(phylink, portnp, 0);
+ if (err) {
+ dev_err(dev, "Could not connect to PHY: %pe\n", ERR_PTR(err));
+ phylink_destroy(phylink);
+ priv->phylink = NULL;
+ return err;
+ }
+
+ return 0;
+}
+
int ocelot_probe_port(struct ocelot *ocelot, int port, struct regmap *target,
- struct phy_device *phy)
+ struct device_node *portnp)
{
struct ocelot_port_private *priv;
struct ocelot_port *ocelot_port;
@@ -1480,7 +1686,6 @@ int ocelot_probe_port(struct ocelot *ocelot, int port, struct regmap *target,
SET_NETDEV_DEV(dev, ocelot->dev);
priv = netdev_priv(dev);
priv->dev = dev;
- priv->phy = phy;
priv->chip_port = port;
ocelot_port = &priv->port;
ocelot_port->ocelot = ocelot;
@@ -1501,15 +1706,23 @@ int ocelot_probe_port(struct ocelot *ocelot, int port, struct regmap *target,
ocelot_init_port(ocelot, port);
+ err = ocelot_port_phylink_create(ocelot, port, portnp);
+ if (err)
+ goto out;
+
err = register_netdev(dev);
if (err) {
dev_err(ocelot->dev, "register_netdev failed\n");
- free_netdev(dev);
- ocelot->ports[port] = NULL;
- return err;
+ goto out;
}
return 0;
+
+out:
+ ocelot->ports[port] = NULL;
+ free_netdev(dev);
+
+ return err;
}
void ocelot_release_port(struct ocelot_port *ocelot_port)
@@ -1519,5 +1732,14 @@ void ocelot_release_port(struct ocelot_port *ocelot_port)
port);
unregister_netdev(priv->dev);
+
+ if (priv->phylink) {
+ rtnl_lock();
+ phylink_disconnect_phy(priv->phylink);
+ rtnl_unlock();
+
+ phylink_destroy(priv->phylink);
+ }
+
free_netdev(priv->dev);
}
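Taken together, the ocelot_net.c hunks give the phylink instance a clear ownership model: created and connected at probe time, started and stopped from the netdev open/stop callbacks, then disconnected under RTNL and destroyed on release. Condensed from the hunks above, with error handling elided:

	/* probe: ocelot_port_phylink_create() */
	phylink = phylink_create(&priv->phylink_config,
				 of_fwnode_handle(portnp),
				 phy_mode, &ocelot_phylink_ops);
	err = phylink_of_phy_connect(phylink, portnp, 0);

	/* ndo_open / ndo_stop */
	phylink_start(priv->phylink);
	phylink_stop(priv->phylink);

	/* release: ocelot_release_port() */
	rtnl_lock();
	phylink_disconnect_phy(priv->phylink);
	rtnl_unlock();
	phylink_destroy(priv->phylink);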
diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
index 4bd7e9d9ec61..18aed504f45d 100644
--- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c
+++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
@@ -9,6 +9,7 @@
#include <linux/module.h>
#include <linux/of_net.h>
#include <linux/netdevice.h>
+#include <linux/phylink.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/mfd/syscon.h>
@@ -945,13 +946,9 @@ static int mscc_ocelot_init_ports(struct platform_device *pdev,
for_each_available_child_of_node(ports, portnp) {
struct ocelot_port_private *priv;
struct ocelot_port *ocelot_port;
- struct device_node *phy_node;
struct devlink_port *dlp;
- phy_interface_t phy_mode;
- struct phy_device *phy;
struct regmap *target;
struct resource *res;
- struct phy *serdes;
char res_name[8];
if (of_property_read_u32(portnp, "reg", &reg))
@@ -975,15 +972,6 @@ static int mscc_ocelot_init_ports(struct platform_device *pdev,
goto out_teardown;
}
- phy_node = of_parse_phandle(portnp, "phy-handle", 0);
- if (!phy_node)
- continue;
-
- phy = of_phy_find_device(phy_node);
- of_node_put(phy_node);
- if (!phy)
- continue;
-
err = ocelot_port_devlink_init(ocelot, port,
DEVLINK_PORT_FLAVOUR_PHYSICAL);
if (err) {
@@ -992,7 +980,7 @@ static int mscc_ocelot_init_ports(struct platform_device *pdev,
}
devlink_ports_registered |= BIT(port);
- err = ocelot_probe_port(ocelot, port, target, phy);
+ err = ocelot_probe_port(ocelot, port, target, portnp);
if (err) {
of_node_put(portnp);
goto out_teardown;
@@ -1003,49 +991,6 @@ static int mscc_ocelot_init_ports(struct platform_device *pdev,
port);
dlp = &ocelot->devlink_ports[port];
devlink_port_type_eth_set(dlp, priv->dev);
-
- of_get_phy_mode(portnp, &phy_mode);
-
- ocelot_port->phy_mode = phy_mode;
-
- switch (ocelot_port->phy_mode) {
- case PHY_INTERFACE_MODE_NA:
- continue;
- case PHY_INTERFACE_MODE_SGMII:
- break;
- case PHY_INTERFACE_MODE_QSGMII:
- /* Ensure clock signals and speed is set on all
- * QSGMII links
- */
- ocelot_port_writel(ocelot_port,
- DEV_CLOCK_CFG_LINK_SPEED
- (OCELOT_SPEED_1000),
- DEV_CLOCK_CFG);
- break;
- default:
- dev_err(ocelot->dev,
- "invalid phy mode for port%d, (Q)SGMII only\n",
- port);
- of_node_put(portnp);
- err = -EINVAL;
- goto out_teardown;
- }
-
- serdes = devm_of_phy_get(ocelot->dev, portnp, NULL);
- if (IS_ERR(serdes)) {
- err = PTR_ERR(serdes);
- if (err == -EPROBE_DEFER)
- dev_dbg(ocelot->dev, "deferring probe\n");
- else
- dev_err(ocelot->dev,
- "missing SerDes phys for port%d\n",
- port);
-
- of_node_put(portnp);
- goto out_teardown;
- }
-
- priv->serdes = serdes;
}
/* Initialize unused devlink ports at the end */
@@ -1103,7 +1048,8 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
if (!np && !pdev->dev.platform_data)
return -ENODEV;
- devlink = devlink_alloc(&ocelot_devlink_ops, sizeof(*ocelot));
+ devlink =
+ devlink_alloc(&ocelot_devlink_ops, sizeof(*ocelot), &pdev->dev);
if (!devlink)
return -ENOMEM;
@@ -1187,7 +1133,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
if (err)
goto out_put_ports;
- err = devlink_register(devlink, ocelot->dev);
+ err = devlink_register(devlink);
if (err)
goto out_ocelot_deinit;
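The two devlink hunks track a tree-wide API change: the backing device is now bound at allocation time rather than at registration time. Side by side:

	/* before */
	devlink = devlink_alloc(&ocelot_devlink_ops, sizeof(*ocelot));
	err = devlink_register(devlink, ocelot->dev);

	/* after */
	devlink = devlink_alloc(&ocelot_devlink_ops, sizeof(*ocelot),
				&pdev->dev);
	err = devlink_register(devlink);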
diff --git a/drivers/net/ethernet/natsemi/jazzsonic.c b/drivers/net/ethernet/natsemi/jazzsonic.c
index ce3eca5d152b..d74a80f010c5 100644
--- a/drivers/net/ethernet/natsemi/jazzsonic.c
+++ b/drivers/net/ethernet/natsemi/jazzsonic.c
@@ -193,8 +193,6 @@ static int jazz_sonic_probe(struct platform_device *pdev)
SET_NETDEV_DEV(dev, &pdev->dev);
platform_set_drvdata(pdev, dev);
- netdev_boot_setup_check(dev);
-
dev->base_addr = res->start;
dev->irq = platform_get_irq(pdev, 0);
err = sonic_probe1(dev);
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index 84f7dbe9edff..3f982033944b 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -790,7 +790,7 @@ static const struct net_device_ops natsemi_netdev_ops = {
.ndo_get_stats = get_stats,
.ndo_set_rx_mode = set_rx_mode,
.ndo_change_mtu = natsemi_change_mtu,
- .ndo_do_ioctl = netdev_ioctl,
+ .ndo_eth_ioctl = netdev_ioctl,
.ndo_tx_timeout = ns_tx_timeout,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c
index 28d9e98db81a..ca4686094701 100644
--- a/drivers/net/ethernet/natsemi/xtsonic.c
+++ b/drivers/net/ethernet/natsemi/xtsonic.c
@@ -215,7 +215,6 @@ int xtsonic_probe(struct platform_device *pdev)
lp->device = &pdev->dev;
platform_set_drvdata(pdev, dev);
SET_NETDEV_DEV(dev, &pdev->dev);
- netdev_boot_setup_check(dev);
dev->base_addr = resmem->start;
dev->irq = resirq->start;
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 0b017d4f5c08..09c0e839cca5 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -7625,7 +7625,7 @@ static const struct net_device_ops s2io_netdev_ops = {
.ndo_start_xmit = s2io_xmit,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = s2io_ndo_set_multicast,
- .ndo_do_ioctl = s2io_ioctl,
+ .ndo_eth_ioctl = s2io_ioctl,
.ndo_set_mac_address = s2io_set_mac_addr,
.ndo_change_mtu = s2io_change_mtu,
.ndo_set_features = s2io_set_features,
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index 7abd13e69471..df4a3f3da83a 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -3339,7 +3339,7 @@ static const struct net_device_ops vxge_netdev_ops = {
.ndo_start_xmit = vxge_xmit,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = vxge_set_multicast,
- .ndo_do_ioctl = vxge_ioctl,
+ .ndo_eth_ioctl = vxge_ioctl,
.ndo_set_mac_address = vxge_set_mac_addr,
.ndo_change_mtu = vxge_change_mtu,
.ndo_fix_features = vxge_fix_features,
diff --git a/drivers/net/ethernet/netronome/Kconfig b/drivers/net/ethernet/netronome/Kconfig
index b82758d5beed..8844d1ac053a 100644
--- a/drivers/net/ethernet/netronome/Kconfig
+++ b/drivers/net/ethernet/netronome/Kconfig
@@ -23,6 +23,7 @@ config NFP
depends on TLS && TLS_DEVICE || TLS_DEVICE=n
select NET_DEVLINK
select CRC32
+ select DIMLIB
help
This driver supports the Netronome(R) NFP4000/NFP6000 based
	  cards working as an advanced Ethernet NIC. It works with both
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index 1cbe2c9f3959..2a432de11858 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -262,10 +262,10 @@ nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
}
static bool
-nfp_flower_tun_is_gre(struct flow_cls_offload *flow, int start_idx)
+nfp_flower_tun_is_gre(struct flow_rule *rule, int start_idx)
{
- struct flow_action_entry *act = flow->rule->action.entries;
- int num_act = flow->rule->action.num_entries;
+ struct flow_action_entry *act = rule->action.entries;
+ int num_act = rule->action.num_entries;
int act_idx;
/* Preparse action list for next mirred or redirect action */
@@ -279,7 +279,7 @@ nfp_flower_tun_is_gre(struct flow_cls_offload *flow, int start_idx)
static enum nfp_flower_tun_type
nfp_fl_get_tun_from_act(struct nfp_app *app,
- struct flow_cls_offload *flow,
+ struct flow_rule *rule,
const struct flow_action_entry *act, int act_idx)
{
const struct ip_tunnel_info *tun = act->tunnel;
@@ -288,7 +288,7 @@ nfp_fl_get_tun_from_act(struct nfp_app *app,
/* Determine the tunnel type based on the egress netdev
* in the mirred action for tunnels without l4.
*/
- if (nfp_flower_tun_is_gre(flow, act_idx))
+ if (nfp_flower_tun_is_gre(rule, act_idx))
return NFP_FL_TUNNEL_GRE;
switch (tun->key.tp_dst) {
@@ -788,11 +788,10 @@ struct nfp_flower_pedit_acts {
};
static int
-nfp_fl_commit_mangle(struct flow_cls_offload *flow, char *nfp_action,
+nfp_fl_commit_mangle(struct flow_rule *rule, char *nfp_action,
int *a_len, struct nfp_flower_pedit_acts *set_act,
u32 *csum_updated)
{
- struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
size_t act_size = 0;
u8 ip_proto = 0;
@@ -890,7 +889,7 @@ nfp_fl_commit_mangle(struct flow_cls_offload *flow, char *nfp_action,
static int
nfp_fl_pedit(const struct flow_action_entry *act,
- struct flow_cls_offload *flow, char *nfp_action, int *a_len,
+ char *nfp_action, int *a_len,
u32 *csum_updated, struct nfp_flower_pedit_acts *set_act,
struct netlink_ext_ack *extack)
{
@@ -977,7 +976,7 @@ nfp_flower_output_action(struct nfp_app *app,
static int
nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
- struct flow_cls_offload *flow,
+ struct flow_rule *rule,
struct nfp_fl_payload *nfp_fl, int *a_len,
struct net_device *netdev,
enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
@@ -1045,7 +1044,7 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
case FLOW_ACTION_TUNNEL_ENCAP: {
const struct ip_tunnel_info *ip_tun = act->tunnel;
- *tun_type = nfp_fl_get_tun_from_act(app, flow, act, act_idx);
+ *tun_type = nfp_fl_get_tun_from_act(app, rule, act, act_idx);
if (*tun_type == NFP_FL_TUNNEL_NONE) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported tunnel type in action list");
return -EOPNOTSUPP;
@@ -1086,7 +1085,7 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
/* Tunnel decap is handled by default so accept action. */
return 0;
case FLOW_ACTION_MANGLE:
- if (nfp_fl_pedit(act, flow, &nfp_fl->action_data[*a_len],
+ if (nfp_fl_pedit(act, &nfp_fl->action_data[*a_len],
a_len, csum_updated, set_act, extack))
return -EOPNOTSUPP;
break;
@@ -1195,7 +1194,7 @@ static bool nfp_fl_check_mangle_end(struct flow_action *flow_act,
}
int nfp_flower_compile_action(struct nfp_app *app,
- struct flow_cls_offload *flow,
+ struct flow_rule *rule,
struct net_device *netdev,
struct nfp_fl_payload *nfp_flow,
struct netlink_ext_ack *extack)
@@ -1207,7 +1206,7 @@ int nfp_flower_compile_action(struct nfp_app *app,
bool pkt_host = false;
u32 csum_updated = 0;
- if (!flow_action_hw_stats_check(&flow->rule->action, extack,
+ if (!flow_action_hw_stats_check(&rule->action, extack,
FLOW_ACTION_HW_STATS_DELAYED_BIT))
return -EOPNOTSUPP;
@@ -1219,18 +1218,18 @@ int nfp_flower_compile_action(struct nfp_app *app,
tun_out_cnt = 0;
out_cnt = 0;
- flow_action_for_each(i, act, &flow->rule->action) {
- if (nfp_fl_check_mangle_start(&flow->rule->action, i))
+ flow_action_for_each(i, act, &rule->action) {
+ if (nfp_fl_check_mangle_start(&rule->action, i))
memset(&set_act, 0, sizeof(set_act));
- err = nfp_flower_loop_action(app, act, flow, nfp_flow, &act_len,
+ err = nfp_flower_loop_action(app, act, rule, nfp_flow, &act_len,
netdev, &tun_type, &tun_out_cnt,
&out_cnt, &csum_updated,
&set_act, &pkt_host, extack, i);
if (err)
return err;
act_cnt++;
- if (nfp_fl_check_mangle_end(&flow->rule->action, i))
- nfp_fl_commit_mangle(flow,
+ if (nfp_fl_check_mangle_end(&rule->action, i))
+ nfp_fl_commit_mangle(rule,
&nfp_flow->action_data[act_len],
&act_len, &set_act, &csum_updated);
}
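The action.c refactor from struct flow_cls_offload to struct flow_rule is what enables the conntrack offload that follows: a merged CT flow is a synthetic rule built with flow_rule_alloc() and has no flow_cls_offload wrapper to pass around. The new call path, condensed from nfp_fl_merge_actions_offload() in conntrack.c below:

	struct flow_rule *a_rule;

	a_rule = flow_rule_alloc(num_actions);
	if (!a_rule)
		return -ENOMEM;
	/* copy the non-CT actions of the three sub-rules into a_rule */
	err = nfp_flower_compile_action(priv->app, a_rule, netdev,
					flow_pay, NULL);
	kfree(a_rule);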
diff --git a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
index 062bb2db68bf..bfd7d1c35076 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
@@ -2,6 +2,7 @@
/* Copyright (C) 2021 Corigine, Inc. */
#include "conntrack.h"
+#include "../nfp_port.h"
const struct rhashtable_params nfp_tc_ct_merge_params = {
.head_offset = offsetof(struct nfp_fl_ct_tc_merge,
@@ -407,15 +408,491 @@ static int nfp_ct_check_meta(struct nfp_fl_ct_flow_entry *post_ct_entry,
return -EINVAL;
}
+static int
+nfp_fl_calc_key_layers_sz(struct nfp_fl_key_ls in_key_ls, uint16_t *map)
+{
+ int key_size;
+
+ /* This field must always be present */
+ key_size = sizeof(struct nfp_flower_meta_tci);
+ map[FLOW_PAY_META_TCI] = 0;
+
+ if (in_key_ls.key_layer & NFP_FLOWER_LAYER_EXT_META) {
+ map[FLOW_PAY_EXT_META] = key_size;
+ key_size += sizeof(struct nfp_flower_ext_meta);
+ }
+ if (in_key_ls.key_layer & NFP_FLOWER_LAYER_PORT) {
+ map[FLOW_PAY_INPORT] = key_size;
+ key_size += sizeof(struct nfp_flower_in_port);
+ }
+ if (in_key_ls.key_layer & NFP_FLOWER_LAYER_MAC) {
+ map[FLOW_PAY_MAC_MPLS] = key_size;
+ key_size += sizeof(struct nfp_flower_mac_mpls);
+ }
+ if (in_key_ls.key_layer & NFP_FLOWER_LAYER_TP) {
+ map[FLOW_PAY_L4] = key_size;
+ key_size += sizeof(struct nfp_flower_tp_ports);
+ }
+ if (in_key_ls.key_layer & NFP_FLOWER_LAYER_IPV4) {
+ map[FLOW_PAY_IPV4] = key_size;
+ key_size += sizeof(struct nfp_flower_ipv4);
+ }
+ if (in_key_ls.key_layer & NFP_FLOWER_LAYER_IPV6) {
+ map[FLOW_PAY_IPV6] = key_size;
+ key_size += sizeof(struct nfp_flower_ipv6);
+ }
+
+ if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_GRE) {
+ map[FLOW_PAY_GRE] = key_size;
+ if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6)
+ key_size += sizeof(struct nfp_flower_ipv6_gre_tun);
+ else
+ key_size += sizeof(struct nfp_flower_ipv4_gre_tun);
+ }
+
+ if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_QINQ) {
+ map[FLOW_PAY_QINQ] = key_size;
+ key_size += sizeof(struct nfp_flower_vlan);
+ }
+
+ if ((in_key_ls.key_layer & NFP_FLOWER_LAYER_VXLAN) ||
+ (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_GENEVE)) {
+ map[FLOW_PAY_UDP_TUN] = key_size;
+ if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6)
+ key_size += sizeof(struct nfp_flower_ipv6_udp_tun);
+ else
+ key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
+ }
+
+ if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) {
+ map[FLOW_PAY_GENEVE_OPT] = key_size;
+ key_size += sizeof(struct nfp_flower_geneve_options);
+ }
+
+ return key_size;
+}
+
+static int nfp_fl_merge_actions_offload(struct flow_rule **rules,
+ struct nfp_flower_priv *priv,
+ struct net_device *netdev,
+ struct nfp_fl_payload *flow_pay)
+{
+ struct flow_action_entry *a_in;
+ int i, j, num_actions, id;
+ struct flow_rule *a_rule;
+ int err = 0, offset = 0;
+
+ num_actions = rules[CT_TYPE_PRE_CT]->action.num_entries +
+ rules[CT_TYPE_NFT]->action.num_entries +
+ rules[CT_TYPE_POST_CT]->action.num_entries;
+
+ a_rule = flow_rule_alloc(num_actions);
+ if (!a_rule)
+ return -ENOMEM;
+
+ /* Actions need a BASIC dissector. */
+ a_rule->match = rules[CT_TYPE_PRE_CT]->match;
+
+ /* Copy actions */
+ for (j = 0; j < _CT_TYPE_MAX; j++) {
+ if (flow_rule_match_key(rules[j], FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
+
+			/* ip_proto is the only field needed later in compile_action,
+			 * where it selects the correct checksum flags. It doesn't really
+			 * matter which input rule's ip_proto field we take, as the earlier
+			 * merge checks made sure that they don't conflict. We do not know
+			 * which of the subflows has the ip_proto filled in, so we iterate
+			 * through the subflows and assign the proper one to a_rule.
+			 */
+ flow_rule_match_basic(rules[j], &match);
+ if (match.mask->ip_proto)
+ a_rule->match = rules[j]->match;
+ }
+
+ for (i = 0; i < rules[j]->action.num_entries; i++) {
+ a_in = &rules[j]->action.entries[i];
+ id = a_in->id;
+
+ /* Ignore CT related actions as these would already have
+ * been taken care of by previous checks, and we do not send
+ * any CT actions to the firmware.
+ */
+ switch (id) {
+ case FLOW_ACTION_CT:
+ case FLOW_ACTION_GOTO:
+ case FLOW_ACTION_CT_METADATA:
+ continue;
+ default:
+ memcpy(&a_rule->action.entries[offset++],
+ a_in, sizeof(struct flow_action_entry));
+ break;
+ }
+ }
+ }
+
+ /* Some actions would have been ignored, so update the num_entries field */
+ a_rule->action.num_entries = offset;
+ err = nfp_flower_compile_action(priv->app, a_rule, netdev, flow_pay, NULL);
+ kfree(a_rule);
+
+ return err;
+}
+
static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry)
{
- return 0;
+ enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
+ struct nfp_fl_ct_zone_entry *zt = m_entry->zt;
+ struct nfp_fl_key_ls key_layer, tmp_layer;
+ struct nfp_flower_priv *priv = zt->priv;
+ u16 key_map[_FLOW_PAY_LAYERS_MAX];
+ struct nfp_fl_payload *flow_pay;
+
+ struct flow_rule *rules[_CT_TYPE_MAX];
+ u8 *key, *msk, *kdata, *mdata;
+ struct nfp_port *port = NULL;
+ struct net_device *netdev;
+ bool qinq_sup;
+ u32 port_id;
+ u16 offset;
+ int i, err;
+
+ netdev = m_entry->netdev;
+ qinq_sup = !!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ);
+
+ rules[CT_TYPE_PRE_CT] = m_entry->tc_m_parent->pre_ct_parent->rule;
+ rules[CT_TYPE_NFT] = m_entry->nft_parent->rule;
+ rules[CT_TYPE_POST_CT] = m_entry->tc_m_parent->post_ct_parent->rule;
+
+ memset(&key_layer, 0, sizeof(struct nfp_fl_key_ls));
+ memset(&key_map, 0, sizeof(key_map));
+
+ /* Calculate the resultant key layer and size for offload */
+ for (i = 0; i < _CT_TYPE_MAX; i++) {
+ err = nfp_flower_calculate_key_layers(priv->app,
+ m_entry->netdev,
+ &tmp_layer, rules[i],
+ &tun_type, NULL);
+ if (err)
+ return err;
+
+ key_layer.key_layer |= tmp_layer.key_layer;
+ key_layer.key_layer_two |= tmp_layer.key_layer_two;
+ }
+ key_layer.key_size = nfp_fl_calc_key_layers_sz(key_layer, key_map);
+
+ flow_pay = nfp_flower_allocate_new(&key_layer);
+ if (!flow_pay)
+ return -ENOMEM;
+
+ memset(flow_pay->unmasked_data, 0, key_layer.key_size);
+ memset(flow_pay->mask_data, 0, key_layer.key_size);
+
+ kdata = flow_pay->unmasked_data;
+ mdata = flow_pay->mask_data;
+
+ offset = key_map[FLOW_PAY_META_TCI];
+ key = kdata + offset;
+ msk = mdata + offset;
+ nfp_flower_compile_meta((struct nfp_flower_meta_tci *)key,
+ (struct nfp_flower_meta_tci *)msk,
+ key_layer.key_layer);
+
+ if (NFP_FLOWER_LAYER_EXT_META & key_layer.key_layer) {
+ offset = key_map[FLOW_PAY_EXT_META];
+ key = kdata + offset;
+ msk = mdata + offset;
+ nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)key,
+ key_layer.key_layer_two);
+ nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)msk,
+ key_layer.key_layer_two);
+ }
+
+	/* Using in_port from the -trk rule. The tc merge checks have already
+	 * verified that the ingress netdevs are the same.
+	 */
+ port_id = nfp_flower_get_port_id_from_netdev(priv->app, netdev);
+ offset = key_map[FLOW_PAY_INPORT];
+ key = kdata + offset;
+ msk = mdata + offset;
+ err = nfp_flower_compile_port((struct nfp_flower_in_port *)key,
+ port_id, false, tun_type, NULL);
+ if (err)
+ goto ct_offload_err;
+ err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk,
+ port_id, true, tun_type, NULL);
+ if (err)
+ goto ct_offload_err;
+
+	/* The following part works on the assumption that previous checks have
+	 * already filtered out flows that have different values for the different
+	 * layers. Here we iterate through all three rules and merge their
+	 * respective masked values (the cared-for bits); the basic method is:
+	 * final_key = (r1_key & r1_mask) | (r2_key & r2_mask) | (r3_key & r3_mask)
+	 * final_mask = r1_mask | r2_mask | r3_mask
+	 * If none of the rules contains a match for a layer, that is also fine;
+	 * it simply means that the layer is not present.
+	 */
+ if (!qinq_sup) {
+ for (i = 0; i < _CT_TYPE_MAX; i++) {
+ offset = key_map[FLOW_PAY_META_TCI];
+ key = kdata + offset;
+ msk = mdata + offset;
+ nfp_flower_compile_tci((struct nfp_flower_meta_tci *)key,
+ (struct nfp_flower_meta_tci *)msk,
+ rules[i]);
+ }
+ }
+
+ if (NFP_FLOWER_LAYER_MAC & key_layer.key_layer) {
+ offset = key_map[FLOW_PAY_MAC_MPLS];
+ key = kdata + offset;
+ msk = mdata + offset;
+ for (i = 0; i < _CT_TYPE_MAX; i++) {
+ nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)key,
+ (struct nfp_flower_mac_mpls *)msk,
+ rules[i]);
+ err = nfp_flower_compile_mpls((struct nfp_flower_mac_mpls *)key,
+ (struct nfp_flower_mac_mpls *)msk,
+ rules[i], NULL);
+ if (err)
+ goto ct_offload_err;
+ }
+ }
+
+ if (NFP_FLOWER_LAYER_IPV4 & key_layer.key_layer) {
+ offset = key_map[FLOW_PAY_IPV4];
+ key = kdata + offset;
+ msk = mdata + offset;
+ for (i = 0; i < _CT_TYPE_MAX; i++) {
+ nfp_flower_compile_ipv4((struct nfp_flower_ipv4 *)key,
+ (struct nfp_flower_ipv4 *)msk,
+ rules[i]);
+ }
+ }
+
+ if (NFP_FLOWER_LAYER_IPV6 & key_layer.key_layer) {
+ offset = key_map[FLOW_PAY_IPV6];
+ key = kdata + offset;
+ msk = mdata + offset;
+ for (i = 0; i < _CT_TYPE_MAX; i++) {
+ nfp_flower_compile_ipv6((struct nfp_flower_ipv6 *)key,
+ (struct nfp_flower_ipv6 *)msk,
+ rules[i]);
+ }
+ }
+
+ if (NFP_FLOWER_LAYER_TP & key_layer.key_layer) {
+ offset = key_map[FLOW_PAY_L4];
+ key = kdata + offset;
+ msk = mdata + offset;
+ for (i = 0; i < _CT_TYPE_MAX; i++) {
+ nfp_flower_compile_tport((struct nfp_flower_tp_ports *)key,
+ (struct nfp_flower_tp_ports *)msk,
+ rules[i]);
+ }
+ }
+
+ if (key_layer.key_layer_two & NFP_FLOWER_LAYER2_GRE) {
+ offset = key_map[FLOW_PAY_GRE];
+ key = kdata + offset;
+ msk = mdata + offset;
+ if (key_layer.key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) {
+ struct nfp_flower_ipv6_gre_tun *gre_match;
+ struct nfp_ipv6_addr_entry *entry;
+ struct in6_addr *dst;
+
+ for (i = 0; i < _CT_TYPE_MAX; i++) {
+ nfp_flower_compile_ipv6_gre_tun((void *)key,
+ (void *)msk, rules[i]);
+ }
+ gre_match = (struct nfp_flower_ipv6_gre_tun *)key;
+ dst = &gre_match->ipv6.dst;
+
+ entry = nfp_tunnel_add_ipv6_off(priv->app, dst);
+ if (!entry) {
+ err = -ENOMEM;
+ goto ct_offload_err;
+ }
+
+ flow_pay->nfp_tun_ipv6 = entry;
+ } else {
+ __be32 dst;
+
+ for (i = 0; i < _CT_TYPE_MAX; i++) {
+ nfp_flower_compile_ipv4_gre_tun((void *)key,
+ (void *)msk, rules[i]);
+ }
+ dst = ((struct nfp_flower_ipv4_gre_tun *)key)->ipv4.dst;
+
+ /* Store the tunnel destination in the rule data.
+ * This must be present and be an exact match.
+ */
+ flow_pay->nfp_tun_ipv4_addr = dst;
+ nfp_tunnel_add_ipv4_off(priv->app, dst);
+ }
+ }
+
+ if (NFP_FLOWER_LAYER2_QINQ & key_layer.key_layer_two) {
+ offset = key_map[FLOW_PAY_QINQ];
+ key = kdata + offset;
+ msk = mdata + offset;
+ for (i = 0; i < _CT_TYPE_MAX; i++) {
+ nfp_flower_compile_vlan((struct nfp_flower_vlan *)key,
+ (struct nfp_flower_vlan *)msk,
+ rules[i]);
+ }
+ }
+
+ if (key_layer.key_layer & NFP_FLOWER_LAYER_VXLAN ||
+ key_layer.key_layer_two & NFP_FLOWER_LAYER2_GENEVE) {
+ offset = key_map[FLOW_PAY_UDP_TUN];
+ key = kdata + offset;
+ msk = mdata + offset;
+ if (key_layer.key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) {
+ struct nfp_flower_ipv6_udp_tun *udp_match;
+ struct nfp_ipv6_addr_entry *entry;
+ struct in6_addr *dst;
+
+ for (i = 0; i < _CT_TYPE_MAX; i++) {
+ nfp_flower_compile_ipv6_udp_tun((void *)key,
+ (void *)msk, rules[i]);
+ }
+ udp_match = (struct nfp_flower_ipv6_udp_tun *)key;
+ dst = &udp_match->ipv6.dst;
+
+ entry = nfp_tunnel_add_ipv6_off(priv->app, dst);
+ if (!entry) {
+ err = -ENOMEM;
+ goto ct_offload_err;
+ }
+
+ flow_pay->nfp_tun_ipv6 = entry;
+ } else {
+ __be32 dst;
+
+ for (i = 0; i < _CT_TYPE_MAX; i++) {
+ nfp_flower_compile_ipv4_udp_tun((void *)key,
+ (void *)msk, rules[i]);
+ }
+ dst = ((struct nfp_flower_ipv4_udp_tun *)key)->ipv4.dst;
+
+ /* Store the tunnel destination in the rule data.
+ * This must be present and be an exact match.
+ */
+ flow_pay->nfp_tun_ipv4_addr = dst;
+ nfp_tunnel_add_ipv4_off(priv->app, dst);
+ }
+
+ if (key_layer.key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) {
+ offset = key_map[FLOW_PAY_GENEVE_OPT];
+ key = kdata + offset;
+ msk = mdata + offset;
+ for (i = 0; i < _CT_TYPE_MAX; i++)
+ nfp_flower_compile_geneve_opt(key, msk, rules[i]);
+ }
+ }
+
+ /* Merge actions into flow_pay */
+ err = nfp_fl_merge_actions_offload(rules, priv, netdev, flow_pay);
+ if (err)
+ goto ct_offload_err;
+
+ /* Use the pointer address as the cookie, but set the last bit to 1.
+	 * This prevents the 'is_merge_flow' check from detecting this as
+ * an already merged flow. This works since address alignment means
+ * that the last bit for pointer addresses will be 0.
+ */
+ flow_pay->tc_flower_cookie = ((unsigned long)flow_pay) | 0x1;
+ err = nfp_compile_flow_metadata(priv->app, flow_pay->tc_flower_cookie,
+ flow_pay, netdev, NULL);
+ if (err)
+ goto ct_offload_err;
+
+ if (nfp_netdev_is_nfp_repr(netdev))
+ port = nfp_port_from_netdev(netdev);
+
+ err = rhashtable_insert_fast(&priv->flow_table, &flow_pay->fl_node,
+ nfp_flower_table_params);
+ if (err)
+ goto ct_release_offload_meta_err;
+
+ err = nfp_flower_xmit_flow(priv->app, flow_pay,
+ NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
+ if (err)
+ goto ct_remove_rhash_err;
+
+ m_entry->tc_flower_cookie = flow_pay->tc_flower_cookie;
+ m_entry->flow_pay = flow_pay;
+
+ if (port)
+ port->tc_offload_cnt++;
+
+ return err;
+
+ct_remove_rhash_err:
+ WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
+ &flow_pay->fl_node,
+ nfp_flower_table_params));
+ct_release_offload_meta_err:
+ nfp_modify_flow_metadata(priv->app, flow_pay);
+ct_offload_err:
+ if (flow_pay->nfp_tun_ipv4_addr)
+ nfp_tunnel_del_ipv4_off(priv->app, flow_pay->nfp_tun_ipv4_addr);
+ if (flow_pay->nfp_tun_ipv6)
+ nfp_tunnel_put_ipv6_off(priv->app, flow_pay->nfp_tun_ipv6);
+ kfree(flow_pay->action_data);
+ kfree(flow_pay->mask_data);
+ kfree(flow_pay->unmasked_data);
+ kfree(flow_pay);
+ return err;
}
static int nfp_fl_ct_del_offload(struct nfp_app *app, unsigned long cookie,
struct net_device *netdev)
{
- return 0;
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_fl_payload *flow_pay;
+ struct nfp_port *port = NULL;
+ int err = 0;
+
+ if (nfp_netdev_is_nfp_repr(netdev))
+ port = nfp_port_from_netdev(netdev);
+
+ flow_pay = nfp_flower_search_fl_table(app, cookie, netdev);
+ if (!flow_pay)
+ return -ENOENT;
+
+ err = nfp_modify_flow_metadata(app, flow_pay);
+ if (err)
+ goto err_free_merge_flow;
+
+ if (flow_pay->nfp_tun_ipv4_addr)
+ nfp_tunnel_del_ipv4_off(app, flow_pay->nfp_tun_ipv4_addr);
+
+ if (flow_pay->nfp_tun_ipv6)
+ nfp_tunnel_put_ipv6_off(app, flow_pay->nfp_tun_ipv6);
+
+ if (!flow_pay->in_hw) {
+ err = 0;
+ goto err_free_merge_flow;
+ }
+
+ err = nfp_flower_xmit_flow(app, flow_pay,
+ NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
+
+err_free_merge_flow:
+ nfp_flower_del_linked_merge_flows(app, flow_pay);
+ if (port)
+ port->tc_offload_cnt--;
+ kfree(flow_pay->action_data);
+ kfree(flow_pay->mask_data);
+ kfree(flow_pay->unmasked_data);
+ WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
+ &flow_pay->fl_node,
+ nfp_flower_table_params));
+ kfree_rcu(flow_pay, rcu);
+ return err;
}
static int nfp_ct_do_nft_merge(struct nfp_fl_ct_zone_entry *zt,
@@ -1048,6 +1525,139 @@ int nfp_fl_ct_handle_post_ct(struct nfp_flower_priv *priv,
return 0;
}
+static void
+nfp_fl_ct_sub_stats(struct nfp_fl_nft_tc_merge *nft_merge,
+ enum ct_entry_type type, u64 *m_pkts,
+ u64 *m_bytes, u64 *m_used)
+{
+ struct nfp_flower_priv *priv = nft_merge->zt->priv;
+ struct nfp_fl_payload *nfp_flow;
+ u32 ctx_id;
+
+ nfp_flow = nft_merge->flow_pay;
+ if (!nfp_flow)
+ return;
+
+ ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);
+ *m_pkts += priv->stats[ctx_id].pkts;
+ *m_bytes += priv->stats[ctx_id].bytes;
+ *m_used = max_t(u64, *m_used, priv->stats[ctx_id].used);
+
+ /* If request is for a sub_flow which is part of a tunnel merged
+ * flow then update stats from tunnel merged flows first.
+ */
+ if (!list_empty(&nfp_flow->linked_flows))
+ nfp_flower_update_merge_stats(priv->app, nfp_flow);
+
+ if (type != CT_TYPE_NFT) {
+ /* Update nft cached stats */
+ flow_stats_update(&nft_merge->nft_parent->stats,
+ priv->stats[ctx_id].bytes,
+ priv->stats[ctx_id].pkts,
+ 0, priv->stats[ctx_id].used,
+ FLOW_ACTION_HW_STATS_DELAYED);
+ } else {
+ /* Update pre_ct cached stats */
+ flow_stats_update(&nft_merge->tc_m_parent->pre_ct_parent->stats,
+ priv->stats[ctx_id].bytes,
+ priv->stats[ctx_id].pkts,
+ 0, priv->stats[ctx_id].used,
+ FLOW_ACTION_HW_STATS_DELAYED);
+ /* Update post_ct cached stats */
+ flow_stats_update(&nft_merge->tc_m_parent->post_ct_parent->stats,
+ priv->stats[ctx_id].bytes,
+ priv->stats[ctx_id].pkts,
+ 0, priv->stats[ctx_id].used,
+ FLOW_ACTION_HW_STATS_DELAYED);
+ }
+ /* Reset stats from the nfp */
+ priv->stats[ctx_id].pkts = 0;
+ priv->stats[ctx_id].bytes = 0;
+}
+
+int nfp_fl_ct_stats(struct flow_cls_offload *flow,
+ struct nfp_fl_ct_map_entry *ct_map_ent)
+{
+ struct nfp_fl_ct_flow_entry *ct_entry = ct_map_ent->ct_entry;
+ struct nfp_fl_nft_tc_merge *nft_merge, *nft_m_tmp;
+ struct nfp_fl_ct_tc_merge *tc_merge, *tc_m_tmp;
+
+ u64 pkts = 0, bytes = 0, used = 0;
+ u64 m_pkts, m_bytes, m_used;
+
+ spin_lock_bh(&ct_entry->zt->priv->stats_lock);
+
+ if (ct_entry->type == CT_TYPE_PRE_CT) {
+ /* Iterate tc_merge entries associated with this flow */
+ list_for_each_entry_safe(tc_merge, tc_m_tmp, &ct_entry->children,
+ pre_ct_list) {
+ m_pkts = 0;
+ m_bytes = 0;
+ m_used = 0;
+ /* Iterate nft_merge entries associated with this tc_merge flow */
+ list_for_each_entry_safe(nft_merge, nft_m_tmp, &tc_merge->children,
+ tc_merge_list) {
+ nfp_fl_ct_sub_stats(nft_merge, CT_TYPE_PRE_CT,
+ &m_pkts, &m_bytes, &m_used);
+ }
+ pkts += m_pkts;
+ bytes += m_bytes;
+ used = max_t(u64, used, m_used);
+ /* Update post_ct partner */
+ flow_stats_update(&tc_merge->post_ct_parent->stats,
+ m_bytes, m_pkts, 0, m_used,
+ FLOW_ACTION_HW_STATS_DELAYED);
+ }
+ } else if (ct_entry->type == CT_TYPE_POST_CT) {
+ /* Iterate tc_merge entries associated with this flow */
+ list_for_each_entry_safe(tc_merge, tc_m_tmp, &ct_entry->children,
+ post_ct_list) {
+ m_pkts = 0;
+ m_bytes = 0;
+ m_used = 0;
+ /* Iterate nft_merge entries associated with this tc_merge flow */
+ list_for_each_entry_safe(nft_merge, nft_m_tmp, &tc_merge->children,
+ tc_merge_list) {
+ nfp_fl_ct_sub_stats(nft_merge, CT_TYPE_POST_CT,
+ &m_pkts, &m_bytes, &m_used);
+ }
+ pkts += m_pkts;
+ bytes += m_bytes;
+ used = max_t(u64, used, m_used);
+ /* Update pre_ct partner */
+ flow_stats_update(&tc_merge->pre_ct_parent->stats,
+ m_bytes, m_pkts, 0, m_used,
+ FLOW_ACTION_HW_STATS_DELAYED);
+ }
+ } else {
+ /* Iterate nft_merge entries associated with this nft flow */
+ list_for_each_entry_safe(nft_merge, nft_m_tmp, &ct_entry->children,
+ nft_flow_list) {
+ nfp_fl_ct_sub_stats(nft_merge, CT_TYPE_NFT,
+ &pkts, &bytes, &used);
+ }
+ }
+
+ /* Add stats from this request to stats potentially cached by
+ * previous requests.
+ */
+ flow_stats_update(&ct_entry->stats, bytes, pkts, 0, used,
+ FLOW_ACTION_HW_STATS_DELAYED);
+ /* Finally update the flow stats from the original stats request */
+ flow_stats_update(&flow->stats, ct_entry->stats.bytes,
+ ct_entry->stats.pkts, 0,
+ ct_entry->stats.lastused,
+ FLOW_ACTION_HW_STATS_DELAYED);
+	/* Stats have been synced to the original flow; we can now clear
+	 * the cache.
+	 */
+ ct_entry->stats.pkts = 0;
+ ct_entry->stats.bytes = 0;
+ spin_unlock_bh(&ct_entry->zt->priv->stats_lock);
+
+ return 0;
+}
+
static int
nfp_fl_ct_offload_nft_flow(struct nfp_fl_ct_zone_entry *zt, struct flow_cls_offload *flow)
{
@@ -1080,7 +1690,11 @@ nfp_fl_ct_offload_nft_flow(struct nfp_fl_ct_zone_entry *zt, struct flow_cls_offl
nfp_ct_map_params);
return nfp_fl_ct_del_flow(ct_map_ent);
case FLOW_CLS_STATS:
- return 0;
+ ct_map_ent = rhashtable_lookup_fast(&zt->priv->ct_map_table, &flow->cookie,
+ nfp_ct_map_params);
+ if (ct_map_ent)
+ return nfp_fl_ct_stats(flow, ct_map_ent);
+ break;
default:
break;
}
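The per-layer merging in nfp_fl_ct_add_offload() implements the mask-aware OR described in its comment. Reduced to a single layer and stripped of driver types, the operation each iteration performs is roughly:

	/* Minimal sketch: k/m are one sub-rule's key and mask; key/msk
	 * accumulate the final offloaded match across all sub-rules.
	 */
	static void example_merge_layer(u8 *key, u8 *msk,
					const u8 *k, const u8 *m, int len)
	{
		int i;

		for (i = 0; i < len; i++) {
			key[i] |= k[i] & m[i];	/* only the cared-for bits */
			msk[i] |= m[i];
		}
	}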
diff --git a/drivers/net/ethernet/netronome/nfp/flower/conntrack.h b/drivers/net/ethernet/netronome/nfp/flower/conntrack.h
index 170b6cdb8cd0..beb6cceff9d8 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/conntrack.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.h
@@ -83,6 +83,24 @@ enum ct_entry_type {
CT_TYPE_PRE_CT,
CT_TYPE_NFT,
CT_TYPE_POST_CT,
+ _CT_TYPE_MAX,
+};
+
+enum nfp_nfp_layer_name {
+ FLOW_PAY_META_TCI = 0,
+ FLOW_PAY_INPORT,
+ FLOW_PAY_EXT_META,
+ FLOW_PAY_MAC_MPLS,
+ FLOW_PAY_L4,
+ FLOW_PAY_IPV4,
+ FLOW_PAY_IPV6,
+ FLOW_PAY_CT,
+ FLOW_PAY_GRE,
+ FLOW_PAY_QINQ,
+ FLOW_PAY_UDP_TUN,
+ FLOW_PAY_GENEVE_OPT,
+
+ _FLOW_PAY_LAYERS_MAX
};
/**
@@ -228,4 +246,12 @@ int nfp_fl_ct_del_flow(struct nfp_fl_ct_map_entry *ct_map_ent);
*/
int nfp_fl_ct_handle_nft_flow(enum tc_setup_type type, void *type_data,
void *cb_priv);
+
+/**
+ * nfp_fl_ct_stats() - Handle flower stats callbacks for ct flows
+ * @flow: TC flower classifier offload structure.
+ * @ct_map_ent:	ct map entry for the flow whose stats are requested
+ */
+int nfp_fl_ct_stats(struct flow_cls_offload *flow,
+ struct nfp_fl_ct_map_entry *ct_map_ent);
#endif
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index 0fbd682ccf72..917c450a7aad 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -413,20 +413,73 @@ int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
struct nfp_fl_payload *sub_flow1,
struct nfp_fl_payload *sub_flow2);
+void
+nfp_flower_compile_meta(struct nfp_flower_meta_tci *ext,
+ struct nfp_flower_meta_tci *msk, u8 key_type);
+void
+nfp_flower_compile_tci(struct nfp_flower_meta_tci *ext,
+ struct nfp_flower_meta_tci *msk,
+ struct flow_rule *rule);
+void
+nfp_flower_compile_ext_meta(struct nfp_flower_ext_meta *frame, u32 key_ext);
+int
+nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
+ bool mask_version, enum nfp_flower_tun_type tun_type,
+ struct netlink_ext_ack *extack);
+void
+nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext,
+ struct nfp_flower_mac_mpls *msk,
+ struct flow_rule *rule);
+int
+nfp_flower_compile_mpls(struct nfp_flower_mac_mpls *ext,
+ struct nfp_flower_mac_mpls *msk,
+ struct flow_rule *rule,
+ struct netlink_ext_ack *extack);
+void
+nfp_flower_compile_tport(struct nfp_flower_tp_ports *ext,
+ struct nfp_flower_tp_ports *msk,
+ struct flow_rule *rule);
+void
+nfp_flower_compile_vlan(struct nfp_flower_vlan *ext,
+ struct nfp_flower_vlan *msk,
+ struct flow_rule *rule);
+void
+nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *ext,
+ struct nfp_flower_ipv4 *msk, struct flow_rule *rule);
+void
+nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *ext,
+ struct nfp_flower_ipv6 *msk, struct flow_rule *rule);
+void
+nfp_flower_compile_geneve_opt(u8 *ext, u8 *msk, struct flow_rule *rule);
+void
+nfp_flower_compile_ipv4_gre_tun(struct nfp_flower_ipv4_gre_tun *ext,
+ struct nfp_flower_ipv4_gre_tun *msk,
+ struct flow_rule *rule);
+void
+nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *ext,
+ struct nfp_flower_ipv4_udp_tun *msk,
+ struct flow_rule *rule);
+void
+nfp_flower_compile_ipv6_udp_tun(struct nfp_flower_ipv6_udp_tun *ext,
+ struct nfp_flower_ipv6_udp_tun *msk,
+ struct flow_rule *rule);
+void
+nfp_flower_compile_ipv6_gre_tun(struct nfp_flower_ipv6_gre_tun *ext,
+ struct nfp_flower_ipv6_gre_tun *msk,
+ struct flow_rule *rule);
int nfp_flower_compile_flow_match(struct nfp_app *app,
- struct flow_cls_offload *flow,
+ struct flow_rule *rule,
struct nfp_fl_key_ls *key_ls,
struct net_device *netdev,
struct nfp_fl_payload *nfp_flow,
enum nfp_flower_tun_type tun_type,
struct netlink_ext_ack *extack);
int nfp_flower_compile_action(struct nfp_app *app,
- struct flow_cls_offload *flow,
+ struct flow_rule *rule,
struct net_device *netdev,
struct nfp_fl_payload *nfp_flow,
struct netlink_ext_ack *extack);
-int nfp_compile_flow_metadata(struct nfp_app *app,
- struct flow_cls_offload *flow,
+int nfp_compile_flow_metadata(struct nfp_app *app, u32 cookie,
struct nfp_fl_payload *nfp_flow,
struct net_device *netdev,
struct netlink_ext_ack *extack);
@@ -498,4 +551,22 @@ int nfp_flower_xmit_pre_tun_flow(struct nfp_app *app,
struct nfp_fl_payload *flow);
int nfp_flower_xmit_pre_tun_del_flow(struct nfp_app *app,
struct nfp_fl_payload *flow);
+
+struct nfp_fl_payload *
+nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer);
+int nfp_flower_calculate_key_layers(struct nfp_app *app,
+ struct net_device *netdev,
+ struct nfp_fl_key_ls *ret_key_ls,
+ struct flow_rule *flow,
+ enum nfp_flower_tun_type *tun_type,
+ struct netlink_ext_ack *extack);
+void
+nfp_flower_del_linked_merge_flows(struct nfp_app *app,
+ struct nfp_fl_payload *sub_flow);
+int
+nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow,
+ u8 mtype);
+void
+nfp_flower_update_merge_stats(struct nfp_app *app,
+ struct nfp_fl_payload *sub_flow);
#endif
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
index 255a4dff6288..9d86eea4dc16 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -7,51 +7,68 @@
#include "cmsg.h"
#include "main.h"
-static void
-nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *ext,
- struct nfp_flower_meta_tci *msk,
- struct flow_rule *rule, u8 key_type, bool qinq_sup)
+void
+nfp_flower_compile_meta(struct nfp_flower_meta_tci *ext,
+ struct nfp_flower_meta_tci *msk, u8 key_type)
{
- u16 tmp_tci;
-
- memset(ext, 0, sizeof(struct nfp_flower_meta_tci));
- memset(msk, 0, sizeof(struct nfp_flower_meta_tci));
-
/* Populate the metadata frame. */
ext->nfp_flow_key_layer = key_type;
ext->mask_id = ~0;
msk->nfp_flow_key_layer = key_type;
msk->mask_id = ~0;
+}
- if (!qinq_sup && flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+void
+nfp_flower_compile_tci(struct nfp_flower_meta_tci *ext,
+ struct nfp_flower_meta_tci *msk,
+ struct flow_rule *rule)
+{
+ u16 msk_tci, key_tci;
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
struct flow_match_vlan match;
flow_rule_match_vlan(rule, &match);
/* Populate the tci field. */
- tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
- tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
+ key_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
+ key_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
match.key->vlan_priority) |
FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
match.key->vlan_id);
- ext->tci = cpu_to_be16(tmp_tci);
- tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
- tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
+ msk_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
+ msk_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
match.mask->vlan_priority) |
FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
match.mask->vlan_id);
- msk->tci = cpu_to_be16(tmp_tci);
+
+ ext->tci |= cpu_to_be16((key_tci & msk_tci));
+ msk->tci |= cpu_to_be16(msk_tci);
}
}
static void
+nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *ext,
+ struct nfp_flower_meta_tci *msk,
+ struct flow_rule *rule, u8 key_type, bool qinq_sup)
+{
+ memset(ext, 0, sizeof(struct nfp_flower_meta_tci));
+ memset(msk, 0, sizeof(struct nfp_flower_meta_tci));
+
+ nfp_flower_compile_meta(ext, msk, key_type);
+
+ if (!qinq_sup)
+ nfp_flower_compile_tci(ext, msk, rule);
+}
+
+void
nfp_flower_compile_ext_meta(struct nfp_flower_ext_meta *frame, u32 key_ext)
{
frame->nfp_flow_key_layer2 = cpu_to_be32(key_ext);
}
-static int
+int
nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
bool mask_version, enum nfp_flower_tun_type tun_type,
struct netlink_ext_ack *extack)
@@ -74,28 +91,37 @@ nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
return 0;
}
-static int
+void
nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext,
- struct nfp_flower_mac_mpls *msk, struct flow_rule *rule,
- struct netlink_ext_ack *extack)
+ struct nfp_flower_mac_mpls *msk,
+ struct flow_rule *rule)
{
- memset(ext, 0, sizeof(struct nfp_flower_mac_mpls));
- memset(msk, 0, sizeof(struct nfp_flower_mac_mpls));
-
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
struct flow_match_eth_addrs match;
+ int i;
flow_rule_match_eth_addrs(rule, &match);
/* Populate mac frame. */
- ether_addr_copy(ext->mac_dst, &match.key->dst[0]);
- ether_addr_copy(ext->mac_src, &match.key->src[0]);
- ether_addr_copy(msk->mac_dst, &match.mask->dst[0]);
- ether_addr_copy(msk->mac_src, &match.mask->src[0]);
+ for (i = 0; i < ETH_ALEN; i++) {
+ ext->mac_dst[i] |= match.key->dst[i] &
+ match.mask->dst[i];
+ msk->mac_dst[i] |= match.mask->dst[i];
+ ext->mac_src[i] |= match.key->src[i] &
+ match.mask->src[i];
+ msk->mac_src[i] |= match.mask->src[i];
+ }
}
+}
+int
+nfp_flower_compile_mpls(struct nfp_flower_mac_mpls *ext,
+ struct nfp_flower_mac_mpls *msk,
+ struct flow_rule *rule,
+ struct netlink_ext_ack *extack)
+{
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) {
struct flow_match_mpls match;
- u32 t_mpls;
+ u32 key_mpls, msk_mpls;
flow_rule_match_mpls(rule, &match);
@@ -106,22 +132,24 @@ nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext,
return -EOPNOTSUPP;
}
- t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB,
- match.key->ls[0].mpls_label) |
- FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC,
- match.key->ls[0].mpls_tc) |
- FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS,
- match.key->ls[0].mpls_bos) |
- NFP_FLOWER_MASK_MPLS_Q;
- ext->mpls_lse = cpu_to_be32(t_mpls);
- t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB,
- match.mask->ls[0].mpls_label) |
- FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC,
- match.mask->ls[0].mpls_tc) |
- FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS,
- match.mask->ls[0].mpls_bos) |
- NFP_FLOWER_MASK_MPLS_Q;
- msk->mpls_lse = cpu_to_be32(t_mpls);
+ key_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB,
+ match.key->ls[0].mpls_label) |
+ FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC,
+ match.key->ls[0].mpls_tc) |
+ FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS,
+ match.key->ls[0].mpls_bos) |
+ NFP_FLOWER_MASK_MPLS_Q;
+
+ msk_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB,
+ match.mask->ls[0].mpls_label) |
+ FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC,
+ match.mask->ls[0].mpls_tc) |
+ FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS,
+ match.mask->ls[0].mpls_bos) |
+ NFP_FLOWER_MASK_MPLS_Q;
+
+ ext->mpls_lse |= cpu_to_be32((key_mpls & msk_mpls));
+ msk->mpls_lse |= cpu_to_be32(msk_mpls);
} else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
/* Check for mpls ether type and set NFP_FLOWER_MASK_MPLS_Q
* bit, which indicates an mpls ether type but without any
@@ -132,30 +160,41 @@ nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext,
flow_rule_match_basic(rule, &match);
if (match.key->n_proto == cpu_to_be16(ETH_P_MPLS_UC) ||
match.key->n_proto == cpu_to_be16(ETH_P_MPLS_MC)) {
- ext->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
- msk->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
+ ext->mpls_lse |= cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
+ msk->mpls_lse |= cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
}
}
return 0;
}
-static void
+static int
+nfp_flower_compile_mac_mpls(struct nfp_flower_mac_mpls *ext,
+ struct nfp_flower_mac_mpls *msk,
+ struct flow_rule *rule,
+ struct netlink_ext_ack *extack)
+{
+ memset(ext, 0, sizeof(struct nfp_flower_mac_mpls));
+ memset(msk, 0, sizeof(struct nfp_flower_mac_mpls));
+
+ nfp_flower_compile_mac(ext, msk, rule);
+
+ return nfp_flower_compile_mpls(ext, msk, rule, extack);
+}
+
+void
nfp_flower_compile_tport(struct nfp_flower_tp_ports *ext,
struct nfp_flower_tp_ports *msk,
struct flow_rule *rule)
{
- memset(ext, 0, sizeof(struct nfp_flower_tp_ports));
- memset(msk, 0, sizeof(struct nfp_flower_tp_ports));
-
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
struct flow_match_ports match;
flow_rule_match_ports(rule, &match);
- ext->port_src = match.key->src;
- ext->port_dst = match.key->dst;
- msk->port_src = match.mask->src;
- msk->port_dst = match.mask->dst;
+ ext->port_src |= match.key->src & match.mask->src;
+ ext->port_dst |= match.key->dst & match.mask->dst;
+ msk->port_src |= match.mask->src;
+ msk->port_dst |= match.mask->dst;
}
}
@@ -167,18 +206,18 @@ nfp_flower_compile_ip_ext(struct nfp_flower_ip_ext *ext,
struct flow_match_basic match;
flow_rule_match_basic(rule, &match);
- ext->proto = match.key->ip_proto;
- msk->proto = match.mask->ip_proto;
+ ext->proto |= match.key->ip_proto & match.mask->ip_proto;
+ msk->proto |= match.mask->ip_proto;
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
struct flow_match_ip match;
flow_rule_match_ip(rule, &match);
- ext->tos = match.key->tos;
- ext->ttl = match.key->ttl;
- msk->tos = match.mask->tos;
- msk->ttl = match.mask->ttl;
+ ext->tos |= match.key->tos & match.mask->tos;
+ ext->ttl |= match.key->ttl & match.mask->ttl;
+ msk->tos |= match.mask->tos;
+ msk->ttl |= match.mask->ttl;
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
@@ -231,99 +270,108 @@ nfp_flower_compile_ip_ext(struct nfp_flower_ip_ext *ext,
}
static void
-nfp_flower_fill_vlan(struct flow_dissector_key_vlan *key,
- struct nfp_flower_vlan *frame,
- bool outer_vlan)
+nfp_flower_fill_vlan(struct flow_match_vlan *match,
+ struct nfp_flower_vlan *ext,
+ struct nfp_flower_vlan *msk, bool outer_vlan)
{
- u16 tci;
-
- tci = NFP_FLOWER_MASK_VLAN_PRESENT;
- tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
- key->vlan_priority) |
- FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
- key->vlan_id);
+ struct flow_dissector_key_vlan *mask = match->mask;
+ struct flow_dissector_key_vlan *key = match->key;
+ u16 msk_tci, key_tci;
+
+ key_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
+ key_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
+ key->vlan_priority) |
+ FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
+ key->vlan_id);
+ msk_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
+ msk_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
+ mask->vlan_priority) |
+ FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
+ mask->vlan_id);
if (outer_vlan) {
- frame->outer_tci = cpu_to_be16(tci);
- frame->outer_tpid = key->vlan_tpid;
+ ext->outer_tci |= cpu_to_be16((key_tci & msk_tci));
+ ext->outer_tpid |= key->vlan_tpid & mask->vlan_tpid;
+ msk->outer_tci |= cpu_to_be16(msk_tci);
+ msk->outer_tpid |= mask->vlan_tpid;
} else {
- frame->inner_tci = cpu_to_be16(tci);
- frame->inner_tpid = key->vlan_tpid;
+ ext->inner_tci |= cpu_to_be16((key_tci & msk_tci));
+ ext->inner_tpid |= key->vlan_tpid & mask->vlan_tpid;
+ msk->inner_tci |= cpu_to_be16(msk_tci);
+ msk->inner_tpid |= mask->vlan_tpid;
}
}
-static void
+void
nfp_flower_compile_vlan(struct nfp_flower_vlan *ext,
struct nfp_flower_vlan *msk,
struct flow_rule *rule)
{
struct flow_match_vlan match;
- memset(ext, 0, sizeof(struct nfp_flower_vlan));
- memset(msk, 0, sizeof(struct nfp_flower_vlan));
-
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
flow_rule_match_vlan(rule, &match);
- nfp_flower_fill_vlan(match.key, ext, true);
- nfp_flower_fill_vlan(match.mask, msk, true);
+ nfp_flower_fill_vlan(&match, ext, msk, true);
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
flow_rule_match_cvlan(rule, &match);
- nfp_flower_fill_vlan(match.key, ext, false);
- nfp_flower_fill_vlan(match.mask, msk, false);
+ nfp_flower_fill_vlan(&match, ext, msk, false);
}
}
-static void
+void
nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *ext,
struct nfp_flower_ipv4 *msk, struct flow_rule *rule)
{
- struct flow_match_ipv4_addrs match;
-
- memset(ext, 0, sizeof(struct nfp_flower_ipv4));
- memset(msk, 0, sizeof(struct nfp_flower_ipv4));
-
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
+ struct flow_match_ipv4_addrs match;
+
flow_rule_match_ipv4_addrs(rule, &match);
- ext->ipv4_src = match.key->src;
- ext->ipv4_dst = match.key->dst;
- msk->ipv4_src = match.mask->src;
- msk->ipv4_dst = match.mask->dst;
+ ext->ipv4_src |= match.key->src & match.mask->src;
+ ext->ipv4_dst |= match.key->dst & match.mask->dst;
+ msk->ipv4_src |= match.mask->src;
+ msk->ipv4_dst |= match.mask->dst;
}
nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
}
-static void
+void
nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *ext,
struct nfp_flower_ipv6 *msk, struct flow_rule *rule)
{
- memset(ext, 0, sizeof(struct nfp_flower_ipv6));
- memset(msk, 0, sizeof(struct nfp_flower_ipv6));
-
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
struct flow_match_ipv6_addrs match;
+ int i;
flow_rule_match_ipv6_addrs(rule, &match);
- ext->ipv6_src = match.key->src;
- ext->ipv6_dst = match.key->dst;
- msk->ipv6_src = match.mask->src;
- msk->ipv6_dst = match.mask->dst;
+ for (i = 0; i < sizeof(ext->ipv6_src); i++) {
+ ext->ipv6_src.s6_addr[i] |= match.key->src.s6_addr[i] &
+ match.mask->src.s6_addr[i];
+ ext->ipv6_dst.s6_addr[i] |= match.key->dst.s6_addr[i] &
+ match.mask->dst.s6_addr[i];
+ msk->ipv6_src.s6_addr[i] |= match.mask->src.s6_addr[i];
+ msk->ipv6_dst.s6_addr[i] |= match.mask->dst.s6_addr[i];
+ }
}
nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
}
-static int
-nfp_flower_compile_geneve_opt(void *ext, void *msk, struct flow_rule *rule)
+void
+nfp_flower_compile_geneve_opt(u8 *ext, u8 *msk, struct flow_rule *rule)
{
struct flow_match_enc_opts match;
+ int i;
- flow_rule_match_enc_opts(rule, &match);
- memcpy(ext, match.key->data, match.key->len);
- memcpy(msk, match.mask->data, match.mask->len);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS)) {
+ flow_rule_match_enc_opts(rule, &match);
- return 0;
+ for (i = 0; i < match.mask->len; i++) {
+ ext[i] |= match.key->data[i] & match.mask->data[i];
+ msk[i] |= match.mask->data[i];
+ }
+ }
}
static void
@@ -335,10 +383,10 @@ nfp_flower_compile_tun_ipv4_addrs(struct nfp_flower_tun_ipv4 *ext,
struct flow_match_ipv4_addrs match;
flow_rule_match_enc_ipv4_addrs(rule, &match);
- ext->src = match.key->src;
- ext->dst = match.key->dst;
- msk->src = match.mask->src;
- msk->dst = match.mask->dst;
+ ext->src |= match.key->src & match.mask->src;
+ ext->dst |= match.key->dst & match.mask->dst;
+ msk->src |= match.mask->src;
+ msk->dst |= match.mask->dst;
}
}
@@ -349,12 +397,17 @@ nfp_flower_compile_tun_ipv6_addrs(struct nfp_flower_tun_ipv6 *ext,
{
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
struct flow_match_ipv6_addrs match;
+ int i;
flow_rule_match_enc_ipv6_addrs(rule, &match);
- ext->src = match.key->src;
- ext->dst = match.key->dst;
- msk->src = match.mask->src;
- msk->dst = match.mask->dst;
+ for (i = 0; i < sizeof(ext->src); i++) {
+ ext->src.s6_addr[i] |= match.key->src.s6_addr[i] &
+ match.mask->src.s6_addr[i];
+ ext->dst.s6_addr[i] |= match.key->dst.s6_addr[i] &
+ match.mask->dst.s6_addr[i];
+ msk->src.s6_addr[i] |= match.mask->src.s6_addr[i];
+ msk->dst.s6_addr[i] |= match.mask->dst.s6_addr[i];
+ }
}
}
@@ -367,10 +420,10 @@ nfp_flower_compile_tun_ip_ext(struct nfp_flower_tun_ip_ext *ext,
struct flow_match_ip match;
flow_rule_match_enc_ip(rule, &match);
- ext->tos = match.key->tos;
- ext->ttl = match.key->ttl;
- msk->tos = match.mask->tos;
- msk->ttl = match.mask->ttl;
+ ext->tos |= match.key->tos & match.mask->tos;
+ ext->ttl |= match.key->ttl & match.mask->ttl;
+ msk->tos |= match.mask->tos;
+ msk->ttl |= match.mask->ttl;
}
}
@@ -383,10 +436,11 @@ nfp_flower_compile_tun_udp_key(__be32 *key, __be32 *key_msk,
u32 vni;
flow_rule_match_enc_keyid(rule, &match);
- vni = be32_to_cpu(match.key->keyid) << NFP_FL_TUN_VNI_OFFSET;
- *key = cpu_to_be32(vni);
+ vni = be32_to_cpu((match.key->keyid & match.mask->keyid)) <<
+ NFP_FL_TUN_VNI_OFFSET;
+ *key |= cpu_to_be32(vni);
vni = be32_to_cpu(match.mask->keyid) << NFP_FL_TUN_VNI_OFFSET;
- *key_msk = cpu_to_be32(vni);
+ *key_msk |= cpu_to_be32(vni);
}
}
@@ -398,22 +452,19 @@ nfp_flower_compile_tun_gre_key(__be32 *key, __be32 *key_msk, __be16 *flags,
struct flow_match_enc_keyid match;
flow_rule_match_enc_keyid(rule, &match);
- *key = match.key->keyid;
- *key_msk = match.mask->keyid;
+ *key |= match.key->keyid & match.mask->keyid;
+ *key_msk |= match.mask->keyid;
*flags = cpu_to_be16(NFP_FL_GRE_FLAG_KEY);
*flags_msk = cpu_to_be16(NFP_FL_GRE_FLAG_KEY);
}
}
-static void
+void
nfp_flower_compile_ipv4_gre_tun(struct nfp_flower_ipv4_gre_tun *ext,
struct nfp_flower_ipv4_gre_tun *msk,
struct flow_rule *rule)
{
- memset(ext, 0, sizeof(struct nfp_flower_ipv4_gre_tun));
- memset(msk, 0, sizeof(struct nfp_flower_ipv4_gre_tun));
-
/* NVGRE is the only supported GRE tunnel type */
ext->ethertype = cpu_to_be16(ETH_P_TEB);
msk->ethertype = cpu_to_be16(~0);
@@ -424,40 +475,31 @@ nfp_flower_compile_ipv4_gre_tun(struct nfp_flower_ipv4_gre_tun *ext,
&ext->tun_flags, &msk->tun_flags, rule);
}
-static void
+void
nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *ext,
struct nfp_flower_ipv4_udp_tun *msk,
struct flow_rule *rule)
{
- memset(ext, 0, sizeof(struct nfp_flower_ipv4_udp_tun));
- memset(msk, 0, sizeof(struct nfp_flower_ipv4_udp_tun));
-
nfp_flower_compile_tun_ipv4_addrs(&ext->ipv4, &msk->ipv4, rule);
nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
nfp_flower_compile_tun_udp_key(&ext->tun_id, &msk->tun_id, rule);
}
-static void
+void
nfp_flower_compile_ipv6_udp_tun(struct nfp_flower_ipv6_udp_tun *ext,
struct nfp_flower_ipv6_udp_tun *msk,
struct flow_rule *rule)
{
- memset(ext, 0, sizeof(struct nfp_flower_ipv6_udp_tun));
- memset(msk, 0, sizeof(struct nfp_flower_ipv6_udp_tun));
-
nfp_flower_compile_tun_ipv6_addrs(&ext->ipv6, &msk->ipv6, rule);
nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
nfp_flower_compile_tun_udp_key(&ext->tun_id, &msk->tun_id, rule);
}
-static void
+void
nfp_flower_compile_ipv6_gre_tun(struct nfp_flower_ipv6_gre_tun *ext,
struct nfp_flower_ipv6_gre_tun *msk,
struct flow_rule *rule)
{
- memset(ext, 0, sizeof(struct nfp_flower_ipv6_gre_tun));
- memset(msk, 0, sizeof(struct nfp_flower_ipv6_gre_tun));
-
/* NVGRE is the only supported GRE tunnel type */
ext->ethertype = cpu_to_be16(ETH_P_TEB);
msk->ethertype = cpu_to_be16(~0);
@@ -469,14 +511,13 @@ nfp_flower_compile_ipv6_gre_tun(struct nfp_flower_ipv6_gre_tun *ext,
}
int nfp_flower_compile_flow_match(struct nfp_app *app,
- struct flow_cls_offload *flow,
+ struct flow_rule *rule,
struct nfp_fl_key_ls *key_ls,
struct net_device *netdev,
struct nfp_fl_payload *nfp_flow,
enum nfp_flower_tun_type tun_type,
struct netlink_ext_ack *extack)
{
- struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
struct nfp_flower_priv *priv = app->priv;
bool qinq_sup;
u32 port_id;
@@ -527,9 +568,9 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
msk += sizeof(struct nfp_flower_in_port);
if (NFP_FLOWER_LAYER_MAC & key_ls->key_layer) {
- err = nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)ext,
- (struct nfp_flower_mac_mpls *)msk,
- rule, extack);
+ err = nfp_flower_compile_mac_mpls((struct nfp_flower_mac_mpls *)ext,
+ (struct nfp_flower_mac_mpls *)msk,
+ rule, extack);
if (err)
return err;
@@ -640,9 +681,7 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
}
if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) {
- err = nfp_flower_compile_geneve_opt(ext, msk, rule);
- if (err)
- return err;
+ nfp_flower_compile_geneve_opt(ext, msk, rule);
}
}
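
The pattern repeated throughout match.c above is the real payload of this change: the memset()s move out of the per-layer compilers and every field is OR-accumulated as "key & mask" rather than assigned, so several partial matches (for example a pre-ct and a post-ct rule, matching the new FLOW_DISSECTOR_KEY_CT bit whitelisted in offload.c below) can be layered into one hardware key without a later pass clobbering an earlier one. A minimal userspace sketch of the idiom, with invented names:

/* Illustration only: the masked-merge idiom used throughout match.c.
 * Each pass ORs in "key & mask" into the exact-match half and ORs the
 * mask into the mask half, so partial matches compose.
 */
#include <stdint.h>
#include <stdio.h>

struct demo_key { uint16_t tci; };

static void merge_match(struct demo_key *ext, struct demo_key *msk,
                        uint16_t key, uint16_t mask)
{
        ext->tci |= key & mask;         /* only masked bits contribute */
        msk->tci |= mask;               /* union of all masks */
}

int main(void)
{
        struct demo_key ext = { 0 }, msk = { 0 };

        merge_match(&ext, &msk, 0x1234, 0x0fff);        /* first pass: VID */
        merge_match(&ext, &msk, 0xe000, 0xe000);        /* second pass: PCP */
        printf("ext=0x%04x msk=0x%04x\n", ext.tci, msk.tci);
        /* prints ext=0xe234 msk=0xefff */
        return 0;
}
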
diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
index 621113650a9b..2af9faee96c5 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
@@ -290,8 +290,7 @@ nfp_check_mask_remove(struct nfp_app *app, char *mask_data, u32 mask_len,
return true;
}
-int nfp_compile_flow_metadata(struct nfp_app *app,
- struct flow_cls_offload *flow,
+int nfp_compile_flow_metadata(struct nfp_app *app, u32 cookie,
struct nfp_fl_payload *nfp_flow,
struct net_device *netdev,
struct netlink_ext_ack *extack)
@@ -310,7 +309,7 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
}
nfp_flow->meta.host_ctx_id = cpu_to_be32(stats_cxt);
- nfp_flow->meta.host_cookie = cpu_to_be64(flow->cookie);
+ nfp_flow->meta.host_cookie = cpu_to_be64(cookie);
nfp_flow->ingress_dev = netdev;
ctx_entry = kzalloc(sizeof(*ctx_entry), GFP_KERNEL);
@@ -357,7 +356,7 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
priv->stats[stats_cxt].bytes = 0;
priv->stats[stats_cxt].used = jiffies;
- check_entry = nfp_flower_search_fl_table(app, flow->cookie, netdev);
+ check_entry = nfp_flower_search_fl_table(app, cookie, netdev);
if (check_entry) {
NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot offload duplicate flow entry");
if (nfp_release_stats_entry(app, stats_cxt)) {
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 2406d33356ad..556c3495211d 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -41,6 +41,8 @@
BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
BIT(FLOW_DISSECTOR_KEY_ENC_IP) | \
BIT(FLOW_DISSECTOR_KEY_MPLS) | \
+ BIT(FLOW_DISSECTOR_KEY_CT) | \
+ BIT(FLOW_DISSECTOR_KEY_META) | \
BIT(FLOW_DISSECTOR_KEY_IP))
#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR \
@@ -89,7 +91,7 @@ struct nfp_flower_merge_check {
};
};
-static int
+int
nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow,
u8 mtype)
{
@@ -134,20 +136,16 @@ nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow,
return 0;
}
-static bool nfp_flower_check_higher_than_mac(struct flow_cls_offload *f)
+static bool nfp_flower_check_higher_than_mac(struct flow_rule *rule)
{
- struct flow_rule *rule = flow_cls_offload_flow_rule(f);
-
return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS) ||
flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS) ||
flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS) ||
flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP);
}
-static bool nfp_flower_check_higher_than_l3(struct flow_cls_offload *f)
+static bool nfp_flower_check_higher_than_l3(struct flow_rule *rule)
{
- struct flow_rule *rule = flow_cls_offload_flow_rule(f);
-
return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS) ||
flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP);
}
@@ -236,15 +234,14 @@ nfp_flower_calc_udp_tun_layer(struct flow_dissector_key_ports *enc_ports,
return 0;
}
-static int
+int
nfp_flower_calculate_key_layers(struct nfp_app *app,
struct net_device *netdev,
struct nfp_fl_key_ls *ret_key_ls,
- struct flow_cls_offload *flow,
+ struct flow_rule *rule,
enum nfp_flower_tun_type *tun_type,
struct netlink_ext_ack *extack)
{
- struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
struct flow_dissector *dissector = rule->match.dissector;
struct flow_match_basic basic = { NULL, NULL};
struct nfp_flower_priv *priv = app->priv;
@@ -452,7 +449,7 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on given EtherType is not supported");
return -EOPNOTSUPP;
}
- } else if (nfp_flower_check_higher_than_mac(flow)) {
+ } else if (nfp_flower_check_higher_than_mac(rule)) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot match above L2 without specified EtherType");
return -EOPNOTSUPP;
}
@@ -471,7 +468,7 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
}
if (!(key_layer & NFP_FLOWER_LAYER_TP) &&
- nfp_flower_check_higher_than_l3(flow)) {
+ nfp_flower_check_higher_than_l3(rule)) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot match on L4 information without specified IP protocol type");
return -EOPNOTSUPP;
}
@@ -543,7 +540,7 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
return 0;
}
-static struct nfp_fl_payload *
+struct nfp_fl_payload *
nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
{
struct nfp_fl_payload *flow_pay;
@@ -1005,9 +1002,7 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
struct nfp_fl_payload *sub_flow1,
struct nfp_fl_payload *sub_flow2)
{
- struct flow_cls_offload merge_tc_off;
struct nfp_flower_priv *priv = app->priv;
- struct netlink_ext_ack *extack = NULL;
struct nfp_fl_payload *merge_flow;
struct nfp_fl_key_ls merge_key_ls;
struct nfp_merge_info *merge_info;
@@ -1016,7 +1011,6 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
ASSERT_RTNL();
- extack = merge_tc_off.common.extack;
if (sub_flow1 == sub_flow2 ||
nfp_flower_is_merge_flow(sub_flow1) ||
nfp_flower_is_merge_flow(sub_flow2))
@@ -1061,9 +1055,8 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
if (err)
goto err_unlink_sub_flow1;
- merge_tc_off.cookie = merge_flow->tc_flower_cookie;
- err = nfp_compile_flow_metadata(app, &merge_tc_off, merge_flow,
- merge_flow->ingress_dev, extack);
+ err = nfp_compile_flow_metadata(app, merge_flow->tc_flower_cookie, merge_flow,
+ merge_flow->ingress_dev, NULL);
if (err)
goto err_unlink_sub_flow2;
@@ -1305,6 +1298,7 @@ static int
nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
struct flow_cls_offload *flow)
{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
struct nfp_flower_priv *priv = app->priv;
struct netlink_ext_ack *extack = NULL;
@@ -1330,7 +1324,7 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
if (!key_layer)
return -ENOMEM;
- err = nfp_flower_calculate_key_layers(app, netdev, key_layer, flow,
+ err = nfp_flower_calculate_key_layers(app, netdev, key_layer, rule,
&tun_type, extack);
if (err)
goto err_free_key_ls;
@@ -1341,12 +1335,12 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
goto err_free_key_ls;
}
- err = nfp_flower_compile_flow_match(app, flow, key_layer, netdev,
+ err = nfp_flower_compile_flow_match(app, rule, key_layer, netdev,
flow_pay, tun_type, extack);
if (err)
goto err_destroy_flow;
- err = nfp_flower_compile_action(app, flow, netdev, flow_pay, extack);
+ err = nfp_flower_compile_action(app, rule, netdev, flow_pay, extack);
if (err)
goto err_destroy_flow;
@@ -1356,7 +1350,7 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
goto err_destroy_flow;
}
- err = nfp_compile_flow_metadata(app, flow, flow_pay, netdev, extack);
+ err = nfp_compile_flow_metadata(app, flow->cookie, flow_pay, netdev, extack);
if (err)
goto err_destroy_flow;
@@ -1476,7 +1470,7 @@ err_free_links:
kfree_rcu(merge_flow, rcu);
}
-static void
+void
nfp_flower_del_linked_merge_flows(struct nfp_app *app,
struct nfp_fl_payload *sub_flow)
{
@@ -1601,7 +1595,7 @@ __nfp_flower_update_merge_stats(struct nfp_app *app,
}
}
-static void
+void
nfp_flower_update_merge_stats(struct nfp_app *app,
struct nfp_fl_payload *sub_flow)
{
@@ -1628,10 +1622,17 @@ nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
struct flow_cls_offload *flow)
{
struct nfp_flower_priv *priv = app->priv;
+ struct nfp_fl_ct_map_entry *ct_map_ent;
struct netlink_ext_ack *extack = NULL;
struct nfp_fl_payload *nfp_flow;
u32 ctx_id;
+ /* Check ct_map table first */
+ ct_map_ent = rhashtable_lookup_fast(&priv->ct_map_table, &flow->cookie,
+ nfp_ct_map_params);
+ if (ct_map_ent)
+ return nfp_fl_ct_stats(flow, ct_map_ent);
+
extack = flow->common.extack;
nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
if (!nfp_flow) {
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c
index 742a420152b3..bb3b8a7f6c5d 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c
@@ -692,7 +692,7 @@ static int nfp_pci_probe(struct pci_dev *pdev,
goto err_pci_disable;
}
- devlink = devlink_alloc(&nfp_devlink_ops, sizeof(*pf));
+ devlink = devlink_alloc(&nfp_devlink_ops, sizeof(*pf), &pdev->dev);
if (!devlink) {
err = -ENOMEM;
goto err_rel_regions;
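
The extra devlink_alloc() argument (and the devlink_register() signature change in nfp_net_main.c and the ionic hunks below) tracks a core API change where the parent device is bound at allocation time instead of registration time. A sketch of the new calling convention, with placeholder names and error handling elided:

/* Sketch of the post-change devlink API: driver_devlink_ops and
 * struct driver_priv are placeholders for a driver's own types.
 */
devlink = devlink_alloc(&driver_devlink_ops, sizeof(struct driver_priv),
                        &pdev->dev);    /* parent dev bound here */
if (!devlink)
        return -ENOMEM;

priv = devlink_priv(devlink);
/* ... initialize priv ... */

err = devlink_register(devlink);        /* no dev argument anymore */
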
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index df5b748be068..df203738511b 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -17,6 +17,7 @@
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
+#include <linux/dim.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/semaphore.h>
#include <linux/workqueue.h>
@@ -360,6 +361,9 @@ struct nfp_net_rx_ring {
* @rx_ring: Pointer to RX ring
* @xdp_ring: Pointer to an extra TX ring for XDP
* @irq_entry: MSI-X table entry (use for talking to the device)
+ * @event_ctr: Number of interrupts
+ * @rx_dim: Dynamic interrupt moderation structure for RX
+ * @tx_dim: Dynamic interrupt moderation structure for TX
* @rx_sync: Seqlock for atomic updates of RX stats
* @rx_pkts: Number of received packets
* @rx_bytes: Number of received bytes
@@ -410,6 +414,10 @@ struct nfp_net_r_vector {
u16 irq_entry;
+ u16 event_ctr;
+ struct dim rx_dim;
+ struct dim tx_dim;
+
struct u64_stats_sync rx_sync;
u64 rx_pkts;
u64 rx_bytes;
@@ -571,6 +579,8 @@ struct nfp_net_dp {
* mailbox area, crypto TLV
* @link_up: Is the link up?
* @link_status_lock: Protects @link_* and ensures atomicity with BAR reading
+ * @rx_coalesce_adapt_on: Is RX interrupt moderation adaptive?
+ * @tx_coalesce_adapt_on: Is TX interrupt moderation adaptive?
* @rx_coalesce_usecs: RX interrupt moderation usecs delay parameter
* @rx_coalesce_max_frames: RX interrupt moderation frame count parameter
* @tx_coalesce_usecs: TX interrupt moderation usecs delay parameter
@@ -654,6 +664,8 @@ struct nfp_net {
struct semaphore bar_lock;
+ bool rx_coalesce_adapt_on;
+ bool tx_coalesce_adapt_on;
u32 rx_coalesce_usecs;
u32 rx_coalesce_max_frames;
u32 tx_coalesce_usecs;
@@ -919,6 +931,14 @@ static inline bool nfp_netdev_is_nfp_net(struct net_device *netdev)
return netdev->netdev_ops == &nfp_net_netdev_ops;
}
+static inline int nfp_net_coalesce_para_check(u32 usecs, u32 pkts)
+{
+ if ((usecs >= ((1 << 16) - 1)) || (pkts >= ((1 << 16) - 1)))
+ return -EINVAL;
+
+ return 0;
+}
+
/* Prototypes */
void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
void __iomem *ctrl_bar);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 5dfa4799c34f..5bfa22accf2c 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -474,6 +474,12 @@ static irqreturn_t nfp_net_irq_rxtx(int irq, void *data)
{
struct nfp_net_r_vector *r_vec = data;
+ /* Currently we cannot tell whether this is an rx or tx interrupt;
+ * since dim does not need an exact event_ctr for its calculations,
+ * we simply use this one counter for both rx and tx dim.
+ */
+ r_vec->event_ctr++;
+
napi_schedule_irqoff(&r_vec->napi);
/* The FW auto-masks any interrupt, either via the MASK bit in
@@ -1697,7 +1703,7 @@ nfp_net_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
case NFP_NET_META_RESYNC_INFO:
if (nfp_net_tls_rx_resync_req(netdev, data, pkt,
pkt_len))
- return NULL;
+ return false;
data += sizeof(struct nfp_net_tls_resync_req);
break;
default:
@@ -2061,6 +2067,36 @@ static int nfp_net_poll(struct napi_struct *napi, int budget)
if (napi_complete_done(napi, pkts_polled))
nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
+ if (r_vec->nfp_net->rx_coalesce_adapt_on) {
+ struct dim_sample dim_sample = {};
+ unsigned int start;
+ u64 pkts, bytes;
+
+ do {
+ start = u64_stats_fetch_begin(&r_vec->rx_sync);
+ pkts = r_vec->rx_pkts;
+ bytes = r_vec->rx_bytes;
+ } while (u64_stats_fetch_retry(&r_vec->rx_sync, start));
+
+ dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample);
+ net_dim(&r_vec->rx_dim, dim_sample);
+ }
+
+ if (r_vec->nfp_net->tx_coalesce_adapt_on) {
+ struct dim_sample dim_sample = {};
+ unsigned int start;
+ u64 pkts, bytes;
+
+ do {
+ start = u64_stats_fetch_begin(&r_vec->tx_sync);
+ pkts = r_vec->tx_pkts;
+ bytes = r_vec->tx_bytes;
+ } while (u64_stats_fetch_retry(&r_vec->tx_sync, start));
+
+ dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample);
+ net_dim(&r_vec->tx_dim, dim_sample);
+ }
+
return pkts_polled;
}
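
Each NAPI poll now feeds the dim library a cumulative sample (event count, packets, bytes); net_dim() works on the deltas between successive samples and, when the rate shifts enough, schedules the rx_dim/tx_dim work items installed further down. The fetch_begin/fetch_retry pair is the usual lock-free snapshot: readers never block the datapath writer. A simplified userspace model of that seqcount protocol, names ours and memory-ordering details omitted:

/* Simplified model (ours) of the u64_stats_fetch_begin/retry pattern. */
#include <stdatomic.h>
#include <stdint.h>

struct stats {
        atomic_uint seq;                /* odd while a writer is mid-update */
        uint64_t pkts, bytes;
};

static void stats_write(struct stats *s, uint64_t pkts, uint64_t bytes)
{
        atomic_fetch_add(&s->seq, 1);   /* odd: update in progress */
        s->pkts += pkts;
        s->bytes += bytes;
        atomic_fetch_add(&s->seq, 1);   /* even: update visible */
}

static void stats_read(struct stats *s, uint64_t *pkts, uint64_t *bytes)
{
        unsigned int start;

        do {
                while ((start = atomic_load(&s->seq)) & 1)
                        ;               /* writer active, spin */
                *pkts = s->pkts;
                *bytes = s->bytes;
        } while (atomic_load(&s->seq) != start);        /* raced: retry */
}
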
@@ -2873,6 +2909,7 @@ static int nfp_net_set_config_and_enable(struct nfp_net *nn)
*/
static void nfp_net_close_stack(struct nfp_net *nn)
{
+ struct nfp_net_r_vector *r_vec;
unsigned int r;
disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
@@ -2880,8 +2917,16 @@ static void nfp_net_close_stack(struct nfp_net *nn)
nn->link_up = false;
for (r = 0; r < nn->dp.num_r_vecs; r++) {
- disable_irq(nn->r_vecs[r].irq_vector);
- napi_disable(&nn->r_vecs[r].napi);
+ r_vec = &nn->r_vecs[r];
+
+ disable_irq(r_vec->irq_vector);
+ napi_disable(&r_vec->napi);
+
+ if (r_vec->rx_ring)
+ cancel_work_sync(&r_vec->rx_dim.work);
+
+ if (r_vec->tx_ring)
+ cancel_work_sync(&r_vec->tx_dim.work);
}
netif_tx_disable(nn->dp.netdev);
@@ -2948,17 +2993,92 @@ void nfp_ctrl_close(struct nfp_net *nn)
rtnl_unlock();
}
+static void nfp_net_rx_dim_work(struct work_struct *work)
+{
+ struct nfp_net_r_vector *r_vec;
+ unsigned int factor, value;
+ struct dim_cq_moder moder;
+ struct nfp_net *nn;
+ struct dim *dim;
+
+ dim = container_of(work, struct dim, work);
+ moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
+ r_vec = container_of(dim, struct nfp_net_r_vector, rx_dim);
+ nn = r_vec->nfp_net;
+
+ /* Compute factor used to convert coalesce '_usecs' parameters to
+ * ME timestamp ticks. There are 16 ME clock cycles for each timestamp
+ * count.
+ */
+ factor = nn->tlv_caps.me_freq_mhz / 16;
+ if (nfp_net_coalesce_para_check(factor * moder.usec, moder.pkts))
+ return;
+
+ /* copy RX interrupt coalesce parameters */
+ value = (moder.pkts << 16) | (factor * moder.usec);
+ rtnl_lock();
+ nn_writel(nn, NFP_NET_CFG_RXR_IRQ_MOD(r_vec->rx_ring->idx), value);
+ (void)nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_IRQMOD);
+ rtnl_unlock();
+
+ dim->state = DIM_START_MEASURE;
+}
+
+static void nfp_net_tx_dim_work(struct work_struct *work)
+{
+ struct nfp_net_r_vector *r_vec;
+ unsigned int factor, value;
+ struct dim_cq_moder moder;
+ struct nfp_net *nn;
+ struct dim *dim;
+
+ dim = container_of(work, struct dim, work);
+ moder = net_dim_get_tx_moderation(dim->mode, dim->profile_ix);
+ r_vec = container_of(dim, struct nfp_net_r_vector, tx_dim);
+ nn = r_vec->nfp_net;
+
+ /* Compute factor used to convert coalesce '_usecs' parameters to
+ * ME timestamp ticks. There are 16 ME clock cycles for each timestamp
+ * count.
+ */
+ factor = nn->tlv_caps.me_freq_mhz / 16;
+ if (nfp_net_coalesce_para_check(factor * moder.usec, moder.pkts))
+ return;
+
+ /* copy TX interrupt coalesce parameters */
+ value = (moder.pkts << 16) | (factor * moder.usec);
+ rtnl_lock();
+ nn_writel(nn, NFP_NET_CFG_TXR_IRQ_MOD(r_vec->tx_ring->idx), value);
+ (void)nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_IRQMOD);
+ rtnl_unlock();
+
+ dim->state = DIM_START_MEASURE;
+}
+
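
Both work handlers end in the same three steps: convert the dim profile to hardware units, bounds-check it with the new nfp_net_coalesce_para_check() helper, and write the packed value to the ring's IRQ_MOD register. One ME timestamp tick is 16 ME clock cycles, hence the me_freq_mhz / 16 factor. A standalone illustration of the packing; the function name and the 1.2 GHz clock are invented for the example:

#include <stdint.h>
#include <stdio.h>

static int pack_irq_mod(uint32_t me_freq_mhz, uint32_t usec,
                        uint32_t pkts, uint32_t *out)
{
        uint32_t factor = me_freq_mhz / 16;

        /* both fields must fit in 16 bits, as in
         * nfp_net_coalesce_para_check()
         */
        if (factor * usec >= 0xffff || pkts >= 0xffff)
                return -1;
        *out = (pkts << 16) | (factor * usec);
        return 0;
}

int main(void)
{
        uint32_t val;

        /* hypothetical 1.2 GHz ME clock, 50 us / 64 pkt profile */
        if (!pack_irq_mod(1200, 50, 64, &val))
                printf("IRQ_MOD = 0x%08x\n", val);      /* 0x00400ea6 */
        return 0;
}
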
/**
* nfp_net_open_stack() - Start the device from stack's perspective
* @nn: NFP Net device to reconfigure
*/
static void nfp_net_open_stack(struct nfp_net *nn)
{
+ struct nfp_net_r_vector *r_vec;
unsigned int r;
for (r = 0; r < nn->dp.num_r_vecs; r++) {
- napi_enable(&nn->r_vecs[r].napi);
- enable_irq(nn->r_vecs[r].irq_vector);
+ r_vec = &nn->r_vecs[r];
+
+ if (r_vec->rx_ring) {
+ INIT_WORK(&r_vec->rx_dim.work, nfp_net_rx_dim_work);
+ r_vec->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ }
+
+ if (r_vec->tx_ring) {
+ INIT_WORK(&r_vec->tx_dim.work, nfp_net_tx_dim_work);
+ r_vec->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ }
+
+ napi_enable(&r_vec->napi);
+ enable_irq(r_vec->irq_vector);
}
netif_tx_wake_all_queues(nn->dp.netdev);
@@ -3161,17 +3281,12 @@ static int nfp_net_dp_swap_enable(struct nfp_net *nn, struct nfp_net_dp *dp)
for (r = 0; r < nn->max_r_vecs; r++)
nfp_net_vector_assign_rings(&nn->dp, &nn->r_vecs[r], r);
- err = netif_set_real_num_rx_queues(nn->dp.netdev, nn->dp.num_rx_rings);
+ err = netif_set_real_num_queues(nn->dp.netdev,
+ nn->dp.num_stack_tx_rings,
+ nn->dp.num_rx_rings);
if (err)
return err;
- if (nn->dp.netdev->real_num_tx_queues != nn->dp.num_stack_tx_rings) {
- err = netif_set_real_num_tx_queues(nn->dp.netdev,
- nn->dp.num_stack_tx_rings);
- if (err)
- return err;
- }
-
return nfp_net_set_config_and_enable(nn);
}
@@ -3893,6 +4008,9 @@ static void nfp_net_irqmod_init(struct nfp_net *nn)
nn->rx_coalesce_max_frames = 64;
nn->tx_coalesce_usecs = 50;
nn->tx_coalesce_max_frames = 64;
+
+ nn->rx_coalesce_adapt_on = true;
+ nn->tx_coalesce_adapt_on = true;
}
static void nfp_net_netdev_init(struct nfp_net *nn)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 8803faadd302..0bf2ff5717bc 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -1085,6 +1085,9 @@ static int nfp_net_get_coalesce(struct net_device *netdev,
if (!(nn->cap & NFP_NET_CFG_CTRL_IRQMOD))
return -EINVAL;
+ ec->use_adaptive_rx_coalesce = nn->rx_coalesce_adapt_on;
+ ec->use_adaptive_tx_coalesce = nn->tx_coalesce_adapt_on;
+
ec->rx_coalesce_usecs = nn->rx_coalesce_usecs;
ec->rx_max_coalesced_frames = nn->rx_coalesce_max_frames;
ec->tx_coalesce_usecs = nn->tx_coalesce_usecs;
@@ -1361,19 +1364,18 @@ static int nfp_net_set_coalesce(struct net_device *netdev,
if (!ec->tx_coalesce_usecs && !ec->tx_max_coalesced_frames)
return -EINVAL;
- if (ec->rx_coalesce_usecs * factor >= ((1 << 16) - 1))
- return -EINVAL;
-
- if (ec->tx_coalesce_usecs * factor >= ((1 << 16) - 1))
+ if (nfp_net_coalesce_para_check(ec->rx_coalesce_usecs * factor,
+ ec->rx_max_coalesced_frames))
return -EINVAL;
- if (ec->rx_max_coalesced_frames >= ((1 << 16) - 1))
- return -EINVAL;
-
- if (ec->tx_max_coalesced_frames >= ((1 << 16) - 1))
+ if (nfp_net_coalesce_para_check(ec->tx_coalesce_usecs * factor,
+ ec->tx_max_coalesced_frames))
return -EINVAL;
/* configuration is valid */
+ nn->rx_coalesce_adapt_on = !!ec->use_adaptive_rx_coalesce;
+ nn->tx_coalesce_adapt_on = !!ec->use_adaptive_tx_coalesce;
+
nn->rx_coalesce_usecs = ec->rx_coalesce_usecs;
nn->rx_coalesce_max_frames = ec->rx_max_coalesced_frames;
nn->tx_coalesce_usecs = ec->tx_coalesce_usecs;
@@ -1445,7 +1447,8 @@ static int nfp_net_set_channels(struct net_device *netdev,
static const struct ethtool_ops nfp_net_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
- ETHTOOL_COALESCE_MAX_FRAMES,
+ ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_USE_ADAPTIVE,
.get_drvinfo = nfp_net_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_ringparam = nfp_net_get_ringparam,
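
Advertising ETHTOOL_COALESCE_USE_ADAPTIVE is what lets the new adapt_on flags be toggled from userspace; without it the ethtool core rejects the adaptive fields before the driver ever sees them. Usage sketch, interface name invented: ethtool -C eth0 adaptive-rx on adaptive-tx on
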
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
index 921db40047d7..d10a93801344 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
@@ -701,7 +701,7 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
if (err)
goto err_unmap;
- err = devlink_register(devlink, &pf->pdev->dev);
+ err = devlink_register(devlink);
if (err)
goto err_app_clean;
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 64c6842bd452..d29fe562b3de 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -1219,7 +1219,7 @@ static const struct net_device_ops lpc_netdev_ops = {
.ndo_stop = lpc_eth_close,
.ndo_start_xmit = lpc_eth_hard_start_xmit,
.ndo_set_rx_mode = lpc_eth_set_multicast_list,
- .ndo_do_ioctl = phy_do_ioctl_running,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_set_mac_address = lpc_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
index af84f72bf08e..4e18b64dceb9 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
@@ -6,6 +6,7 @@
config PCH_GBE
tristate "OKI SEMICONDUCTOR IOH(ML7223/ML7831) GbE"
depends on PCI && (X86_32 || COMPILE_TEST)
+ depends on PTP_1588_CLOCK
select MII
select PTP_1588_CLOCK_PCH
select NET_PTP_CLASSIFY
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index e351f3d1608f..ec3e558f890e 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -1031,13 +1031,7 @@ static void pch_gbe_watchdog(struct timer_list *t)
struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
netdev->tx_queue_len = adapter->tx_queue_len;
/* mii library handles link maintenance tasks */
- if (mii_ethtool_gset(&adapter->mii, &cmd)) {
- netdev_err(netdev, "ethtool get setting Error\n");
- mod_timer(&adapter->watchdog_timer,
- round_jiffies(jiffies +
- PCH_GBE_WATCHDOG_PERIOD));
- return;
- }
+ mii_ethtool_gset(&adapter->mii, &cmd);
hw->mac.link_speed = ethtool_cmd_speed(&cmd);
hw->mac.link_duplex = cmd.duplex;
/* Set the RGMII control. */
@@ -2333,7 +2327,7 @@ static const struct net_device_ops pch_gbe_netdev_ops = {
.ndo_tx_timeout = pch_gbe_tx_timeout,
.ndo_change_mtu = pch_gbe_change_mtu,
.ndo_set_features = pch_gbe_set_features,
- .ndo_do_ioctl = pch_gbe_ioctl,
+ .ndo_eth_ioctl = pch_gbe_ioctl,
.ndo_set_rx_mode = pch_gbe_set_multi,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = pch_gbe_netpoll,
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c
index ed832046216a..3426f6fa2b57 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c
@@ -301,9 +301,7 @@ void pch_gbe_phy_init_setting(struct pch_gbe_hw *hw)
int ret;
u16 mii_reg;
- ret = mii_ethtool_gset(&adapter->mii, &cmd);
- if (ret)
- netdev_err(adapter->netdev, "Error: mii_ethtool_gset\n");
+ mii_ethtool_gset(&adapter->mii, &cmd);
ethtool_cmd_speed_set(&cmd, hw->mac.link_speed);
cmd.duplex = hw->mac.link_duplex;
diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c
index d058a63602a9..1a6336a56d3d 100644
--- a/drivers/net/ethernet/packetengines/hamachi.c
+++ b/drivers/net/ethernet/packetengines/hamachi.c
@@ -546,7 +546,9 @@ static int read_eeprom(void __iomem *ioaddr, int location);
static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int hamachi_open(struct net_device *dev);
-static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int hamachi_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int hamachi_siocdevprivate(struct net_device *dev, struct ifreq *rq,
+ void __user *data, int cmd);
static void hamachi_timer(struct timer_list *t);
static void hamachi_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void hamachi_init_ring(struct net_device *dev);
@@ -571,7 +573,8 @@ static const struct net_device_ops hamachi_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_tx_timeout = hamachi_tx_timeout,
- .ndo_do_ioctl = netdev_ioctl,
+ .ndo_eth_ioctl = hamachi_ioctl,
+ .ndo_siocdevprivate = hamachi_siocdevprivate,
};
@@ -1867,7 +1870,36 @@ static const struct ethtool_ops ethtool_ops_no_mii = {
.get_drvinfo = hamachi_get_drvinfo,
};
-static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+/* private ioctl: set rx,tx intr params */
+static int hamachi_siocdevprivate(struct net_device *dev, struct ifreq *rq,
+ void __user *data, int cmd)
+{
+ struct hamachi_private *np = netdev_priv(dev);
+ u32 *d = (u32 *)&rq->ifr_ifru;
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ if (cmd != SIOCDEVPRIVATE + 3)
+ return -EOPNOTSUPP;
+
+ /* Should add this check here or an ordinary user can do nasty
+ * things. -KDU
+ *
+ * TODO: Shut down the Rx and Tx engines while doing this.
+ */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ writel(d[0], np->base + TxIntrCtrl);
+ writel(d[1], np->base + RxIntrCtrl);
+ printk(KERN_NOTICE "%s: tx %08x, rx %08x intr\n", dev->name,
+ (u32)readl(np->base + TxIntrCtrl),
+ (u32)readl(np->base + RxIntrCtrl));
+
+ return 0;
+}
+
+static int hamachi_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct hamachi_private *np = netdev_priv(dev);
struct mii_ioctl_data *data = if_mii(rq);
@@ -1876,28 +1908,9 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
if (!netif_running(dev))
return -EINVAL;
- if (cmd == (SIOCDEVPRIVATE+3)) { /* set rx,tx intr params */
- u32 *d = (u32 *)&rq->ifr_ifru;
- /* Should add this check here or an ordinary user can do nasty
- * things. -KDU
- *
- * TODO: Shut down the Rx and Tx engines while doing this.
- */
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- writel(d[0], np->base + TxIntrCtrl);
- writel(d[1], np->base + RxIntrCtrl);
- printk(KERN_NOTICE "%s: tx %08x, rx %08x intr\n", dev->name,
- (u32) readl(np->base + TxIntrCtrl),
- (u32) readl(np->base + RxIntrCtrl));
- rc = 0;
- }
-
- else {
- spin_lock_irq(&np->lock);
- rc = generic_mii_ioctl(&np->mii_if, data, cmd, NULL);
- spin_unlock_irq(&np->lock);
- }
+ spin_lock_irq(&np->lock);
+ rc = generic_mii_ioctl(&np->mii_if, data, cmd, NULL);
+ spin_unlock_irq(&np->lock);
return rc;
}
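
This is the mechanical half of the tree-wide ndo_do_ioctl split also visible in lpc_eth, yellowfin, pch_gbe, and ionic in this patch: MII-style ioctls stay on the new ndo_eth_ioctl hook, while SIOCDEVPRIVATE commands get their own ndo_siocdevprivate with an explicit user pointer. Roughly how the core dispatches after the split (a simplified sketch, not the exact dev_ioctl() code):

/* Sketch (simplified) of the core dispatch the split enables. */
if (cmd >= SIOCDEVPRIVATE && cmd <= SIOCDEVPRIVATE + 15)
        err = ops->ndo_siocdevprivate(dev, ifr, data, cmd);
else if (cmd == SIOCGMIIPHY || cmd == SIOCGMIIREG || cmd == SIOCSMIIREG)
        err = ops->ndo_eth_ioctl(dev, ifr, cmd);
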
diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c
index d1dd9bc1bc7f..f5cd8f51be7c 100644
--- a/drivers/net/ethernet/packetengines/yellowfin.c
+++ b/drivers/net/ethernet/packetengines/yellowfin.c
@@ -362,7 +362,7 @@ static const struct net_device_ops netdev_ops = {
.ndo_set_rx_mode = set_rx_mode,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
- .ndo_do_ioctl = netdev_ioctl,
+ .ndo_eth_ioctl = netdev_ioctl,
.ndo_tx_timeout = yellowfin_tx_timeout,
};
diff --git a/drivers/net/ethernet/pensando/Kconfig b/drivers/net/ethernet/pensando/Kconfig
index 202973a82712..3f7519e435b8 100644
--- a/drivers/net/ethernet/pensando/Kconfig
+++ b/drivers/net/ethernet/pensando/Kconfig
@@ -20,7 +20,7 @@ if NET_VENDOR_PENSANDO
config IONIC
tristate "Pensando Ethernet IONIC Support"
depends on 64BIT && PCI
- depends on PTP_1588_CLOCK || !PTP_1588_CLOCK
+ depends on PTP_1588_CLOCK_OPTIONAL
select NET_DEVLINK
select DIMLIB
help
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
index e4a5416adc80..7e296fa71b36 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
@@ -165,10 +165,10 @@ static int ionic_vf_alloc(struct ionic *ionic, int num_vfs)
goto out;
}
+ ionic->num_vfs++;
/* ignore failures from older FW, we just won't get stats */
(void)ionic_set_vf_config(ionic, i, IONIC_VF_ATTR_STATSADDR,
(u8 *)&v->stats_pa);
- ionic->num_vfs++;
}
out:
@@ -373,9 +373,6 @@ static void ionic_remove(struct pci_dev *pdev)
{
struct ionic *ionic = pci_get_drvdata(pdev);
- if (!ionic)
- return;
-
del_timer_sync(&ionic->watchdog_timer);
if (ionic->lif) {
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
index 1dfe962e22e0..9aac647290f7 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
@@ -106,6 +106,8 @@ int ionic_dev_setup(struct ionic *ionic)
idev->last_fw_hb = 0;
idev->fw_hb_ready = true;
idev->fw_status_ready = true;
+ idev->fw_generation = IONIC_FW_STS_F_GENERATION &
+ ioread8(&idev->dev_info_regs->fw_status);
mod_timer(&ionic->watchdog_timer,
round_jiffies(jiffies + ionic->watchdog_period));
@@ -121,7 +123,9 @@ int ionic_heartbeat_check(struct ionic *ionic)
{
struct ionic_dev *idev = &ionic->idev;
unsigned long check_time, last_check_time;
- bool fw_status_ready, fw_hb_ready;
+ bool fw_status_ready = true;
+ bool fw_hb_ready;
+ u8 fw_generation;
u8 fw_status;
u32 fw_hb;
@@ -140,9 +144,29 @@ do_check_time:
/* firmware is useful only if the running bit is set and
* fw_status != 0xff (bad PCI read)
+ * If fw_status is not ready, don't bother with the generation.
*/
fw_status = ioread8(&idev->dev_info_regs->fw_status);
- fw_status_ready = (fw_status != 0xff) && (fw_status & IONIC_FW_STS_F_RUNNING);
+
+ if (fw_status == 0xff || !(fw_status & IONIC_FW_STS_F_RUNNING)) {
+ fw_status_ready = false;
+ } else {
+ fw_generation = fw_status & IONIC_FW_STS_F_GENERATION;
+ if (idev->fw_generation != fw_generation) {
+ dev_info(ionic->dev, "FW generation 0x%02x -> 0x%02x\n",
+ idev->fw_generation, fw_generation);
+
+ idev->fw_generation = fw_generation;
+
+ /* If the generation changed, the fw status is not
+ * ready so we need to trigger a fw-down cycle. After
+ * the down, the next watchdog will see the fw is up
+ * and the generation value stable, so will trigger
+ * the fw-up activity.
+ */
+ fw_status_ready = false;
+ }
+ }
/* is this a transition? */
if (fw_status_ready != idev->fw_status_ready) {
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
index c25cf9b744c5..8311086fb1f4 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
@@ -143,6 +143,7 @@ struct ionic_dev {
u32 last_fw_hb;
bool fw_hb_ready;
bool fw_status_ready;
+ u8 fw_generation;
u64 __iomem *db_pages;
dma_addr_t phy_db_pages;
@@ -160,8 +161,6 @@ struct ionic_dev {
struct ionic_cq_info {
union {
void *cq_desc;
- struct ionic_txq_comp *txcq;
- struct ionic_rxq_comp *rxcq;
struct ionic_admin_comp *admincq;
struct ionic_notifyq_event *notifyq;
};
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_devlink.c b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c
index b41301a5b0df..c7d0e195d176 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_devlink.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c
@@ -64,7 +64,7 @@ struct ionic *ionic_devlink_alloc(struct device *dev)
{
struct devlink *dl;
- dl = devlink_alloc(&ionic_dl_ops, sizeof(struct ionic));
+ dl = devlink_alloc(&ionic_dl_ops, sizeof(struct ionic), dev);
return devlink_priv(dl);
}
@@ -82,7 +82,7 @@ int ionic_devlink_register(struct ionic *ionic)
struct devlink_port_attrs attrs = {};
int err;
- err = devlink_register(dl, ionic->dev);
+ err = devlink_register(dl);
if (err) {
dev_warn(ionic->dev, "devlink_register failed: %d\n", err);
return err;
@@ -91,20 +91,20 @@ int ionic_devlink_register(struct ionic *ionic)
attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
devlink_port_attrs_set(&ionic->dl_port, &attrs);
err = devlink_port_register(dl, &ionic->dl_port, 0);
- if (err)
+ if (err) {
dev_err(ionic->dev, "devlink_port_register failed: %d\n", err);
- else
- devlink_port_type_eth_set(&ionic->dl_port,
- ionic->lif->netdev);
+ devlink_unregister(dl);
+ return err;
+ }
- return err;
+ devlink_port_type_eth_set(&ionic->dl_port, ionic->lif->netdev);
+ return 0;
}
void ionic_devlink_unregister(struct ionic *ionic)
{
struct devlink *dl = priv_to_devlink(ionic);
- if (ionic->dl_port.registered)
- devlink_port_unregister(&ionic->dl_port);
+ devlink_port_unregister(&ionic->dl_port);
devlink_unregister(dl);
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
index 6583be570e45..adc9fdb03e86 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
@@ -32,6 +32,9 @@ static void ionic_get_stats(struct net_device *netdev,
struct ionic_lif *lif = netdev_priv(netdev);
u32 i;
+ if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
+ return;
+
memset(buf, 0, stats->n_stats * sizeof(*buf));
for (i = 0; i < ionic_num_stats_grps; i++)
ionic_stats_groups[i].get_values(lif, &buf);
@@ -274,6 +277,9 @@ static int ionic_set_link_ksettings(struct net_device *netdev,
struct ionic *ionic = lif->ionic;
int err = 0;
+ if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
+ return -EBUSY;
+
/* set autoneg */
if (ks->base.autoneg != idev->port_info->config.an_enable) {
mutex_lock(&ionic->dev_cmd_lock);
@@ -320,6 +326,9 @@ static int ionic_set_pauseparam(struct net_device *netdev,
u32 requested_pause;
int err;
+ if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
+ return -EBUSY;
+
if (pause->autoneg)
return -EOPNOTSUPP;
@@ -372,6 +381,9 @@ static int ionic_set_fecparam(struct net_device *netdev,
u8 fec_type;
int ret = 0;
+ if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
+ return -EBUSY;
+
if (lif->ionic->idev.port_info->config.an_enable) {
netdev_err(netdev, "FEC request not allowed while autoneg is enabled\n");
return -EINVAL;
@@ -528,6 +540,9 @@ static int ionic_set_ringparam(struct net_device *netdev,
struct ionic_queue_params qparam;
int err;
+ if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
+ return -EBUSY;
+
ionic_init_queue_params(lif, &qparam);
if (ring->rx_mini_pending || ring->rx_jumbo_pending) {
@@ -597,6 +612,9 @@ static int ionic_set_channels(struct net_device *netdev,
int max_cnt;
int err;
+ if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
+ return -EBUSY;
+
ionic_init_queue_params(lif, &qparam);
if (ch->rx_count != ch->tx_count) {
@@ -947,6 +965,9 @@ static int ionic_nway_reset(struct net_device *netdev)
struct ionic *ionic = lif->ionic;
int err = 0;
+ if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
+ return -EBUSY;
+
/* flap the link to force auto-negotiation */
mutex_lock(&ionic->dev_cmd_lock);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_if.h b/drivers/net/ethernet/pensando/ionic/ionic_if.h
index 0478b48d9895..278610ed7227 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_if.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_if.h
@@ -2936,6 +2936,8 @@ struct ionic_hwstamp_regs {
* @asic_type: Asic type
* @asic_rev: Asic revision
* @fw_status: Firmware status
+ * bit 0: 1 = fw running
+ * bits 4-7: 4-bit generation number, changes on fw restart
* @fw_heartbeat: Firmware heartbeat counter
* @serial_num: Serial number
* @fw_version: Firmware version
@@ -2949,7 +2951,8 @@ union ionic_dev_info_regs {
u8 version;
u8 asic_type;
u8 asic_rev;
-#define IONIC_FW_STS_F_RUNNING 0x1
+#define IONIC_FW_STS_F_RUNNING 0x01
+#define IONIC_FW_STS_F_GENERATION 0xF0
u8 fw_status;
u32 fw_heartbeat;
char fw_version[IONIC_DEVINFO_FWVERS_BUFLEN];
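
With the generation nibble defined, a firmware restart is detectable even when the RUNNING bit never visibly drops between two watchdog reads. A standalone illustration of the field extraction; the helper name is ours:

#include <stdbool.h>
#include <stdint.h>

#define IONIC_FW_STS_F_RUNNING    0x01
#define IONIC_FW_STS_F_GENERATION 0xF0

static bool fw_restarted(uint8_t prev_status, uint8_t cur_status)
{
        if (cur_status == 0xff || !(cur_status & IONIC_FW_STS_F_RUNNING))
                return true;    /* not running (or bad PCI read) */
        /* generation changed: fw came back while we weren't looking */
        return (prev_status & IONIC_FW_STS_F_GENERATION) !=
               (cur_status & IONIC_FW_STS_F_GENERATION);
}
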
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index e795fa63ca12..f52c47a71f4b 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -11,6 +11,7 @@
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/cpumask.h>
+#include <linux/crash_dump.h>
#include "ionic.h"
#include "ionic_bus.h"
@@ -1599,7 +1600,6 @@ static int ionic_init_nic_features(struct ionic_lif *lif)
features = NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_FILTER |
- NETIF_F_RXHASH |
NETIF_F_SG |
NETIF_F_HW_CSUM |
NETIF_F_RXCSUM |
@@ -1607,6 +1607,9 @@ static int ionic_init_nic_features(struct ionic_lif *lif)
NETIF_F_TSO6 |
NETIF_F_TSO_ECN;
+ if (lif->nxqs > 1)
+ features |= NETIF_F_RXHASH;
+
err = ionic_set_nic_features(lif, features);
if (err)
return err;
@@ -2257,7 +2260,7 @@ static int ionic_stop(struct net_device *netdev)
return 0;
}
-static int ionic_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+static int ionic_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
struct ionic_lif *lif = netdev_priv(netdev);
@@ -2519,7 +2522,7 @@ static int ionic_set_vf_link_state(struct net_device *netdev, int vf, int set)
static const struct net_device_ops ionic_netdev_ops = {
.ndo_open = ionic_open,
.ndo_stop = ionic_stop,
- .ndo_do_ioctl = ionic_do_ioctl,
+ .ndo_eth_ioctl = ionic_eth_ioctl,
.ndo_start_xmit = ionic_start_xmit,
.ndo_get_stats64 = ionic_get_stats64,
.ndo_set_rx_mode = ionic_ndo_set_rx_mode,
@@ -2580,22 +2583,26 @@ int ionic_reconfigure_queues(struct ionic_lif *lif,
struct ionic_qcq **tx_qcqs = NULL;
struct ionic_qcq **rx_qcqs = NULL;
unsigned int flags, i;
- int err = -ENOMEM;
+ int err = 0;
/* allocate temporary qcq arrays to hold new queue structs */
if (qparam->nxqs != lif->nxqs || qparam->ntxq_descs != lif->ntxq_descs) {
tx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->ntxqs_per_lif,
sizeof(struct ionic_qcq *), GFP_KERNEL);
- if (!tx_qcqs)
+ if (!tx_qcqs) {
+ err = -ENOMEM;
goto err_out;
+ }
}
if (qparam->nxqs != lif->nxqs ||
qparam->nrxq_descs != lif->nrxq_descs ||
qparam->rxq_features != lif->rxq_features) {
rx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->nrxqs_per_lif,
sizeof(struct ionic_qcq *), GFP_KERNEL);
- if (!rx_qcqs)
+ if (!rx_qcqs) {
+ err = -ENOMEM;
goto err_out;
+ }
}
/* allocate new desc_info and rings, but leave the interrupt setup
@@ -2774,6 +2781,9 @@ err_out:
ionic_qcq_free(lif, lif->rxqcqs[i]);
}
+ if (err)
+ netdev_info(lif->netdev, "%s: failed %d\n", __func__, err);
+
return err;
}
@@ -2827,8 +2837,14 @@ int ionic_lif_alloc(struct ionic *ionic)
lif->ionic = ionic;
lif->index = 0;
- lif->ntxq_descs = IONIC_DEF_TXRX_DESC;
- lif->nrxq_descs = IONIC_DEF_TXRX_DESC;
+
+ if (is_kdump_kernel()) {
+ lif->ntxq_descs = IONIC_MIN_TXRX_DESC;
+ lif->nrxq_descs = IONIC_MIN_TXRX_DESC;
+ } else {
+ lif->ntxq_descs = IONIC_DEF_TXRX_DESC;
+ lif->nrxq_descs = IONIC_DEF_TXRX_DESC;
+ }
/* Convert the default coalesce value to actual hw resolution */
lif->rx_coalesce_usecs = IONIC_ITR_COAL_USEC_DEFAULT;
@@ -3514,6 +3530,7 @@ int ionic_lif_size(struct ionic *ionic)
unsigned int min_intrs;
int err;
+ /* retrieve basic values from FW */
lc = &ident->lif.eth.config;
dev_nintrs = le32_to_cpu(ident->dev.nintrs);
neqs_per_lif = le32_to_cpu(ident->lif.rdma.eq_qtype.qid_count);
@@ -3521,6 +3538,15 @@ int ionic_lif_size(struct ionic *ionic)
ntxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_TXQ]);
nrxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_RXQ]);
+ /* limit values to play nice with kdump */
+ if (is_kdump_kernel()) {
+ dev_nintrs = 2;
+ neqs_per_lif = 0;
+ nnqs_per_lif = 0;
+ ntxqs_per_lif = 1;
+ nrxqs_per_lif = 1;
+ }
+
/* reserve last queue id for hardware timestamping */
if (lc->features & cpu_to_le64(IONIC_ETH_HW_TIMESTAMP)) {
if (ntxqs_per_lif <= 1 || nrxqs_per_lif <= 1) {
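
Both kdump hunks follow the same pattern: when probing inside a crash/kdump kernel, clamp queue, descriptor, and interrupt counts to the minimum so the dump kernel's small memory budget is respected. A sketch of the idiom; the helper name is ours, the IONIC_*_TXRX_DESC constants come from ionic_lif.h:

#include <linux/crash_dump.h>

static unsigned int ionic_q_descs(void)
{
        /* minimal rings under kdump, normal depth otherwise */
        return is_kdump_kernel() ? IONIC_MIN_TXRX_DESC : IONIC_DEF_TXRX_DESC;
}
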
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
index 61cfe2120817..5f1e5b6e85c3 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
@@ -450,6 +450,8 @@ int ionic_identify(struct ionic *ionic)
}
mutex_unlock(&ionic->dev_cmd_lock);
+ dev_info(ionic->dev, "FW: %s\n", idev->dev_info.fw_version);
+
if (err) {
dev_err(ionic->dev, "Cannot identify ionic: %d\n", err);
goto err_out;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_phc.c b/drivers/net/ethernet/pensando/ionic/ionic_phc.c
index 6e2403c71608..afc45da399d4 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_phc.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_phc.c
@@ -119,8 +119,8 @@ static int ionic_lif_hwstamp_set_ts_config(struct ionic_lif *lif,
config->rx_filter = HWTSTAMP_FILTER_ALL;
}
- dev_dbg(ionic->dev, "config_rx_filter %d rx_filt %#llx rx_all %d\n",
- config->rx_filter, rx_filt, rx_all);
+ dev_dbg(ionic->dev, "%s: config_rx_filter %d rx_filt %#llx rx_all %d\n",
+ __func__, config->rx_filter, rx_filt, rx_all);
if (tx_mode) {
err = ionic_lif_create_hwstamp_txq(lif);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index 08870190e4d2..37c39581b659 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -32,19 +32,13 @@ static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q)
return netdev_get_tx_queue(q->lif->netdev, q->index);
}
-static void ionic_rx_buf_reset(struct ionic_buf_info *buf_info)
-{
- buf_info->page = NULL;
- buf_info->page_offset = 0;
- buf_info->dma_addr = 0;
-}
-
static int ionic_rx_page_alloc(struct ionic_queue *q,
struct ionic_buf_info *buf_info)
{
struct net_device *netdev = q->lif->netdev;
struct ionic_rx_stats *stats;
struct device *dev;
+ struct page *page;
dev = q->dev;
stats = q_to_rx_stats(q);
@@ -55,26 +49,27 @@ static int ionic_rx_page_alloc(struct ionic_queue *q,
return -EINVAL;
}
- buf_info->page = alloc_pages(IONIC_PAGE_GFP_MASK, 0);
- if (unlikely(!buf_info->page)) {
+ page = alloc_pages(IONIC_PAGE_GFP_MASK, 0);
+ if (unlikely(!page)) {
net_err_ratelimited("%s: %s page alloc failed\n",
netdev->name, q->name);
stats->alloc_err++;
return -ENOMEM;
}
- buf_info->page_offset = 0;
- buf_info->dma_addr = dma_map_page(dev, buf_info->page, buf_info->page_offset,
+ buf_info->dma_addr = dma_map_page(dev, page, 0,
IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(dev, buf_info->dma_addr))) {
- __free_pages(buf_info->page, 0);
- ionic_rx_buf_reset(buf_info);
+ __free_pages(page, 0);
net_err_ratelimited("%s: %s dma map failed\n",
netdev->name, q->name);
stats->dma_map_err++;
return -EIO;
}
+ buf_info->page = page;
+ buf_info->page_offset = 0;
+
return 0;
}
@@ -95,7 +90,7 @@ static void ionic_rx_page_free(struct ionic_queue *q,
dma_unmap_page(dev, buf_info->dma_addr, IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
__free_pages(buf_info->page, 0);
- ionic_rx_buf_reset(buf_info);
+ buf_info->page = NULL;
}
static bool ionic_rx_buf_recycle(struct ionic_queue *q,
@@ -139,7 +134,7 @@ static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
buf_info = &desc_info->bufs[0];
len = le16_to_cpu(comp->len);
- prefetch(buf_info->page);
+ prefetchw(buf_info->page);
skb = napi_get_frags(&q_to_qcq(q)->napi);
if (unlikely(!skb)) {
@@ -170,7 +165,7 @@ static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
if (!ionic_rx_buf_recycle(q, buf_info, frag_len)) {
dma_unmap_page(dev, buf_info->dma_addr,
IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
- ionic_rx_buf_reset(buf_info);
+ buf_info->page = NULL;
}
buf_info++;
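
The ionic_txrx.c rework above is able to drop ionic_rx_buf_reset() because the allocation path now stages everything in locals and writes buf_info only after both the page allocation and the DMA mapping succeed; a failure leaves the descriptor state untouched, so the only remaining cleanup is clearing ->page. The same ordering in isolation, as a hedged sketch (struct example_buf is hypothetical, mirroring ionic_buf_info):

    #include <linux/dma-mapping.h>
    #include <linux/gfp.h>

    struct example_buf {            /* hypothetical */
        struct page *page;
        unsigned int page_offset;
        dma_addr_t dma_addr;
    };

    static int example_rx_page_alloc(struct device *dev, struct example_buf *buf)
    {
        struct page *page;
        dma_addr_t daddr;

        page = alloc_pages(GFP_ATOMIC, 0);
        if (unlikely(!page))
            return -ENOMEM;

        daddr = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(dev, daddr))) {
            __free_pages(page, 0);
            return -EIO;            /* buf untouched: no reset helper needed */
        }

        buf->page = page;           /* publish only after both steps succeed */
        buf->page_offset = 0;
        buf->dma_addr = daddr;
        return 0;
    }
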
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index 98f430905ffa..1203353238e5 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -99,7 +99,7 @@ config QED_SRIOV
config QEDE
tristate "QLogic QED 25/40/100Gb Ethernet NIC"
depends on QED
- imply PTP_1588_CLOCK
+ depends on PTP_1588_CLOCK_OPTIONAL
help
This enables the support for Marvell FastLinQ adapters family,
ethernet driver.
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index b590c70539b5..d58e021614cd 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -26,15 +26,6 @@
extern const struct qed_common_ops qed_common_ops_pass;
-#define QED_MAJOR_VERSION 8
-#define QED_MINOR_VERSION 37
-#define QED_REVISION_VERSION 0
-#define QED_ENGINEERING_VERSION 20
-
-#define QED_VERSION \
- ((QED_MAJOR_VERSION << 24) | (QED_MINOR_VERSION << 16) | \
- (QED_REVISION_VERSION << 8) | QED_ENGINEERING_VERSION)
-
#define STORM_FW_VERSION \
((FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) | \
(FW_REVISION_VERSION << 8) | FW_ENGINEERING_VERSION)
@@ -517,12 +508,6 @@ enum qed_hsi_def_type {
QED_NUM_HSI_DEFS
};
-#define DRV_MODULE_VERSION \
- __stringify(QED_MAJOR_VERSION) "." \
- __stringify(QED_MINOR_VERSION) "." \
- __stringify(QED_REVISION_VERSION) "." \
- __stringify(QED_ENGINEERING_VERSION)
-
struct qed_simd_fp_handler {
void *token;
void (*func)(void *);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index e81dd34a3cac..dc93ddea8906 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -741,7 +741,6 @@ static int
qed_dcbx_read_local_lldp_mib(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
struct qed_dcbx_mib_meta_data data;
- int rc = 0;
memset(&data, 0, sizeof(data));
data.addr = p_hwfn->mcp_info->port_addr + offsetof(struct public_port,
@@ -750,7 +749,7 @@ qed_dcbx_read_local_lldp_mib(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
data.size = sizeof(struct lldp_config_params_s);
qed_memcpy_from(p_hwfn, p_ptt, data.lldp_local, data.addr, data.size);
- return rc;
+ return 0;
}
static int
@@ -810,7 +809,6 @@ static int
qed_dcbx_read_local_mib(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
struct qed_dcbx_mib_meta_data data;
- int rc = 0;
memset(&data, 0, sizeof(data));
data.addr = p_hwfn->mcp_info->port_addr +
@@ -819,7 +817,7 @@ qed_dcbx_read_local_mib(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
data.size = sizeof(struct dcbx_local_params);
qed_memcpy_from(p_hwfn, p_ptt, data.local_admin, data.addr, data.size);
- return rc;
+ return 0;
}
static int qed_dcbx_read_mib(struct qed_hwfn *p_hwfn,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_devlink.c b/drivers/net/ethernet/qlogic/qed/qed_devlink.c
index cf7f4da68e69..4c7501b9c284 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_devlink.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_devlink.c
@@ -207,14 +207,15 @@ struct devlink *qed_devlink_register(struct qed_dev *cdev)
struct devlink *dl;
int rc;
- dl = devlink_alloc(&qed_dl_ops, sizeof(struct qed_devlink));
+ dl = devlink_alloc(&qed_dl_ops, sizeof(struct qed_devlink),
+ &cdev->pdev->dev);
if (!dl)
return ERR_PTR(-ENOMEM);
qdevlink = devlink_priv(dl);
qdevlink->cdev = cdev;
- rc = devlink_register(dl, &cdev->pdev->dev);
+ rc = devlink_register(dl);
if (rc)
goto err_free;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index 578935f643b8..ab6d4f737316 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -464,12 +464,19 @@ static int qed_dorq_attn_int_sts(struct qed_hwfn *p_hwfn)
u32 int_sts, first_drop_reason, details, address, all_drops_reason;
struct qed_ptt *p_ptt = p_hwfn->p_dpc_ptt;
+ int_sts = qed_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS);
+ if (int_sts == 0xdeadbeaf) {
+ DP_NOTICE(p_hwfn->cdev,
+ "DORQ is being reset, skipping int_sts handler\n");
+
+ return 0;
+ }
+
/* int_sts may be zero since all PFs were interrupted for doorbell
* overflow but another one already handled it. Can abort here. If
* this PF also requires overflow recovery we will be interrupted again.
* The masked almost full indication may also be set. Ignoring.
*/
- int_sts = qed_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS);
if (!(int_sts & ~DORQ_REG_INT_STS_DORQ_FIFO_AFULL))
return 0;
@@ -528,6 +535,9 @@ static int qed_dorq_attn_int_sts(struct qed_hwfn *p_hwfn)
static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn)
{
+ if (p_hwfn->cdev->recov_in_prog)
+ return 0;
+
p_hwfn->db_recovery_info.dorq_attn = true;
qed_dorq_attn_overflow(p_hwfn);
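
Both qed_int.c hunks above guard the attention path against running while the function is being reset: the firmware parks DORQ_REG_INT_STS at a poison value during reset, and recov_in_prog short-circuits the callback entirely. The combined guard, restated as a sketch with hypothetical names (example_hwfn, example_rd, EXAMPLE_INT_STS):

    struct example_hwfn { bool recov_in_prog; };   /* hypothetical */

    static int example_attn_handler(struct example_hwfn *hwfn)
    {
        u32 int_sts;

        if (hwfn->recov_in_prog)        /* recovery flow owns the device */
            return 0;

        int_sts = example_rd(hwfn, EXAMPLE_INT_STS); /* hypothetical read */
        if (int_sts == 0xdeadbeaf)      /* block in reset: poison pattern */
            return 0;

        /* ... normal attention decode ... */
        return 0;
    }
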
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
index a99861124630..fc8b3e64f153 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
@@ -1624,8 +1624,6 @@ qed_iwarp_get_listener(struct qed_hwfn *p_hwfn,
static const u32 ip_zero[4] = { 0, 0, 0, 0 };
bool found = false;
- qed_iwarp_print_cm_info(p_hwfn, cm_info);
-
list_for_each_entry(listener,
&p_hwfn->p_rdma_info->iwarp.listen_list,
list_entry) {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 5bd58c65e163..6871d892eabf 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -49,11 +49,10 @@
#define QED_NVM_CFG_MAX_ATTRS 50
static char version[] =
- "QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";
+ "QLogic FastLinQ 4xxxx Core Module qed\n";
MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Core Module");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_MODULE_VERSION);
#define FW_FILE_VERSION \
__stringify(FW_MAJOR_VERSION) "." \
@@ -1216,6 +1215,10 @@ static void qed_slowpath_task(struct work_struct *work)
if (test_and_clear_bit(QED_SLOWPATH_PERIODIC_DB_REC,
&hwfn->slowpath_task_flags)) {
+ /* skip qed_db_rec_handler during recovery/unload */
+ if (hwfn->cdev->recov_in_prog || !hwfn->slowpath_wq_active)
+ goto out;
+
qed_db_rec_handler(hwfn, ptt);
if (hwfn->periodic_db_rec_count--)
qed_slowpath_delayed_work(hwfn,
@@ -1223,6 +1226,7 @@ static void qed_slowpath_task(struct work_struct *work)
QED_PERIODIC_DB_REC_INTERVAL);
}
+out:
qed_ptt_release(hwfn, ptt);
}
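
The qed_slowpath_task change above illustrates a deferred-work guard: the handler bails out during recovery or unload, but must still release the ptt it acquired, hence the new out label rather than an early return. The shape of the pattern, with hypothetical example_* types and helpers:

    #include <linux/workqueue.h>

    static void example_task(struct work_struct *work)
    {
        struct example_hwfn *hwfn =
            container_of(work, struct example_hwfn, slowpath_task.work);
        struct example_ptt *ptt = example_ptt_acquire(hwfn); /* hypothetical */

        if (!ptt)
            return;

        if (hwfn->recov_in_prog || !hwfn->slowpath_wq_active)
            goto out;               /* skip the work, never the release */

        example_db_rec_handler(hwfn, ptt);                   /* hypothetical */
    out:
        example_ptt_release(hwfn, ptt);                      /* hypothetical */
    }
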
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 4387292c37e2..6e5a6cc97d0e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -944,7 +944,6 @@ int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
memset(&in_params, 0, sizeof(in_params));
in_params.hsi_ver = QED_LOAD_REQ_HSI_VER_DEFAULT;
- in_params.drv_ver_0 = QED_VERSION;
in_params.drv_ver_1 = qed_get_config_bitmap();
in_params.fw_ver = STORM_FW_VERSION;
rc = eocre_get_mfw_drv_role(p_hwfn, p_params->drv_role, &mfw_drv_role);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_fw_funcs.c
index c1dd71d19f3f..3b84d00cf987 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_fw_funcs.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_fw_funcs.c
@@ -4,7 +4,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/types.h>
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index 5630008f38b7..66c69f0f9af1 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -30,15 +30,6 @@
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
-#define QEDE_MAJOR_VERSION 8
-#define QEDE_MINOR_VERSION 37
-#define QEDE_REVISION_VERSION 0
-#define QEDE_ENGINEERING_VERSION 20
-#define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \
- __stringify(QEDE_MINOR_VERSION) "." \
- __stringify(QEDE_REVISION_VERSION) "." \
- __stringify(QEDE_ENGINEERING_VERSION)
-
#define DRV_MODULE_SYM qede
struct qede_stats_common {
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index 1560ad3d9290..9c6aa6859646 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -625,13 +625,13 @@ static void qede_get_drvinfo(struct net_device *ndev,
(edev->dev_info.common.mfw_rev >> 8) & 0xFF,
edev->dev_info.common.mfw_rev & 0xFF);
- if ((strlen(storm) + strlen(DRV_MODULE_VERSION) + strlen("[storm] ")) <
+ if ((strlen(storm) + strlen("[storm]")) <
sizeof(info->version))
snprintf(info->version, sizeof(info->version),
- "%s [storm %s]", DRV_MODULE_VERSION, storm);
+ "[storm %s]", storm);
else
snprintf(info->version, sizeof(info->version),
- "%s %s", DRV_MODULE_VERSION, storm);
+ "%s", storm);
if (edev->dev_info.common.mbi_version) {
snprintf(mbi, ETHTOOL_FWVERS_LEN, "%d.%d.%d",
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 7c6064baeba2..d400e9b235bf 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -39,12 +39,8 @@
#include "qede.h"
#include "qede_ptp.h"
-static char version[] =
- "QLogic FastLinQ 4xxxx Ethernet Driver qede " DRV_MODULE_VERSION "\n";
-
MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Ethernet Driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_MODULE_VERSION);
static uint debug;
module_param(debug, uint, 0);
@@ -258,7 +254,7 @@ int __init qede_init(void)
{
int ret;
- pr_info("qede_init: %s\n", version);
+ pr_info("qede init: QLogic FastLinQ 4xxxx Ethernet Driver qede\n");
qede_forced_speed_maps_init();
@@ -644,7 +640,7 @@ static const struct net_device_ops qede_netdev_ops = {
.ndo_set_mac_address = qede_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = qede_change_mtu,
- .ndo_do_ioctl = qede_ioctl,
+ .ndo_eth_ioctl = qede_ioctl,
.ndo_tx_timeout = qede_tx_timeout,
#ifdef CONFIG_QED_SRIOV
.ndo_set_vf_mac = qede_set_vf_mac,
@@ -1157,10 +1153,6 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
/* Start the Slowpath-process */
memset(&sp_params, 0, sizeof(sp_params));
sp_params.int_mode = QED_INT_MODE_MSIX;
- sp_params.drv_major = QEDE_MAJOR_VERSION;
- sp_params.drv_minor = QEDE_MINOR_VERSION;
- sp_params.drv_rev = QEDE_REVISION_VERSION;
- sp_params.drv_eng = QEDE_ENGINEERING_VERSION;
strlcpy(sp_params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
rc = qed_ops->common->slowpath_start(cdev, &sp_params);
if (rc) {
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index d8f0863b3934..f6b6651decf3 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -1021,7 +1021,7 @@ clear_diag_irq:
static void qlcnic_create_loopback_buff(unsigned char *data, u8 mac[])
{
- unsigned char random_data[] = {0xa8, 0x06, 0x45, 0x00};
+ static const unsigned char random_data[] = {0xa8, 0x06, 0x45, 0x00};
memset(data, 0x4e, QLCNIC_ILB_PKT_SIZE);
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index ad655f0a4965..9015a38eaced 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -377,7 +377,7 @@ static const struct net_device_ops emac_netdev_ops = {
.ndo_start_xmit = emac_start_xmit,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = emac_change_mtu,
- .ndo_do_ioctl = phy_do_ioctl_running,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_tx_timeout = emac_tx_timeout,
.ndo_get_stats64 = emac_get_stats64,
.ndo_set_features = emac_set_features,
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index 47e9998b62f0..4b2eca5e08e2 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -954,7 +954,7 @@ static const struct net_device_ops r6040_netdev_ops = {
.ndo_set_rx_mode = r6040_multicast_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
- .ndo_do_ioctl = phy_do_ioctl,
+ .ndo_eth_ioctl = phy_do_ioctl,
.ndo_tx_timeout = r6040_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = r6040_poll_controller,
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 9677e257e9a1..edc61906694f 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -1869,7 +1869,7 @@ static const struct net_device_ops cp_netdev_ops = {
.ndo_set_mac_address = cp_set_mac_address,
.ndo_set_rx_mode = cp_set_rx_mode,
.ndo_get_stats = cp_get_stats,
- .ndo_do_ioctl = cp_ioctl,
+ .ndo_eth_ioctl = cp_ioctl,
.ndo_start_xmit = cp_start_xmit,
.ndo_tx_timeout = cp_tx_timeout,
.ndo_set_features = cp_set_features,
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index f0608f050050..2e6923cc653e 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -932,7 +932,7 @@ static const struct net_device_ops rtl8139_netdev_ops = {
.ndo_set_mac_address = rtl8139_set_mac_address,
.ndo_start_xmit = rtl8139_start_xmit,
.ndo_set_rx_mode = rtl8139_set_rx_mode,
- .ndo_do_ioctl = netdev_ioctl,
+ .ndo_eth_ioctl = netdev_ioctl,
.ndo_tx_timeout = rtl8139_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = rtl8139_poll_controller,
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 4d8e337f5085..7a69b468584a 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -2598,7 +2598,7 @@ static u32 rtl_csi_read(struct rtl8169_private *tp, int addr)
RTL_R32(tp, CSIDR) : ~0;
}
-static void rtl_csi_access_enable(struct rtl8169_private *tp, u8 val)
+static void rtl_set_aspm_entry_latency(struct rtl8169_private *tp, u8 val)
{
struct pci_dev *pdev = tp->pci_dev;
u32 csi;
@@ -2606,6 +2606,8 @@ static void rtl_csi_access_enable(struct rtl8169_private *tp, u8 val)
/* According to Realtek the value at config space address 0x070f
* controls the L0s/L1 entrance latency. We try standard ECAM access
* first and if it fails fall back to CSI.
+ * bit 0..2: L0: 0 = 1us, 1 = 2us .. 6 = 7us, 7 = 7us (no typo)
+ * bit 3..5: L1: 0 = 1us, 1 = 2us .. 6 = 64us, 7 = 64us
*/
if (pdev->cfg_size > 0x070f &&
pci_write_config_byte(pdev, 0x070f, val) == PCIBIOS_SUCCESSFUL)
@@ -2619,7 +2621,8 @@ static void rtl_csi_access_enable(struct rtl8169_private *tp, u8 val)
static void rtl_set_def_aspm_entry_latency(struct rtl8169_private *tp)
{
- rtl_csi_access_enable(tp, 0x27);
+ /* L0 7us, L1 16us */
+ rtl_set_aspm_entry_latency(tp, 0x27);
}
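
Given the bit layout documented above (bits 0..2 select the L0s entrance latency, bits 3..5 the L1 latency, each step roughly doubling), the r8169 magic constants decode as 0x27 = L0s 7us / L1 16us and 0x2f = L0s 7us / L1 32us. A small helper that builds the config-space 0x070f byte from the two fields, assuming the power-of-two mapping the comment implies:

    static u8 example_aspm_latency(u8 l0s_field, u8 l1_field)
    {
        return (l0s_field & 0x7) | ((l1_field & 0x7) << 3);
    }

    /* example_aspm_latency(7, 4) == 0x27  -> L0s 7us, L1 16us
     * example_aspm_latency(7, 5) == 0x2f  -> L0s 7us, L1 32us
     */
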
struct ephy_info {
@@ -3502,8 +3505,8 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp)
RTL_W8(tp, MCU, RTL_R8(tp, MCU) | EN_NDP | EN_OOB_RESET);
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
- /* The default value is 0x13. Change it to 0x2f */
- rtl_csi_access_enable(tp, 0x2f);
+ /* L0 7us, L1 32us - needed to avoid issues with link-up detection */
+ rtl_set_aspm_entry_latency(tp, 0x2f);
rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000);
@@ -4983,7 +4986,7 @@ static const struct net_device_ops rtl_netdev_ops = {
.ndo_fix_features = rtl8169_fix_features,
.ndo_set_features = rtl8169_set_features,
.ndo_set_mac_address = rtl_set_mac_address,
- .ndo_do_ioctl = phy_do_ioctl_running,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_set_rx_mode = rtl_set_rx_mode,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = rtl8169_netpoll,
diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig
index 5a2a4af31812..8008b2f45934 100644
--- a/drivers/net/ethernet/renesas/Kconfig
+++ b/drivers/net/ethernet/renesas/Kconfig
@@ -32,11 +32,11 @@ config SH_ETH
config RAVB
tristate "Renesas Ethernet AVB support"
depends on ARCH_RENESAS || COMPILE_TEST
+ depends on PTP_1588_CLOCK_OPTIONAL
select CRC32
select MII
select MDIO_BITBANG
select PHYLIB
- imply PTP_1588_CLOCK
help
Renesas Ethernet AVB device driver.
This driver supports the following SoCs:
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index 80e62ca2e3d3..37ad0f8aaf3c 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -988,6 +988,21 @@ enum ravb_chip_id {
RCAR_GEN3,
};
+struct ravb_hw_info {
+ const char (*gstrings_stats)[ETH_GSTRING_LEN];
+ size_t gstrings_size;
+ netdev_features_t net_hw_features;
+ netdev_features_t net_features;
+ enum ravb_chip_id chip_id;
+ int stats_len;
+ size_t max_rx_len;
+ unsigned aligned_tx: 1;
+
+ /* hardware features */
+ unsigned internal_delay:1; /* AVB-DMAC has internal delays */
+ unsigned tx_counters:1; /* E-MAC has TX counters */
+};
+
struct ravb_private {
struct net_device *ndev;
struct platform_device *pdev;
@@ -1039,7 +1054,9 @@ struct ravb_private {
unsigned rxcidm:1; /* RX Clock Internal Delay Mode */
unsigned txcidm:1; /* TX Clock Internal Delay Mode */
unsigned rgmii_override:1; /* Deprecated rgmii-*id behavior */
- int num_tx_desc; /* TX descriptors per packet */
+ unsigned int num_tx_desc; /* TX descriptors per packet */
+
+ const struct ravb_hw_info *info;
};
static inline u32 ravb_read(struct net_device *ndev, enum ravb_reg reg)
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 805397088850..02842b980a7f 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -177,10 +177,10 @@ static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
{
struct ravb_private *priv = netdev_priv(ndev);
struct net_device_stats *stats = &priv->stats[q];
- int num_tx_desc = priv->num_tx_desc;
+ unsigned int num_tx_desc = priv->num_tx_desc;
struct ravb_tx_desc *desc;
+ unsigned int entry;
int free_num = 0;
- int entry;
u32 size;
for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
@@ -220,9 +220,9 @@ static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
static void ravb_ring_free(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
- int num_tx_desc = priv->num_tx_desc;
- int ring_size;
- int i;
+ unsigned int num_tx_desc = priv->num_tx_desc;
+ unsigned int ring_size;
+ unsigned int i;
if (priv->rx_ring[q]) {
for (i = 0; i < priv->num_rx_ring[q]; i++) {
@@ -275,15 +275,15 @@ static void ravb_ring_free(struct net_device *ndev, int q)
static void ravb_ring_format(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
- int num_tx_desc = priv->num_tx_desc;
+ unsigned int num_tx_desc = priv->num_tx_desc;
struct ravb_ex_rx_desc *rx_desc;
struct ravb_tx_desc *tx_desc;
struct ravb_desc *desc;
- int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
- int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q] *
- num_tx_desc;
+ unsigned int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
+ unsigned int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q] *
+ num_tx_desc;
dma_addr_t dma_addr;
- int i;
+ unsigned int i;
priv->cur_rx[q] = 0;
priv->cur_tx[q] = 0;
@@ -339,10 +339,11 @@ static void ravb_ring_format(struct net_device *ndev, int q)
static int ravb_ring_init(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
- int num_tx_desc = priv->num_tx_desc;
+ const struct ravb_hw_info *info = priv->info;
+ unsigned int num_tx_desc = priv->num_tx_desc;
+ unsigned int ring_size;
struct sk_buff *skb;
- int ring_size;
- int i;
+ unsigned int i;
/* Allocate RX and TX skb rings */
priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
@@ -353,7 +354,7 @@ static int ravb_ring_init(struct net_device *ndev, int q)
goto error;
for (i = 0; i < priv->num_rx_ring[q]; i++) {
- skb = netdev_alloc_skb(ndev, RX_BUF_SZ + RAVB_ALIGN - 1);
+ skb = netdev_alloc_skb(ndev, info->max_rx_len);
if (!skb)
goto error;
ravb_set_buffer_align(skb);
@@ -535,6 +536,7 @@ static void ravb_rx_csum(struct sk_buff *skb)
static bool ravb_rx(struct net_device *ndev, int *quota, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
int entry = priv->cur_rx[q] % priv->num_rx_ring[q];
int boguscnt = (priv->dirty_rx[q] + priv->num_rx_ring[q]) -
priv->cur_rx[q];
@@ -619,9 +621,7 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
desc->ds_cc = cpu_to_le16(RX_BUF_SZ);
if (!priv->rx_skb[q][entry]) {
- skb = netdev_alloc_skb(ndev,
- RX_BUF_SZ +
- RAVB_ALIGN - 1);
+ skb = netdev_alloc_skb(ndev, info->max_rx_len);
if (!skb)
break; /* Better luck next round. */
ravb_set_buffer_align(skb);
@@ -1133,13 +1133,14 @@ static const char ravb_gstrings_stats[][ETH_GSTRING_LEN] = {
"rx_queue_1_over_errors",
};
-#define RAVB_STATS_LEN ARRAY_SIZE(ravb_gstrings_stats)
-
static int ravb_get_sset_count(struct net_device *netdev, int sset)
{
+ struct ravb_private *priv = netdev_priv(netdev);
+ const struct ravb_hw_info *info = priv->info;
+
switch (sset) {
case ETH_SS_STATS:
- return RAVB_STATS_LEN;
+ return info->stats_len;
default:
return -EOPNOTSUPP;
}
@@ -1176,9 +1177,12 @@ static void ravb_get_ethtool_stats(struct net_device *ndev,
static void ravb_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
+ struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
+
switch (stringset) {
case ETH_SS_STATS:
- memcpy(data, ravb_gstrings_stats, sizeof(ravb_gstrings_stats));
+ memcpy(data, info->gstrings_stats, info->gstrings_size);
break;
}
}
@@ -1488,7 +1492,7 @@ out:
static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
- int num_tx_desc = priv->num_tx_desc;
+ unsigned int num_tx_desc = priv->num_tx_desc;
u16 q = skb_get_queue_mapping(skb);
struct ravb_tstamp_skb *ts_skb;
struct ravb_tx_desc *desc;
@@ -1628,13 +1632,14 @@ static u16 ravb_select_queue(struct net_device *ndev, struct sk_buff *skb,
static struct net_device_stats *ravb_get_stats(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
struct net_device_stats *nstats, *stats0, *stats1;
nstats = &ndev->stats;
stats0 = &priv->stats[RAVB_BE];
stats1 = &priv->stats[RAVB_NC];
- if (priv->chip_id == RCAR_GEN3) {
+ if (info->tx_counters) {
nstats->tx_dropped += ravb_read(ndev, TROCR);
ravb_write(ndev, 0, TROCR); /* (write clear) */
}
@@ -1872,7 +1877,7 @@ static const struct net_device_ops ravb_netdev_ops = {
.ndo_get_stats = ravb_get_stats,
.ndo_set_rx_mode = ravb_set_rx_mode,
.ndo_tx_timeout = ravb_tx_timeout,
- .ndo_do_ioctl = ravb_do_ioctl,
+ .ndo_eth_ioctl = ravb_do_ioctl,
.ndo_change_mtu = ravb_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
@@ -1924,12 +1929,35 @@ static int ravb_mdio_release(struct ravb_private *priv)
return 0;
}
+static const struct ravb_hw_info ravb_gen3_hw_info = {
+ .gstrings_stats = ravb_gstrings_stats,
+ .gstrings_size = sizeof(ravb_gstrings_stats),
+ .net_hw_features = NETIF_F_RXCSUM,
+ .net_features = NETIF_F_RXCSUM,
+ .chip_id = RCAR_GEN3,
+ .stats_len = ARRAY_SIZE(ravb_gstrings_stats),
+ .max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1,
+ .internal_delay = 1,
+ .tx_counters = 1,
+};
+
+static const struct ravb_hw_info ravb_gen2_hw_info = {
+ .gstrings_stats = ravb_gstrings_stats,
+ .gstrings_size = sizeof(ravb_gstrings_stats),
+ .net_hw_features = NETIF_F_RXCSUM,
+ .net_features = NETIF_F_RXCSUM,
+ .chip_id = RCAR_GEN2,
+ .stats_len = ARRAY_SIZE(ravb_gstrings_stats),
+ .max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1,
+ .aligned_tx = 1,
+};
+
static const struct of_device_id ravb_match_table[] = {
- { .compatible = "renesas,etheravb-r8a7790", .data = (void *)RCAR_GEN2 },
- { .compatible = "renesas,etheravb-r8a7794", .data = (void *)RCAR_GEN2 },
- { .compatible = "renesas,etheravb-rcar-gen2", .data = (void *)RCAR_GEN2 },
- { .compatible = "renesas,etheravb-r8a7795", .data = (void *)RCAR_GEN3 },
- { .compatible = "renesas,etheravb-rcar-gen3", .data = (void *)RCAR_GEN3 },
+ { .compatible = "renesas,etheravb-r8a7790", .data = &ravb_gen2_hw_info },
+ { .compatible = "renesas,etheravb-r8a7794", .data = &ravb_gen2_hw_info },
+ { .compatible = "renesas,etheravb-rcar-gen2", .data = &ravb_gen2_hw_info },
+ { .compatible = "renesas,etheravb-r8a7795", .data = &ravb_gen3_hw_info },
+ { .compatible = "renesas,etheravb-rcar-gen3", .data = &ravb_gen3_hw_info },
{ }
};
MODULE_DEVICE_TABLE(of, ravb_match_table);
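
The match table now carries pointers to per-SoC ravb_hw_info descriptors instead of a bare chip-id enum, so every hardware difference (features, string tables, RX buffer sizing, delay and counter quirks) is pulled from one place at probe time. The retrieval half, as a minimal sketch; ravb_probe() below does exactly this, minus the defensive NULL check:

    #include <linux/of_device.h>

    const struct ravb_hw_info *info = of_device_get_match_data(&pdev->dev);

    if (!info)
        return -EINVAL;     /* cannot happen while every entry sets .data */

    ndev->features    = info->net_features;
    ndev->hw_features = info->net_hw_features;
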
@@ -1973,13 +2001,6 @@ static void ravb_set_config_mode(struct net_device *ndev)
}
}
-static const struct soc_device_attribute ravb_delay_mode_quirk_match[] = {
- { .soc_id = "r8a774c0" },
- { .soc_id = "r8a77990" },
- { .soc_id = "r8a77995" },
- { /* sentinel */ }
-};
-
/* Set tx and rx clock internal delay modes */
static void ravb_parse_delay_mode(struct device_node *np, struct net_device *ndev)
{
@@ -2010,12 +2031,8 @@ static void ravb_parse_delay_mode(struct device_node *np, struct net_device *nde
if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
priv->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) {
- if (!WARN(soc_device_match(ravb_delay_mode_quirk_match),
- "phy-mode %s requires TX clock internal delay mode which is not supported by this hardware revision. Please update device tree",
- phy_modes(priv->phy_interface))) {
- priv->txcidm = 1;
- priv->rgmii_override = 1;
- }
+ priv->txcidm = 1;
+ priv->rgmii_override = 1;
}
}
@@ -2034,8 +2051,8 @@ static void ravb_set_delay_mode(struct net_device *ndev)
static int ravb_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
+ const struct ravb_hw_info *info;
struct ravb_private *priv;
- enum ravb_chip_id chip_id;
struct net_device *ndev;
int error, irq, q;
struct resource *res;
@@ -2052,15 +2069,15 @@ static int ravb_probe(struct platform_device *pdev)
if (!ndev)
return -ENOMEM;
- ndev->features = NETIF_F_RXCSUM;
- ndev->hw_features = NETIF_F_RXCSUM;
+ info = of_device_get_match_data(&pdev->dev);
+
+ ndev->features = info->net_features;
+ ndev->hw_features = info->net_hw_features;
pm_runtime_enable(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);
- chip_id = (enum ravb_chip_id)of_device_get_match_data(&pdev->dev);
-
- if (chip_id == RCAR_GEN3)
+ if (info->chip_id == RCAR_GEN3)
irq = platform_get_irq_byname(pdev, "ch22");
else
irq = platform_get_irq(pdev, 0);
@@ -2073,6 +2090,7 @@ static int ravb_probe(struct platform_device *pdev)
SET_NETDEV_DEV(ndev, &pdev->dev);
priv = netdev_priv(ndev);
+ priv->info = info;
priv->ndev = ndev;
priv->pdev = pdev;
priv->num_tx_ring[RAVB_BE] = BE_TX_RING_SIZE;
@@ -2099,7 +2117,7 @@ static int ravb_probe(struct platform_device *pdev)
priv->avb_link_active_low =
of_property_read_bool(np, "renesas,ether-link-active-low");
- if (chip_id == RCAR_GEN3) {
+ if (info->chip_id == RCAR_GEN3) {
irq = platform_get_irq_byname(pdev, "ch24");
if (irq < 0) {
error = irq;
@@ -2124,7 +2142,7 @@ static int ravb_probe(struct platform_device *pdev)
}
}
- priv->chip_id = chip_id;
+ priv->chip_id = info->chip_id;
priv->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(priv->clk)) {
@@ -2142,7 +2160,7 @@ static int ravb_probe(struct platform_device *pdev)
ndev->max_mtu = 2048 - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
ndev->min_mtu = ETH_MIN_MTU;
- priv->num_tx_desc = chip_id == RCAR_GEN2 ?
+ priv->num_tx_desc = info->aligned_tx ?
NUM_TX_DESC_GEN2 : NUM_TX_DESC_GEN3;
/* Set function */
@@ -2160,7 +2178,7 @@ static int ravb_probe(struct platform_device *pdev)
/* Request GTI loading */
ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
- if (priv->chip_id != RCAR_GEN2) {
+ if (info->internal_delay) {
ravb_parse_delay_mode(np, ndev);
ravb_set_delay_mode(ndev);
}
@@ -2184,7 +2202,7 @@ static int ravb_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&priv->ts_skb_list);
/* Initialise PTP Clock driver */
- if (chip_id != RCAR_GEN2)
+ if (info->chip_id != RCAR_GEN2)
ravb_ptp_init(ndev, pdev);
/* Debug message level */
@@ -2232,7 +2250,7 @@ out_dma_free:
priv->desc_bat_dma);
/* Stop PTP Clock driver */
- if (chip_id != RCAR_GEN2)
+ if (info->chip_id != RCAR_GEN2)
ravb_ptp_stop(ndev);
out_disable_refclk:
clk_disable_unprepare(priv->refclk);
@@ -2333,6 +2351,7 @@ static int __maybe_unused ravb_resume(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
int ret = 0;
/* If WoL is enabled set reset mode to rearm the WoL logic */
@@ -2355,7 +2374,7 @@ static int __maybe_unused ravb_resume(struct device *dev)
/* Request GTI loading */
ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
- if (priv->chip_id != RCAR_GEN2)
+ if (info->internal_delay)
ravb_set_delay_mode(ndev);
/* Restore descriptor base address table */
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 840478692a37..6c8ba916d1a6 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -3141,7 +3141,7 @@ static const struct net_device_ops sh_eth_netdev_ops = {
.ndo_get_stats = sh_eth_get_stats,
.ndo_set_rx_mode = sh_eth_set_rx_mode,
.ndo_tx_timeout = sh_eth_tx_timeout,
- .ndo_do_ioctl = phy_do_ioctl_running,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_change_mtu = sh_eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
@@ -3157,7 +3157,7 @@ static const struct net_device_ops sh_eth_netdev_ops_tsu = {
.ndo_vlan_rx_add_vid = sh_eth_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = sh_eth_vlan_rx_kill_vid,
.ndo_tx_timeout = sh_eth_tx_timeout,
- .ndo_do_ioctl = phy_do_ioctl_running,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_change_mtu = sh_eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
diff --git a/drivers/net/ethernet/rocker/rocker.h b/drivers/net/ethernet/rocker/rocker.h
index 315a6e5c0f59..e75814a4654f 100644
--- a/drivers/net/ethernet/rocker/rocker.h
+++ b/drivers/net/ethernet/rocker/rocker.h
@@ -119,7 +119,8 @@ struct rocker_world_ops {
int (*port_obj_fdb_del)(struct rocker_port *rocker_port,
u16 vid, const unsigned char *addr);
int (*port_master_linked)(struct rocker_port *rocker_port,
- struct net_device *master);
+ struct net_device *master,
+ struct netlink_ext_ack *extack);
int (*port_master_unlinked)(struct rocker_port *rocker_port,
struct net_device *master);
int (*port_neigh_update)(struct rocker_port *rocker_port,
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index 1f06b92ee5bb..3364b6a56bd1 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -1670,13 +1670,14 @@ rocker_world_port_fdb_del(struct rocker_port *rocker_port,
}
static int rocker_world_port_master_linked(struct rocker_port *rocker_port,
- struct net_device *master)
+ struct net_device *master,
+ struct netlink_ext_ack *extack)
{
struct rocker_world_ops *wops = rocker_port->rocker->wops;
if (!wops->port_master_linked)
return -EOPNOTSUPP;
- return wops->port_master_linked(rocker_port, master);
+ return wops->port_master_linked(rocker_port, master, extack);
}
static int rocker_world_port_master_unlinked(struct rocker_port *rocker_port,
@@ -3107,6 +3108,7 @@ struct rocker_port *rocker_port_dev_lower_find(struct net_device *dev,
static int rocker_netdevice_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
+ struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct netdev_notifier_changeupper_info *info;
struct rocker_port *rocker_port;
@@ -3123,7 +3125,8 @@ static int rocker_netdevice_event(struct notifier_block *unused,
rocker_port = netdev_priv(dev);
if (info->linking) {
err = rocker_world_port_master_linked(rocker_port,
- info->upper_dev);
+ info->upper_dev,
+ extack);
if (err)
netdev_warn(dev, "failed to reflect master linked (err %d)\n",
err);
diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c
index e33a9d283a4e..3e1ca7a8d029 100644
--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
+++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
@@ -2571,8 +2571,10 @@ static int ofdpa_port_obj_fdb_del(struct rocker_port *rocker_port,
}
static int ofdpa_port_bridge_join(struct ofdpa_port *ofdpa_port,
- struct net_device *bridge)
+ struct net_device *bridge,
+ struct netlink_ext_ack *extack)
{
+ struct net_device *dev = ofdpa_port->dev;
int err;
/* Port is joining bridge, so the internal VLAN for the
@@ -2592,13 +2594,21 @@ static int ofdpa_port_bridge_join(struct ofdpa_port *ofdpa_port,
ofdpa_port->bridge_dev = bridge;
- return ofdpa_port_vlan_add(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
+ err = ofdpa_port_vlan_add(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
+ if (err)
+ return err;
+
+ return switchdev_bridge_port_offload(dev, dev, NULL, NULL, NULL,
+ false, extack);
}
static int ofdpa_port_bridge_leave(struct ofdpa_port *ofdpa_port)
{
+ struct net_device *dev = ofdpa_port->dev;
int err;
+ switchdev_bridge_port_unoffload(dev, NULL, NULL, NULL);
+
err = ofdpa_port_vlan_del(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
if (err)
return err;
@@ -2637,13 +2647,14 @@ static int ofdpa_port_ovs_changed(struct ofdpa_port *ofdpa_port,
}
static int ofdpa_port_master_linked(struct rocker_port *rocker_port,
- struct net_device *master)
+ struct net_device *master,
+ struct netlink_ext_ack *extack)
{
struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
int err = 0;
if (netif_is_bridge_master(master))
- err = ofdpa_port_bridge_join(ofdpa_port, master);
+ err = ofdpa_port_bridge_join(ofdpa_port, master, extack);
else if (netif_is_ovs_master(master))
err = ofdpa_port_ovs_changed(ofdpa_port, master);
return err;
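
The ofdpa join/leave paths now tell the bridge explicitly that this port is offloaded, and the extack threaded through rocker_world_port_master_linked() lets switchdev_bridge_port_offload() report a precise failure back to the netlink caller. The paired calls, reduced to their skeleton (ctx and the notifier blocks are unused by ofdpa, hence NULL; no tx forwarding offload is requested):

    #include <net/switchdev.h>

    /* bridge join path */
    err = switchdev_bridge_port_offload(dev, dev, NULL, NULL, NULL,
                                        false, extack);
    if (err)
        return err;

    /* bridge leave path */
    switchdev_bridge_port_unoffload(dev, NULL, NULL, NULL);
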
diff --git a/drivers/net/ethernet/samsung/Kconfig b/drivers/net/ethernet/samsung/Kconfig
index 0582e110b1c0..2a6c2658d284 100644
--- a/drivers/net/ethernet/samsung/Kconfig
+++ b/drivers/net/ethernet/samsung/Kconfig
@@ -20,9 +20,9 @@ if NET_VENDOR_SAMSUNG
config SXGBE_ETH
tristate "Samsung 10G/2.5G/1G SXGBE Ethernet driver"
depends on HAS_IOMEM && HAS_DMA
+ depends on PTP_1588_CLOCK_OPTIONAL
select PHYLIB
select CRC32
- imply PTP_1588_CLOCK
help
This is the driver for the SXGBE 10G Ethernet IP block found on
Samsung platforms.
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index 090bcd2fb758..6781aa636d58 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -1964,7 +1964,7 @@ static const struct net_device_ops sxgbe_netdev_ops = {
.ndo_set_features = sxgbe_set_features,
.ndo_set_rx_mode = sxgbe_set_rx_mode,
.ndo_tx_timeout = sxgbe_tx_timeout,
- .ndo_do_ioctl = sxgbe_ioctl,
+ .ndo_eth_ioctl = sxgbe_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = sxgbe_poll_controller,
#endif
diff --git a/drivers/net/ethernet/sfc/Kconfig b/drivers/net/ethernet/sfc/Kconfig
index 5e37c8313725..97ce64079855 100644
--- a/drivers/net/ethernet/sfc/Kconfig
+++ b/drivers/net/ethernet/sfc/Kconfig
@@ -19,9 +19,9 @@ if NET_VENDOR_SOLARFLARE
config SFC
tristate "Solarflare SFC9000/SFC9100/EF100-family support"
depends on PCI
+ depends on PTP_1588_CLOCK_OPTIONAL
select MDIO
select CRC32
- imply PTP_1588_CLOCK
help
This driver supports 10/40-gigabit Ethernet cards based on
the Solarflare SFC9000-family and SFC9100-family controllers.
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 37fcf2eb0741..a295e2621cf3 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -591,7 +591,7 @@ static const struct net_device_ops efx_netdev_ops = {
.ndo_tx_timeout = efx_watchdog,
.ndo_start_xmit = efx_hard_start_xmit,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = efx_ioctl,
+ .ndo_eth_ioctl = efx_ioctl,
.ndo_change_mtu = efx_change_mtu,
.ndo_set_mac_address = efx_set_mac_address,
.ndo_set_rx_mode = efx_set_rx_mode,
diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c
index 9ec752a43c75..c177ea0f301e 100644
--- a/drivers/net/ethernet/sfc/falcon/efx.c
+++ b/drivers/net/ethernet/sfc/falcon/efx.c
@@ -2219,7 +2219,7 @@ static const struct net_device_ops ef4_netdev_ops = {
.ndo_tx_timeout = ef4_watchdog,
.ndo_start_xmit = ef4_hard_start_xmit,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = ef4_ioctl,
+ .ndo_eth_ioctl = ef4_ioctl,
.ndo_change_mtu = ef4_change_mtu,
.ndo_set_mac_address = ef4_set_mac_address,
.ndo_set_rx_mode = ef4_set_rx_mode,
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index 2b29fd4cbdf4..062f7844c496 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -820,7 +820,7 @@ static const struct net_device_ops ioc3_netdev_ops = {
.ndo_tx_timeout = ioc3_timeout,
.ndo_get_stats = ioc3_get_stats,
.ndo_set_rx_mode = ioc3_set_multicast_list,
- .ndo_do_ioctl = ioc3_ioctl,
+ .ndo_eth_ioctl = ioc3_ioctl,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = ioc3_set_mac_address,
};
diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c
index 0c396ecd3389..efce834d8ee6 100644
--- a/drivers/net/ethernet/sgi/meth.c
+++ b/drivers/net/ethernet/sgi/meth.c
@@ -812,7 +812,7 @@ static const struct net_device_ops meth_netdev_ops = {
.ndo_open = meth_open,
.ndo_stop = meth_release,
.ndo_start_xmit = meth_tx,
- .ndo_do_ioctl = meth_ioctl,
+ .ndo_eth_ioctl = meth_ioctl,
.ndo_tx_timeout = meth_tx_timeout,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
index 676b193833c0..3d1a18a01ce5 100644
--- a/drivers/net/ethernet/sis/sis190.c
+++ b/drivers/net/ethernet/sis/sis190.c
@@ -1841,7 +1841,7 @@ static int sis190_mac_addr(struct net_device *dev, void *p)
static const struct net_device_ops sis190_netdev_ops = {
.ndo_open = sis190_open,
.ndo_stop = sis190_close,
- .ndo_do_ioctl = sis190_ioctl,
+ .ndo_eth_ioctl = sis190_ioctl,
.ndo_start_xmit = sis190_start_xmit,
.ndo_tx_timeout = sis190_tx_timeout,
.ndo_set_rx_mode = sis190_set_rx_mode,
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index cff87de9178a..60a0c0e9ded2 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -404,7 +404,7 @@ static const struct net_device_ops sis900_netdev_ops = {
.ndo_set_rx_mode = set_rx_mode,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
- .ndo_do_ioctl = mii_ioctl,
+ .ndo_eth_ioctl = mii_ioctl,
.ndo_tx_timeout = sis900_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = sis900_poll,
diff --git a/drivers/net/ethernet/smsc/Kconfig b/drivers/net/ethernet/smsc/Kconfig
index c52a38df0e0d..72e42a868346 100644
--- a/drivers/net/ethernet/smsc/Kconfig
+++ b/drivers/net/ethernet/smsc/Kconfig
@@ -23,6 +23,7 @@ config SMC9194
tristate "SMC 9194 support"
depends on ISA
select CRC32
+ select NETDEV_LEGACY_INIT
help
This is support for the SMC9xxx based Ethernet cards. Choose this
option if you have a DELL laptop with the docking station, or
diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c
index 51cd7dca91cd..44daf79a8f97 100644
--- a/drivers/net/ethernet/smsc/epic100.c
+++ b/drivers/net/ethernet/smsc/epic100.c
@@ -312,7 +312,7 @@ static const struct net_device_ops epic_netdev_ops = {
.ndo_tx_timeout = epic_tx_timeout,
.ndo_get_stats = epic_get_stats,
.ndo_set_rx_mode = set_rx_mode,
- .ndo_do_ioctl = netdev_ioctl,
+ .ndo_eth_ioctl = netdev_ioctl,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/ethernet/smsc/smc9194.c b/drivers/net/ethernet/smsc/smc9194.c
index bf7c8c8b1350..0ce403fa5f1a 100644
--- a/drivers/net/ethernet/smsc/smc9194.c
+++ b/drivers/net/ethernet/smsc/smc9194.c
@@ -1508,7 +1508,7 @@ MODULE_PARM_DESC(io, "SMC 99194 I/O base address");
MODULE_PARM_DESC(irq, "SMC 99194 IRQ number");
MODULE_PARM_DESC(ifport, "SMC 99194 interface port (0-default, 1-TP, 2-AUI)");
-int __init init_module(void)
+static int __init smc_init_module(void)
{
if (io == 0)
printk(KERN_WARNING
@@ -1518,13 +1518,15 @@ int __init init_module(void)
devSMC9194 = smc_init(-1);
return PTR_ERR_OR_ZERO(devSMC9194);
}
+module_init(smc_init_module);
-void __exit cleanup_module(void)
+static void __exit smc_cleanup_module(void)
{
unregister_netdev(devSMC9194);
free_irq(devSMC9194->irq, devSMC9194);
release_region(devSMC9194->base_addr, SMC_IO_EXTENT);
free_netdev(devSMC9194);
}
+module_exit(smc_cleanup_module);
#endif /* MODULE */
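
The smc9194 hunk above converts away from the reserved init_module()/cleanup_module() entry-point names, which lets the functions be static and is the modern pattern regardless of whether the driver is built in or as a module. Its skeleton, as a sketch:

    #include <linux/module.h>

    static int __init example_init(void)
    {
        return 0;   /* device probing/registration would go here */
    }
    module_init(example_init);

    static void __exit example_exit(void)
    {
    }
    module_exit(example_exit);
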
diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c
index f2a50eb3c1e0..42fc37c7887a 100644
--- a/drivers/net/ethernet/smsc/smc91c92_cs.c
+++ b/drivers/net/ethernet/smsc/smc91c92_cs.c
@@ -294,7 +294,7 @@ static const struct net_device_ops smc_netdev_ops = {
.ndo_tx_timeout = smc_tx_timeout,
.ndo_set_config = s9k_config,
.ndo_set_rx_mode = set_rx_mode,
- .ndo_do_ioctl = smc_ioctl,
+ .ndo_eth_ioctl = smc_ioctl,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 556a9790cdcf..199a97339280 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -2148,7 +2148,7 @@ static const struct net_device_ops smsc911x_netdev_ops = {
.ndo_start_xmit = smsc911x_hard_start_xmit,
.ndo_get_stats = smsc911x_get_stats,
.ndo_set_rx_mode = smsc911x_set_multicast_list,
- .ndo_do_ioctl = phy_do_ioctl_running,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = smsc911x_set_mac_address,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index c1dab009415d..fdbd2a43e267 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -1482,7 +1482,7 @@ static const struct net_device_ops smsc9420_netdev_ops = {
.ndo_start_xmit = smsc9420_hard_start_xmit,
.ndo_get_stats = smsc9420_get_stats,
.ndo_set_rx_mode = smsc9420_set_multicast_list,
- .ndo_do_ioctl = phy_do_ioctl_running,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index 20d148c019d8..d15f7b3a3f10 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -1831,7 +1831,7 @@ static const struct net_device_ops netsec_netdev_ops = {
.ndo_set_features = netsec_netdev_set_features,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = phy_do_ioctl,
+ .ndo_eth_ioctl = phy_do_ioctl,
.ndo_xdp_xmit = netsec_xdp_xmit,
.ndo_bpf = netsec_xdp,
};
diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c
index 5eb6bb4f7b6c..ae31ed93aaf0 100644
--- a/drivers/net/ethernet/socionext/sni_ave.c
+++ b/drivers/net/ethernet/socionext/sni_ave.c
@@ -1543,7 +1543,7 @@ static const struct net_device_ops ave_netdev_ops = {
.ndo_open = ave_open,
.ndo_stop = ave_stop,
.ndo_start_xmit = ave_start_xmit,
- .ndo_do_ioctl = ave_ioctl,
+ .ndo_eth_ioctl = ave_ioctl,
.ndo_set_rx_mode = ave_set_rx_mode,
.ndo_get_stats64 = ave_get_stats64,
.ndo_set_mac_address = ave_set_mac_address,
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index ac3c248d4f9b..929cfc22cd0c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -2,12 +2,12 @@
config STMMAC_ETH
tristate "STMicroelectronics Multi-Gigabit Ethernet driver"
depends on HAS_IOMEM && HAS_DMA
+ depends on PTP_1588_CLOCK_OPTIONAL
select MII
select PCS_XPCS
select PAGE_POOL
select PHYLINK
select CRC32
- imply PTP_1588_CLOCK
select RESET_CONTROLLER
help
This is the driver for the Ethernet IPs built around a
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 5fecc83f175b..b6d945ea903d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -58,6 +58,16 @@
#undef FRAME_FILTER_DEBUG
/* #define FRAME_FILTER_DEBUG */
+struct stmmac_txq_stats {
+ unsigned long tx_pkt_n;
+ unsigned long tx_normal_irq_n;
+};
+
+struct stmmac_rxq_stats {
+ unsigned long rx_pkt_n;
+ unsigned long rx_normal_irq_n;
+};
+
/* Extra statistic and debug information exposed by ethtool */
struct stmmac_extra_stats {
/* Transmit errors */
@@ -189,6 +199,9 @@ struct stmmac_extra_stats {
unsigned long mtl_est_hlbf;
unsigned long mtl_est_btre;
unsigned long mtl_est_btrlm;
+ /* per queue statistics */
+ struct stmmac_txq_stats txq_stats[MTL_MAX_TX_QUEUES];
+ struct stmmac_rxq_stats rxq_stats[MTL_MAX_RX_QUEUES];
};
/* Safety Feature statistics exposed by ethtool */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
index 28dd0ed85a82..f7dc8458cde8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
@@ -289,10 +289,7 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
val &= ~NSS_COMMON_GMAC_CTL_PHY_IFACE_SEL;
break;
default:
- dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n",
- phy_modes(gmac->phy_mode));
- err = -EINVAL;
- goto err_remove_config_dt;
+ goto err_unsupported_phy;
}
regmap_write(gmac->nss_common, NSS_COMMON_GMAC_CTL(gmac->id), val);
@@ -309,10 +306,7 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id);
break;
default:
- dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n",
- phy_modes(gmac->phy_mode));
- err = -EINVAL;
- goto err_remove_config_dt;
+ goto err_unsupported_phy;
}
regmap_write(gmac->nss_common, NSS_COMMON_CLK_SRC_CTRL, val);
@@ -329,8 +323,7 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
NSS_COMMON_CLK_GATE_GMII_TX_EN(gmac->id);
break;
default:
- /* We don't get here; the switch above will have errored out */
- unreachable();
+ goto err_unsupported_phy;
}
regmap_write(gmac->nss_common, NSS_COMMON_CLK_GATE, val);
@@ -361,6 +354,11 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
return 0;
+err_unsupported_phy:
+ dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n",
+ phy_modes(gmac->phy_mode));
+ err = -EINVAL;
+
err_remove_config_dt:
stmmac_remove_config_dt(pdev, plat_dat);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
index e63270267578..9292a1fab7d3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
@@ -170,13 +170,16 @@ int dwmac4_dma_interrupt(void __iomem *ioaddr,
x->normal_irq_n++;
if (likely(intr_status & DMA_CHAN_STATUS_RI)) {
x->rx_normal_irq_n++;
+ x->rxq_stats[chan].rx_normal_irq_n++;
ret |= handle_rx;
}
- if (likely(intr_status & (DMA_CHAN_STATUS_TI |
- DMA_CHAN_STATUS_TBU))) {
+ if (likely(intr_status & DMA_CHAN_STATUS_TI)) {
x->tx_normal_irq_n++;
+ x->txq_stats[chan].tx_normal_irq_n++;
ret |= handle_tx;
}
+ if (unlikely(intr_status & DMA_CHAN_STATUS_TBU))
+ ret |= handle_tx;
if (unlikely(intr_status & DMA_CHAN_STATUS_ERI))
x->rx_early_irq++;
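
The dwmac4 change above splits the transmit interrupt accounting: only a real Transmit Interrupt (TI) bumps the global and per-queue tx_normal_irq_n counters, while Transmit Buffer Unavailable (TBU) still schedules tx cleanup but no longer counts as a normal irq. The resulting decode, restated from the hunk:

    if (intr_status & DMA_CHAN_STATUS_RI) {     /* rx complete */
        x->rxq_stats[chan].rx_normal_irq_n++;
        ret |= handle_rx;
    }
    if (intr_status & DMA_CHAN_STATUS_TI) {     /* tx complete */
        x->txq_stats[chan].tx_normal_irq_n++;
        ret |= handle_tx;
    }
    if (intr_status & DMA_CHAN_STATUS_TBU)      /* ring starved: clean only */
        ret |= handle_tx;
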
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index d0ce608b81c3..595c3ccdcbb7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -261,6 +261,18 @@ static const struct stmmac_stats stmmac_mmc[] = {
};
#define STMMAC_MMC_STATS_LEN ARRAY_SIZE(stmmac_mmc)
+static const char stmmac_qstats_tx_string[][ETH_GSTRING_LEN] = {
+ "tx_pkt_n",
+ "tx_irq_n",
+#define STMMAC_TXQ_STATS ARRAY_SIZE(stmmac_qstats_tx_string)
+};
+
+static const char stmmac_qstats_rx_string[][ETH_GSTRING_LEN] = {
+ "rx_pkt_n",
+ "rx_irq_n",
+#define STMMAC_RXQ_STATS ARRAY_SIZE(stmmac_qstats_rx_string)
+};
+
static void stmmac_ethtool_getdrvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
@@ -510,6 +522,31 @@ stmmac_set_pauseparam(struct net_device *netdev,
}
}
+static void stmmac_get_per_qstats(struct stmmac_priv *priv, u64 *data)
+{
+ u32 tx_cnt = priv->plat->tx_queues_to_use;
+ u32 rx_cnt = priv->plat->rx_queues_to_use;
+ int q, stat;
+ char *p;
+
+ for (q = 0; q < tx_cnt; q++) {
+ p = (char *)priv + offsetof(struct stmmac_priv,
+ xstats.txq_stats[q].tx_pkt_n);
+ for (stat = 0; stat < STMMAC_TXQ_STATS; stat++) {
+ *data++ = (*(u64 *)p);
+ p += sizeof(u64);
+ }
+ }
+ for (q = 0; q < rx_cnt; q++) {
+ p = (char *)priv + offsetof(struct stmmac_priv,
+ xstats.rxq_stats[q].rx_pkt_n);
+ for (stat = 0; stat < STMMAC_RXQ_STATS; stat++) {
+ *data++ = (*(u64 *)p);
+ p += sizeof(u64);
+ }
+ }
+}
+
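
The offsetof()-based walk above strides through each queue's counters as raw words, relying on tx_pkt_n/tx_normal_irq_n (and the rx pair) being adjacent, same-size fields. An equivalent formulation without the pointer arithmetic, as a sketch using the txq_stats/rxq_stats fields added in common.h; like the original it assumes the unsigned long counters can be widened to u64 on the fly:

    for (q = 0; q < tx_cnt; q++) {
        *data++ = priv->xstats.txq_stats[q].tx_pkt_n;
        *data++ = priv->xstats.txq_stats[q].tx_normal_irq_n;
    }
    for (q = 0; q < rx_cnt; q++) {
        *data++ = priv->xstats.rxq_stats[q].rx_pkt_n;
        *data++ = priv->xstats.rxq_stats[q].rx_normal_irq_n;
    }
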
static void stmmac_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *dummy, u64 *data)
{
@@ -560,16 +597,21 @@ static void stmmac_get_ethtool_stats(struct net_device *dev,
data[j++] = (stmmac_gstrings_stats[i].sizeof_stat ==
sizeof(u64)) ? (*(u64 *)p) : (*(u32 *)p);
}
+ stmmac_get_per_qstats(priv, &data[j]);
}
static int stmmac_get_sset_count(struct net_device *netdev, int sset)
{
struct stmmac_priv *priv = netdev_priv(netdev);
+ u32 tx_cnt = priv->plat->tx_queues_to_use;
+ u32 rx_cnt = priv->plat->rx_queues_to_use;
int i, len, safety_len = 0;
switch (sset) {
case ETH_SS_STATS:
- len = STMMAC_STATS_LEN;
+ len = STMMAC_STATS_LEN +
+ STMMAC_TXQ_STATS * tx_cnt +
+ STMMAC_RXQ_STATS * rx_cnt;
if (priv->dma_cap.rmon)
len += STMMAC_MMC_STATS_LEN;
@@ -592,6 +634,28 @@ static int stmmac_get_sset_count(struct net_device *netdev, int sset)
}
}
+static void stmmac_get_qstats_string(struct stmmac_priv *priv, u8 *data)
+{
+ u32 tx_cnt = priv->plat->tx_queues_to_use;
+ u32 rx_cnt = priv->plat->rx_queues_to_use;
+ int q, stat;
+
+ for (q = 0; q < tx_cnt; q++) {
+ for (stat = 0; stat < STMMAC_TXQ_STATS; stat++) {
+ snprintf(data, ETH_GSTRING_LEN, "q%d_%s", q,
+ stmmac_qstats_tx_string[stat]);
+ data += ETH_GSTRING_LEN;
+ }
+ }
+ for (q = 0; q < rx_cnt; q++) {
+ for (stat = 0; stat < STMMAC_RXQ_STATS; stat++) {
+ snprintf(data, ETH_GSTRING_LEN, "q%d_%s", q,
+ stmmac_qstats_rx_string[stat]);
+ data += ETH_GSTRING_LEN;
+ }
+ }
+}
+
static void stmmac_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
int i;
@@ -622,6 +686,7 @@ static void stmmac_get_strings(struct net_device *dev, u32 stringset, u8 *data)
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
+ stmmac_get_qstats_string(priv, p);
break;
case ETH_SS_TEST:
stmmac_selftest_get_strings(priv, p);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 7b8404a21544..7b3fcf558603 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2500,6 +2500,7 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
} else {
priv->dev->stats.tx_packets++;
priv->xstats.tx_pkt_n++;
+ priv->xstats.txq_stats[queue].tx_pkt_n++;
}
if (skb)
stmmac_get_tx_hwtstamp(priv, p, skb);
@@ -5000,6 +5001,9 @@ read_again:
stmmac_finalize_xdp_rx(priv, xdp_status);
+ priv->xstats.rx_pkt_n += count;
+ priv->xstats.rxq_stats[queue].rx_pkt_n += count;
+
if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
if (failure || stmmac_rx_dirty(priv, queue) > 0)
xsk_set_rx_need_wakeup(rx_q->xsk_pool);
@@ -5287,6 +5291,7 @@ drain_data:
stmmac_rx_refill(priv, queue);
priv->xstats.rx_pkt_n += count;
+ priv->xstats.rxq_stats[queue].rx_pkt_n += count;
return count;
}
@@ -6451,7 +6456,7 @@ static const struct net_device_ops stmmac_netdev_ops = {
.ndo_set_features = stmmac_set_features,
.ndo_set_rx_mode = stmmac_set_rx_mode,
.ndo_tx_timeout = stmmac_tx_timeout,
- .ndo_do_ioctl = stmmac_ioctl,
+ .ndo_eth_ioctl = stmmac_ioctl,
.ndo_setup_tc = stmmac_setup_tc,
.ndo_select_queue = stmmac_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index 981685c88308..287ae4c538aa 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -4876,7 +4876,7 @@ static const struct net_device_ops cas_netdev_ops = {
.ndo_start_xmit = cas_start_xmit,
.ndo_get_stats = cas_get_stats,
.ndo_set_rx_mode = cas_set_multicast,
- .ndo_do_ioctl = cas_ioctl,
+ .ndo_eth_ioctl = cas_ioctl,
.ndo_tx_timeout = cas_tx_timeout,
.ndo_change_mtu = cas_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 860644d182ab..1501e8906be4 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -9208,7 +9208,7 @@ static int niu_get_of_props(struct niu *np)
else
dp = pci_device_to_OF_node(np->pdev);
- phy_type = of_get_property(dp, "phy-type", &prop_len);
+ phy_type = of_get_property(dp, "phy-type", NULL);
if (!phy_type) {
netdev_err(dev, "%pOF: OF node lacks phy-type property\n", dp);
return -EINVAL;
@@ -9242,12 +9242,12 @@ static int niu_get_of_props(struct niu *np)
return -EINVAL;
}
- model = of_get_property(dp, "model", &prop_len);
+ model = of_get_property(dp, "model", NULL);
if (model)
strcpy(np->vpd.model, model);
- if (of_find_property(dp, "hot-swappable-phy", &prop_len)) {
+ if (of_find_property(dp, "hot-swappable-phy", NULL)) {
np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER |
NIU_FLAGS_HOTPLUG_PHY);
}
@@ -9668,7 +9668,7 @@ static const struct net_device_ops niu_netdev_ops = {
.ndo_set_rx_mode = niu_set_rx_mode,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = niu_set_mac_addr,
- .ndo_do_ioctl = niu_ioctl,
+ .ndo_eth_ioctl = niu_ioctl,
.ndo_tx_timeout = niu_tx_timeout,
.ndo_change_mtu = niu_change_mtu,
};
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index cfb9e21b18b7..d72018a60c0f 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -2831,7 +2831,7 @@ static const struct net_device_ops gem_netdev_ops = {
.ndo_start_xmit = gem_start_xmit,
.ndo_get_stats = gem_get_stats,
.ndo_set_rx_mode = gem_set_multicast,
- .ndo_do_ioctl = gem_ioctl,
+ .ndo_eth_ioctl = gem_ioctl,
.ndo_tx_timeout = gem_tx_timeout,
.ndo_change_mtu = gem_change_mtu,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
index 26d178f8616b..1db7104fef3a 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
@@ -933,7 +933,7 @@ static const struct net_device_ops xlgmac_netdev_ops = {
.ndo_change_mtu = xlgmac_change_mtu,
.ndo_set_mac_address = xlgmac_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = xlgmac_ioctl,
+ .ndo_eth_ioctl = xlgmac_ioctl,
.ndo_vlan_rx_add_vid = xlgmac_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = xlgmac_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index d054c6e83b1c..8f6abaec41d1 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -637,7 +637,8 @@ static int bdx_range_check(struct bdx_priv *priv, u32 offset)
-EINVAL : 0;
}
-static int bdx_ioctl_priv(struct net_device *ndev, struct ifreq *ifr, int cmd)
+static int bdx_siocdevprivate(struct net_device *ndev, struct ifreq *ifr,
+ void __user *udata, int cmd)
{
struct bdx_priv *priv = netdev_priv(ndev);
u32 data[3];
@@ -647,7 +648,7 @@ static int bdx_ioctl_priv(struct net_device *ndev, struct ifreq *ifr, int cmd)
DBG("jiffies=%ld cmd=%d\n", jiffies, cmd);
if (cmd != SIOCDEVPRIVATE) {
- error = copy_from_user(data, ifr->ifr_data, sizeof(data));
+ error = copy_from_user(data, udata, sizeof(data));
if (error) {
pr_err("can't copy from user\n");
RET(-EFAULT);
@@ -669,7 +670,7 @@ static int bdx_ioctl_priv(struct net_device *ndev, struct ifreq *ifr, int cmd)
data[2] = READ_REG(priv, data[1]);
DBG("read_reg(0x%x)=0x%x (dec %d)\n", data[1], data[2],
data[2]);
- error = copy_to_user(ifr->ifr_data, data, sizeof(data));
+ error = copy_to_user(udata, data, sizeof(data));
if (error)
RET(-EFAULT);
break;
@@ -688,15 +689,6 @@ static int bdx_ioctl_priv(struct net_device *ndev, struct ifreq *ifr, int cmd)
return 0;
}
-static int bdx_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
-{
- ENTER;
- if (cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15))
- RET(bdx_ioctl_priv(ndev, ifr, cmd));
- else
- RET(-EOPNOTSUPP);
-}
-
/**
* __bdx_vlan_rx_vid - private helper for adding/killing VLAN vid
* @ndev: network device
@@ -1860,7 +1852,7 @@ static const struct net_device_ops bdx_netdev_ops = {
.ndo_stop = bdx_close,
.ndo_start_xmit = bdx_tx_transmit,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = bdx_ioctl,
+ .ndo_siocdevprivate = bdx_siocdevprivate,
.ndo_set_rx_mode = bdx_setmulti,
.ndo_change_mtu = bdx_change_mtu,
.ndo_set_mac_address = bdx_set_mac,
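The deleted bdx_ioctl() wrapper existed only to range-check SIOCDEVPRIVATE..SIOCDEVPRIVATE+15 before calling the private handler; with .ndo_siocdevprivate the core performs that check and also hands the handler the user pointer directly, which is what lets bdx_siocdevprivate() use @udata instead of dereferencing ifr->ifr_data. A simplified sketch of the core-side routing this relies on (assumption: the caller has already verified cmd lies in the private window):

static int dev_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
			      void __user *data, unsigned int cmd)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_siocdevprivate)
		return ops->ndo_siocdevprivate(dev, ifr, data, cmd);

	return -EOPNOTSUPP;
}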
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 67a08cbba859..130346f74ee8 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -27,6 +27,7 @@
#include <linux/sys_soc.h>
#include <linux/dma/ti-cppi5.h>
#include <linux/dma/k3-udma-glue.h>
+#include <net/switchdev.h>
#include "cpsw_ale.h"
#include "cpsw_sl.h"
@@ -518,6 +519,10 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common,
}
napi_enable(&common->napi_rx);
+ if (common->rx_irq_disabled) {
+ common->rx_irq_disabled = false;
+ enable_irq(common->rx_chns.irq);
+ }
dev_dbg(common->dev, "cpsw_nuss started\n");
return 0;
@@ -871,8 +876,12 @@ static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget)
dev_dbg(common->dev, "%s num_rx:%d %d\n", __func__, num_rx, budget);
- if (num_rx < budget && napi_complete_done(napi_rx, num_rx))
- enable_irq(common->rx_chns.irq);
+ if (num_rx < budget && napi_complete_done(napi_rx, num_rx)) {
+ if (common->rx_irq_disabled) {
+ common->rx_irq_disabled = false;
+ enable_irq(common->rx_chns.irq);
+ }
+ }
return num_rx;
}
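The tx_poll rework in the next hunk follows the standard NAPI completion contract: when the whole budget was consumed, return budget without completing so the poll stays scheduled; otherwise complete via napi_complete_done() and re-enable the interrupt only if that call confirms no new work raced in. A generic sketch of the pattern (struct my_chan and my_process_completions() are illustrative, not driver symbols):

static int my_napi_poll(struct napi_struct *napi, int budget)
{
	struct my_chan *chan = container_of(napi, struct my_chan, napi);
	int done = my_process_completions(chan, budget);

	if (done >= budget)
		return budget;		/* budget exhausted: stay in polling mode */

	if (napi_complete_done(napi, done))
		enable_irq(chan->irq);	/* safe: no new work raced the completion */

	return done;
}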
@@ -1077,19 +1086,20 @@ static int am65_cpsw_nuss_tx_poll(struct napi_struct *napi_tx, int budget)
else
num_tx = am65_cpsw_nuss_tx_compl_packets(tx_chn->common, tx_chn->id, budget);
- num_tx = min(num_tx, budget);
- if (num_tx < budget) {
- napi_complete(napi_tx);
+ if (num_tx >= budget)
+ return budget;
+
+ if (napi_complete_done(napi_tx, num_tx))
enable_irq(tx_chn->irq);
- }
- return num_tx;
+ return 0;
}
static irqreturn_t am65_cpsw_nuss_rx_irq(int irq, void *dev_id)
{
struct am65_cpsw_common *common = dev_id;
+ common->rx_irq_disabled = true;
disable_irq_nosync(irq);
napi_schedule(&common->napi_rx);
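The new rx_irq_disabled flag records that the handler above left the RX interrupt line masked. Both re-enable sites, common_open() after napi_enable() and the RX poll completion, test-and-clear it first, so enable_irq() can never run unbalanced even when open/close cycles interleave with a pending NAPI poll. The guard both paths share, as a sketch (the helper name is illustrative):

static void am65_cpsw_rx_irq_reenable(struct am65_cpsw_common *common)
{
	if (common->rx_irq_disabled) {
		common->rx_irq_disabled = false;
		enable_irq(common->rx_chns.irq);
	}
}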
@@ -1479,7 +1489,7 @@ static const struct net_device_ops am65_cpsw_nuss_netdev_ops = {
.ndo_tx_timeout = am65_cpsw_nuss_ndo_host_tx_timeout,
.ndo_vlan_rx_add_vid = am65_cpsw_nuss_ndo_slave_add_vid,
.ndo_vlan_rx_kill_vid = am65_cpsw_nuss_ndo_slave_kill_vid,
- .ndo_do_ioctl = am65_cpsw_nuss_ndo_slave_ioctl,
+ .ndo_eth_ioctl = am65_cpsw_nuss_ndo_slave_ioctl,
.ndo_setup_tc = am65_cpsw_qos_ndo_setup_tc,
.ndo_get_devlink_port = am65_cpsw_ndo_get_devlink_port,
};
@@ -2081,10 +2091,13 @@ bool am65_cpsw_port_dev_check(const struct net_device *ndev)
return false;
}
-static int am65_cpsw_netdevice_port_link(struct net_device *ndev, struct net_device *br_ndev)
+static int am65_cpsw_netdevice_port_link(struct net_device *ndev,
+ struct net_device *br_ndev,
+ struct netlink_ext_ack *extack)
{
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
struct am65_cpsw_ndev_priv *priv = am65_ndev_to_priv(ndev);
+ int err;
if (!common->br_members) {
common->hw_bridge_dev = br_ndev;
@@ -2096,6 +2109,11 @@ static int am65_cpsw_netdevice_port_link(struct net_device *ndev, struct net_dev
return -EOPNOTSUPP;
}
+ err = switchdev_bridge_port_offload(ndev, ndev, NULL, NULL, NULL,
+ false, extack);
+ if (err)
+ return err;
+
common->br_members |= BIT(priv->port->port_id);
am65_cpsw_port_offload_fwd_mark_update(common);
@@ -2108,6 +2126,8 @@ static void am65_cpsw_netdevice_port_unlink(struct net_device *ndev)
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
struct am65_cpsw_ndev_priv *priv = am65_ndev_to_priv(ndev);
+ switchdev_bridge_port_unoffload(ndev, NULL, NULL, NULL);
+
common->br_members &= ~BIT(priv->port->port_id);
am65_cpsw_port_offload_fwd_mark_update(common);
@@ -2120,6 +2140,7 @@ static void am65_cpsw_netdevice_port_unlink(struct net_device *ndev)
static int am65_cpsw_netdevice_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
+ struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
struct netdev_notifier_changeupper_info *info;
int ret = NOTIFY_DONE;
@@ -2133,7 +2154,9 @@ static int am65_cpsw_netdevice_event(struct notifier_block *unused,
if (netif_is_bridge_master(info->upper_dev)) {
if (info->linking)
- ret = am65_cpsw_netdevice_port_link(ndev, info->upper_dev);
+ ret = am65_cpsw_netdevice_port_link(ndev,
+ info->upper_dev,
+ extack);
else
am65_cpsw_netdevice_port_unlink(ndev);
}
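am65-cpsw (and cpsw_new below) now tell the bridge layer explicitly when a port joins or leaves a bridge, forwarding the notifier's extack so a refusal reaches the user as a proper error message. Spelling out the parameters of the call from the hunk above, assuming the 5.15-era signature; the NULLs and false select optional features this driver does not use:

/* brport_dev == dev here because the port netdev itself is the bridge port */
err = switchdev_bridge_port_offload(ndev,	/* brport_dev */
				    ndev,	/* offloading netdev */
				    NULL,	/* ctx cookie */
				    NULL,	/* atomic notifier */
				    NULL,	/* blocking notifier */
				    false,	/* no tx_fwd_offload */
				    extack);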
@@ -2388,21 +2411,6 @@ static const struct devlink_param am65_cpsw_devlink_params[] = {
am65_cpsw_dl_switch_mode_set, NULL),
};
-static void am65_cpsw_unregister_devlink_ports(struct am65_cpsw_common *common)
-{
- struct devlink_port *dl_port;
- struct am65_cpsw_port *port;
- int i;
-
- for (i = 1; i <= common->port_num; i++) {
- port = am65_common_get_port(common, i);
- dl_port = &port->devlink_port;
-
- if (dl_port->registered)
- devlink_port_unregister(dl_port);
- }
-}
-
static int am65_cpsw_nuss_register_devlink(struct am65_cpsw_common *common)
{
struct devlink_port_attrs attrs = {};
@@ -2414,14 +2422,14 @@ static int am65_cpsw_nuss_register_devlink(struct am65_cpsw_common *common)
int i;
common->devlink =
- devlink_alloc(&am65_cpsw_devlink_ops, sizeof(*dl_priv));
+ devlink_alloc(&am65_cpsw_devlink_ops, sizeof(*dl_priv), dev);
if (!common->devlink)
return -ENOMEM;
dl_priv = devlink_priv(common->devlink);
dl_priv->common = common;
- ret = devlink_register(common->devlink, dev);
+ ret = devlink_register(common->devlink);
if (ret) {
dev_err(dev, "devlink reg fail ret:%d\n", ret);
goto dl_free;
@@ -2464,7 +2472,12 @@ static int am65_cpsw_nuss_register_devlink(struct am65_cpsw_common *common)
return ret;
dl_port_unreg:
- am65_cpsw_unregister_devlink_ports(common);
+ for (i = i - 1; i >= 1; i--) {
+ port = am65_common_get_port(common, i);
+ dl_port = &port->devlink_port;
+
+ devlink_port_unregister(dl_port);
+ }
dl_unreg:
devlink_unregister(common->devlink);
dl_free:
@@ -2475,6 +2488,17 @@ dl_free:
static void am65_cpsw_unregister_devlink(struct am65_cpsw_common *common)
{
+ struct devlink_port *dl_port;
+ struct am65_cpsw_port *port;
+ int i;
+
+ for (i = 1; i <= common->port_num; i++) {
+ port = am65_common_get_port(common, i);
+ dl_port = &port->devlink_port;
+
+ devlink_port_unregister(dl_port);
+ }
+
if (!AM65_CPSW_IS_CPSW2G(common) &&
IS_ENABLED(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV)) {
devlink_params_unpublish(common->devlink);
@@ -2482,7 +2506,6 @@ static void am65_cpsw_unregister_devlink(struct am65_cpsw_common *common)
ARRAY_SIZE(am65_cpsw_devlink_params));
}
- am65_cpsw_unregister_devlink_ports(common);
devlink_unregister(common->devlink);
devlink_free(common->devlink);
}
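Two devlink API changes land in this file: devlink_alloc() now takes the parent struct device (previously passed to devlink_register()), and the error unwind unregisters only the ports that were actually registered by counting i back down, instead of scanning for dl_port->registered, so the helper that did that scan could be dropped. A sketch of the new pairing, with the priv struct name assumed from the hunk above:

/* sketch (assumption: condensed from the registration hunk above) */
common->devlink = devlink_alloc(&am65_cpsw_devlink_ops,
				sizeof(struct am65_cpsw_devlink), dev);
if (!common->devlink)
	return -ENOMEM;

ret = devlink_register(common->devlink);	/* no more dev argument */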
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.h b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
index 5d93e346f05e..048ed10143c1 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.h
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
@@ -126,6 +126,8 @@ struct am65_cpsw_common {
struct am65_cpsw_rx_chn rx_chns;
struct napi_struct napi_rx;
+ bool rx_irq_disabled;
+
u32 nuss_ver;
u32 cpsw_ver;
unsigned long bus_freq;
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index c20715107075..02d4e51f7306 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -1044,7 +1044,7 @@ static const struct net_device_ops cpmac_netdev_ops = {
.ndo_start_xmit = cpmac_start_xmit,
.ndo_tx_timeout = cpmac_tx_timeout,
.ndo_set_rx_mode = cpmac_set_multicast_list,
- .ndo_do_ioctl = phy_do_ioctl_running,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
};
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index cbbd0f665796..9f70e40779f6 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -431,7 +431,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
skb->protocol = eth_type_trans(skb, ndev);
/* mark skb for recycling */
- skb_mark_for_recycle(skb, page, pool);
+ skb_mark_for_recycle(skb);
netif_receive_skb(skb);
ndev->stats.rx_bytes += len;
@@ -905,7 +905,7 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
struct cpdma_chan *txch;
int ret, q_idx;
- if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) {
+ if (skb_put_padto(skb, CPSW_MIN_PACKET_SIZE)) {
cpsw_err(priv, tx_err, "packet pad failed\n");
ndev->stats.tx_dropped++;
return NET_XMIT_DROP;
@@ -1159,7 +1159,7 @@ static const struct net_device_ops cpsw_netdev_ops = {
.ndo_stop = cpsw_ndo_stop,
.ndo_start_xmit = cpsw_ndo_start_xmit,
.ndo_set_mac_address = cpsw_ndo_set_mac_address,
- .ndo_do_ioctl = cpsw_ndo_ioctl,
+ .ndo_eth_ioctl = cpsw_ndo_ioctl,
.ndo_validate_addr = eth_validate_addr,
.ndo_tx_timeout = cpsw_ndo_tx_timeout,
.ndo_set_rx_mode = cpsw_ndo_set_rx_mode,
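Two independent fixes sit in the cpsw.c hunks above: skb_mark_for_recycle() dropped its page/pool arguments because page-pool ownership is now recorded in the page itself, and skb_padto() became skb_put_padto() so the zeroed padding is counted in skb->len and actually transmitted. The distinction, sketched; my_put_padto() mirrors skb_put_padto() under that assumption and is not the kernel's copy:

/* skb_padto() pads the buffer but leaves skb->len untouched, so a driver
 * that DMAs skb->len bytes would still send a short frame on the wire. */
static inline int my_put_padto(struct sk_buff *skb, unsigned int len)
{
	if (skb->len < len) {
		unsigned int pad = len - skb->len;

		if (skb_pad(skb, pad))	/* zero-fills the tailroom */
			return -ENOMEM;
		__skb_put(skb, pad);	/* padding now counts in skb->len */
	}
	return 0;
}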
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index d1d02001cef6..534d39f729e2 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -28,6 +28,7 @@
#include <linux/kmemleak.h>
#include <linux/sys_soc.h>
+#include <net/switchdev.h>
#include <net/page_pool.h>
#include <net/pkt_cls.h>
#include <net/devlink.h>
@@ -374,7 +375,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
skb->protocol = eth_type_trans(skb, ndev);
/* mark skb for recycling */
- skb_mark_for_recycle(skb, page, pool);
+ skb_mark_for_recycle(skb);
netif_receive_skb(skb);
ndev->stats.rx_bytes += len;
@@ -501,7 +502,7 @@ static void cpsw_restore(struct cpsw_priv *priv)
static void cpsw_init_stp_ale_entry(struct cpsw_common *cpsw)
{
- char stpa[] = {0x01, 0x80, 0xc2, 0x0, 0x0, 0x0};
+ static const char stpa[] = {0x01, 0x80, 0xc2, 0x0, 0x0, 0x0};
cpsw_ale_add_mcast(cpsw->ale, stpa,
ALE_PORT_HOST, ALE_SUPER, 0,
@@ -1127,7 +1128,7 @@ static const struct net_device_ops cpsw_netdev_ops = {
.ndo_stop = cpsw_ndo_stop,
.ndo_start_xmit = cpsw_ndo_start_xmit,
.ndo_set_mac_address = cpsw_ndo_set_mac_address,
- .ndo_do_ioctl = cpsw_ndo_ioctl,
+ .ndo_eth_ioctl = cpsw_ndo_ioctl,
.ndo_validate_addr = eth_validate_addr,
.ndo_tx_timeout = cpsw_ndo_tx_timeout,
.ndo_set_rx_mode = cpsw_ndo_set_rx_mode,
@@ -1500,10 +1501,12 @@ static void cpsw_port_offload_fwd_mark_update(struct cpsw_common *cpsw)
}
static int cpsw_netdevice_port_link(struct net_device *ndev,
- struct net_device *br_ndev)
+ struct net_device *br_ndev,
+ struct netlink_ext_ack *extack)
{
struct cpsw_priv *priv = netdev_priv(ndev);
struct cpsw_common *cpsw = priv->cpsw;
+ int err;
if (!cpsw->br_members) {
cpsw->hw_bridge_dev = br_ndev;
@@ -1515,6 +1518,11 @@ static int cpsw_netdevice_port_link(struct net_device *ndev,
return -EOPNOTSUPP;
}
+ err = switchdev_bridge_port_offload(ndev, ndev, NULL, NULL, NULL,
+ false, extack);
+ if (err)
+ return err;
+
cpsw->br_members |= BIT(priv->emac_port);
cpsw_port_offload_fwd_mark_update(cpsw);
@@ -1527,6 +1535,8 @@ static void cpsw_netdevice_port_unlink(struct net_device *ndev)
struct cpsw_priv *priv = netdev_priv(ndev);
struct cpsw_common *cpsw = priv->cpsw;
+ switchdev_bridge_port_unoffload(ndev, NULL, NULL, NULL);
+
cpsw->br_members &= ~BIT(priv->emac_port);
cpsw_port_offload_fwd_mark_update(cpsw);
@@ -1539,6 +1549,7 @@ static void cpsw_netdevice_port_unlink(struct net_device *ndev)
static int cpsw_netdevice_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
+ struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
struct netdev_notifier_changeupper_info *info;
int ret = NOTIFY_DONE;
@@ -1553,7 +1564,8 @@ static int cpsw_netdevice_event(struct notifier_block *unused,
if (netif_is_bridge_master(info->upper_dev)) {
if (info->linking)
ret = cpsw_netdevice_port_link(ndev,
- info->upper_dev);
+ info->upper_dev,
+ extack);
else
cpsw_netdevice_port_unlink(ndev);
}
@@ -1791,14 +1803,14 @@ static int cpsw_register_devlink(struct cpsw_common *cpsw)
struct cpsw_devlink *dl_priv;
int ret = 0;
- cpsw->devlink = devlink_alloc(&cpsw_devlink_ops, sizeof(*dl_priv));
+ cpsw->devlink = devlink_alloc(&cpsw_devlink_ops, sizeof(*dl_priv), dev);
if (!cpsw->devlink)
return -ENOMEM;
dl_priv = devlink_priv(cpsw->devlink);
dl_priv->cpsw = cpsw;
- ret = devlink_register(cpsw->devlink, dev);
+ ret = devlink_register(cpsw->devlink);
if (ret) {
dev_err(dev, "DL reg fail ret:%d\n", ret);
goto dl_free;
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index c674e34b6839..b1c5cbe7478b 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -943,7 +943,7 @@ static int emac_dev_xmit(struct sk_buff *skb, struct net_device *ndev)
goto fail_tx;
}
- ret_code = skb_padto(skb, EMAC_DEF_MIN_ETHPKTSIZE);
+ ret_code = skb_put_padto(skb, EMAC_DEF_MIN_ETHPKTSIZE);
if (unlikely(ret_code < 0)) {
if (netif_msg_tx_err(priv) && net_ratelimit())
dev_err(emac_dev, "DaVinci EMAC: packet pad failed");
@@ -1670,7 +1670,7 @@ static const struct net_device_ops emac_netdev_ops = {
.ndo_start_xmit = emac_dev_xmit,
.ndo_set_rx_mode = emac_dev_mcast_set,
.ndo_set_mac_address = emac_dev_setmac_addr,
- .ndo_do_ioctl = emac_devioctl,
+ .ndo_eth_ioctl = emac_devioctl,
.ndo_tx_timeout = emac_dev_tx_timeout,
.ndo_get_stats = emac_dev_getnetstats,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index 97942b0e3897..eda2961c0fe2 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -1944,7 +1944,7 @@ static const struct net_device_ops netcp_netdev_ops = {
.ndo_stop = netcp_ndo_stop,
.ndo_start_xmit = netcp_ndo_start_xmit,
.ndo_set_rx_mode = netcp_set_rx_mode,
- .ndo_do_ioctl = netcp_ndo_ioctl,
+ .ndo_eth_ioctl = netcp_ndo_ioctl,
.ndo_get_stats64 = netcp_get_stats,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index e0cb713193ea..77c448ad67ce 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -749,7 +749,7 @@ static const struct net_device_ops tlan_netdev_ops = {
.ndo_tx_timeout = tlan_tx_timeout,
.ndo_get_stats = tlan_get_stats,
.ndo_set_rx_mode = tlan_set_multicast_list,
- .ndo_do_ioctl = tlan_ioctl,
+ .ndo_eth_ioctl = tlan_ioctl,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index 226a76633e65..087f0af56c50 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -2214,7 +2214,7 @@ static const struct net_device_ops spider_net_ops = {
.ndo_start_xmit = spider_net_xmit,
.ndo_set_rx_mode = spider_net_set_multi,
.ndo_set_mac_address = spider_net_set_mac,
- .ndo_do_ioctl = spider_net_do_ioctl,
+ .ndo_eth_ioctl = spider_net_do_ioctl,
.ndo_tx_timeout = spider_net_tx_timeout,
.ndo_validate_addr = eth_validate_addr,
/* HW VLAN */
diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
index fedb2bf69261..52245ac60fc7 100644
--- a/drivers/net/ethernet/toshiba/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -750,7 +750,7 @@ static const struct net_device_ops tc35815_netdev_ops = {
.ndo_get_stats = tc35815_get_stats,
.ndo_set_rx_mode = tc35815_set_multicast_list,
.ndo_tx_timeout = tc35815_tx_timeout,
- .ndo_do_ioctl = phy_do_ioctl_running,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index c62f474b6d08..cf0917b29e30 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -1538,7 +1538,7 @@ static const struct net_device_ops tsi108_netdev_ops = {
.ndo_start_xmit = tsi108_send_packet,
.ndo_set_rx_mode = tsi108_set_rx_mode,
.ndo_get_stats = tsi108_get_stats,
- .ndo_do_ioctl = tsi108_do_ioctl,
+ .ndo_eth_ioctl = tsi108_do_ioctl,
.ndo_set_mac_address = tsi108_set_mac,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index 73ca597ebd1b..961b623b7880 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -884,7 +884,7 @@ static const struct net_device_ops rhine_netdev_ops = {
.ndo_set_rx_mode = rhine_set_rx_mode,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
- .ndo_do_ioctl = netdev_ioctl,
+ .ndo_eth_ioctl = netdev_ioctl,
.ndo_tx_timeout = rhine_tx_timeout,
.ndo_vlan_rx_add_vid = rhine_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = rhine_vlan_rx_kill_vid,
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index 88426b5e410b..278f49518d3f 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -2637,7 +2637,7 @@ static const struct net_device_ops velocity_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
.ndo_set_rx_mode = velocity_set_multi,
.ndo_change_mtu = velocity_change_mtu,
- .ndo_do_ioctl = velocity_ioctl,
+ .ndo_eth_ioctl = velocity_ioctl,
.ndo_vlan_rx_add_vid = velocity_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = velocity_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 60a4f79b8fa1..db1994fb51c5 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -1237,7 +1237,7 @@ static const struct net_device_ops temac_netdev_ops = {
.ndo_set_rx_mode = temac_set_multicast_list,
.ndo_set_mac_address = temac_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = phy_do_ioctl_running,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = temac_poll_controller,
#endif
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 13cd799541aa..348c0ba5edcf 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -1227,7 +1227,7 @@ static const struct net_device_ops axienet_netdev_ops = {
.ndo_change_mtu = axienet_change_mtu,
.ndo_set_mac_address = netdev_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = axienet_ioctl,
+ .ndo_eth_ioctl = axienet_ioctl,
.ndo_set_rx_mode = axienet_set_multicast_list,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = axienet_poll_controller,
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index b06377fe7293..b780aad3550a 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -1263,7 +1263,7 @@ static const struct net_device_ops xemaclite_netdev_ops = {
.ndo_start_xmit = xemaclite_send,
.ndo_set_mac_address = xemaclite_set_mac_address,
.ndo_tx_timeout = xemaclite_tx_timeout,
- .ndo_do_ioctl = xemaclite_ioctl,
+ .ndo_eth_ioctl = xemaclite_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = xemaclite_poll_controller,
#endif
diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c
index 4f6db6f5c272..ae611e46da6a 100644
--- a/drivers/net/ethernet/xircom/xirc2ps_cs.c
+++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c
@@ -464,7 +464,7 @@ static const struct net_device_ops netdev_ops = {
.ndo_start_xmit = do_start_xmit,
.ndo_tx_timeout = xirc_tx_timeout,
.ndo_set_config = do_config,
- .ndo_do_ioctl = do_ioctl,
+ .ndo_eth_ioctl = do_ioctl,
.ndo_set_rx_mode = set_multicast_list,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 7ae754eadf22..ff50305d6e13 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -1357,7 +1357,7 @@ static const struct net_device_ops ixp4xx_netdev_ops = {
.ndo_stop = eth_close,
.ndo_start_xmit = eth_xmit,
.ndo_set_rx_mode = eth_set_mcast_list,
- .ndo_do_ioctl = eth_ioctl,
+ .ndo_eth_ioctl = eth_ioctl,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/fddi/skfp/skfddi.c b/drivers/net/fddi/skfp/skfddi.c
index 69c29a2ef95d..f62e98fada1a 100644
--- a/drivers/net/fddi/skfp/skfddi.c
+++ b/drivers/net/fddi/skfp/skfddi.c
@@ -70,6 +70,7 @@ static const char * const boot_msg =
/* Include files */
#include <linux/capability.h>
+#include <linux/compat.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -103,7 +104,8 @@ static struct net_device_stats *skfp_ctl_get_stats(struct net_device *dev);
static void skfp_ctl_set_multicast_list(struct net_device *dev);
static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev);
static int skfp_ctl_set_mac_address(struct net_device *dev, void *addr);
-static int skfp_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int skfp_siocdevprivate(struct net_device *dev, struct ifreq *rq,
+ void __user *data, int cmd);
static netdev_tx_t skfp_send_pkt(struct sk_buff *skb,
struct net_device *dev);
static void send_queued_packets(struct s_smc *smc);
@@ -164,7 +166,7 @@ static const struct net_device_ops skfp_netdev_ops = {
.ndo_get_stats = skfp_ctl_get_stats,
.ndo_set_rx_mode = skfp_ctl_set_multicast_list,
.ndo_set_mac_address = skfp_ctl_set_mac_address,
- .ndo_do_ioctl = skfp_ioctl,
+ .ndo_siocdevprivate = skfp_siocdevprivate,
};
/*
@@ -932,9 +934,9 @@ static int skfp_ctl_set_mac_address(struct net_device *dev, void *addr)
/*
- * ==============
- * = skfp_ioctl =
- * ==============
+ * =======================
+ * = skfp_siocdevprivate =
+ * =======================
*
* Overview:
*
@@ -954,16 +956,19 @@ static int skfp_ctl_set_mac_address(struct net_device *dev, void *addr)
*/
-static int skfp_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+static int skfp_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user *data, int cmd)
{
struct s_smc *smc = netdev_priv(dev);
skfddi_priv *lp = &smc->os;
struct s_skfp_ioctl ioc;
int status = 0;
- if (copy_from_user(&ioc, rq->ifr_data, sizeof(struct s_skfp_ioctl)))
+ if (copy_from_user(&ioc, data, sizeof(struct s_skfp_ioctl)))
return -EFAULT;
+ if (in_compat_syscall())
+ return -EOPNOTSUPP;
+
switch (ioc.cmd) {
case SKFP_GET_STATS: /* Get the driver statistics */
ioc.len = sizeof(lp->MacStat);
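skfp's private payload is copied as a fixed-size struct, and structs of that shape differ between 32-bit and 64-bit userspace, so the handler refuses compat callers until a translation exists. Why such a guard is needed, with an illustrative struct (not the driver's own):

/* Illustrative only: a pointer member makes the payload layout
 * ABI-dependent, which is what the in_compat_syscall() guard protects. */
struct my_private_ioctl {
	int cmd;
	int len;
	void __user *data;	/* 4 bytes for compat tasks, 8 natively */
};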
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index 4435a1195194..775dcf4ebde5 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -1005,7 +1005,8 @@ static int baycom_setmode(struct baycom_state *bc, const char *modestr)
/* --------------------------------------------------------------------- */
-static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+static int baycom_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
+ void __user *data, int cmd)
{
struct baycom_state *bc = netdev_priv(dev);
struct hdlcdrv_ioctl hi;
@@ -1013,7 +1014,7 @@ static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
if (cmd != SIOCDEVPRIVATE)
return -ENOIOCTLCMD;
- if (copy_from_user(&hi, ifr->ifr_data, sizeof(hi)))
+ if (copy_from_user(&hi, data, sizeof(hi)))
return -EFAULT;
switch (hi.cmd) {
default:
@@ -1104,7 +1105,7 @@ static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return HDLCDRV_PARMASK_IOBASE;
}
- if (copy_to_user(ifr->ifr_data, &hi, sizeof(hi)))
+ if (copy_to_user(data, &hi, sizeof(hi)))
return -EFAULT;
return 0;
}
@@ -1114,7 +1115,7 @@ static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
static const struct net_device_ops baycom_netdev_ops = {
.ndo_open = epp_open,
.ndo_stop = epp_close,
- .ndo_do_ioctl = baycom_ioctl,
+ .ndo_siocdevprivate = baycom_siocdevprivate,
.ndo_start_xmit = baycom_send_packet,
.ndo_set_mac_address = baycom_set_mac_address,
};
diff --git a/drivers/net/hamradio/baycom_par.c b/drivers/net/hamradio/baycom_par.c
index 6a3dc7b3f28a..fd7da5bb1fa5 100644
--- a/drivers/net/hamradio/baycom_par.c
+++ b/drivers/net/hamradio/baycom_par.c
@@ -380,7 +380,7 @@ static int par96_close(struct net_device *dev)
* ===================== hdlcdrv driver interface =========================
*/
-static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr,
+static int baycom_ioctl(struct net_device *dev, void __user *data,
struct hdlcdrv_ioctl *hi, int cmd);
/* --------------------------------------------------------------------- */
@@ -408,7 +408,7 @@ static int baycom_setmode(struct baycom_state *bc, const char *modestr)
/* --------------------------------------------------------------------- */
-static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr,
+static int baycom_ioctl(struct net_device *dev, void __user *data,
struct hdlcdrv_ioctl *hi, int cmd)
{
struct baycom_state *bc;
@@ -428,7 +428,7 @@ static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr,
case HDLCDRVCTL_GETMODE:
strcpy(hi->data.modename, bc->options ? "par96" : "picpar");
- if (copy_to_user(ifr->ifr_data, hi, sizeof(struct hdlcdrv_ioctl)))
+ if (copy_to_user(data, hi, sizeof(struct hdlcdrv_ioctl)))
return -EFAULT;
return 0;
@@ -440,7 +440,7 @@ static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr,
case HDLCDRVCTL_MODELIST:
strcpy(hi->data.modename, "par96,picpar");
- if (copy_to_user(ifr->ifr_data, hi, sizeof(struct hdlcdrv_ioctl)))
+ if (copy_to_user(data, hi, sizeof(struct hdlcdrv_ioctl)))
return -EFAULT;
return 0;
@@ -449,7 +449,7 @@ static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr,
}
- if (copy_from_user(&bi, ifr->ifr_data, sizeof(bi)))
+ if (copy_from_user(&bi, data, sizeof(bi)))
return -EFAULT;
switch (bi.cmd) {
default:
@@ -464,7 +464,7 @@ static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr,
#endif /* BAYCOM_DEBUG */
}
- if (copy_to_user(ifr->ifr_data, &bi, sizeof(bi)))
+ if (copy_to_user(data, &bi, sizeof(bi)))
return -EFAULT;
return 0;
diff --git a/drivers/net/hamradio/baycom_ser_fdx.c b/drivers/net/hamradio/baycom_ser_fdx.c
index 04bb409707fc..646f605e358f 100644
--- a/drivers/net/hamradio/baycom_ser_fdx.c
+++ b/drivers/net/hamradio/baycom_ser_fdx.c
@@ -462,7 +462,7 @@ static int ser12_close(struct net_device *dev)
/* --------------------------------------------------------------------- */
-static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr,
+static int baycom_ioctl(struct net_device *dev, void __user *data,
struct hdlcdrv_ioctl *hi, int cmd);
/* --------------------------------------------------------------------- */
@@ -497,7 +497,7 @@ static int baycom_setmode(struct baycom_state *bc, const char *modestr)
/* --------------------------------------------------------------------- */
-static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr,
+static int baycom_ioctl(struct net_device *dev, void __user *data,
struct hdlcdrv_ioctl *hi, int cmd)
{
struct baycom_state *bc;
@@ -519,7 +519,7 @@ static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr,
sprintf(hi->data.modename, "ser%u", bc->baud / 100);
if (bc->opt_dcd <= 0)
strcat(hi->data.modename, (!bc->opt_dcd) ? "*" : "+");
- if (copy_to_user(ifr->ifr_data, hi, sizeof(struct hdlcdrv_ioctl)))
+ if (copy_to_user(data, hi, sizeof(struct hdlcdrv_ioctl)))
return -EFAULT;
return 0;
@@ -531,7 +531,7 @@ static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr,
case HDLCDRVCTL_MODELIST:
strcpy(hi->data.modename, "ser12,ser3,ser24");
- if (copy_to_user(ifr->ifr_data, hi, sizeof(struct hdlcdrv_ioctl)))
+ if (copy_to_user(data, hi, sizeof(struct hdlcdrv_ioctl)))
return -EFAULT;
return 0;
@@ -540,7 +540,7 @@ static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr,
}
- if (copy_from_user(&bi, ifr->ifr_data, sizeof(bi)))
+ if (copy_from_user(&bi, data, sizeof(bi)))
return -EFAULT;
switch (bi.cmd) {
default:
@@ -555,7 +555,7 @@ static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr,
#endif /* BAYCOM_DEBUG */
}
- if (copy_to_user(ifr->ifr_data, &bi, sizeof(bi)))
+ if (copy_to_user(data, &bi, sizeof(bi)))
return -EFAULT;
return 0;
diff --git a/drivers/net/hamradio/baycom_ser_hdx.c b/drivers/net/hamradio/baycom_ser_hdx.c
index a1acb3a47bdb..5d1ab4840753 100644
--- a/drivers/net/hamradio/baycom_ser_hdx.c
+++ b/drivers/net/hamradio/baycom_ser_hdx.c
@@ -521,7 +521,7 @@ static int ser12_close(struct net_device *dev)
/* --------------------------------------------------------------------- */
-static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr,
+static int baycom_ioctl(struct net_device *dev, void __user *data,
struct hdlcdrv_ioctl *hi, int cmd);
/* --------------------------------------------------------------------- */
@@ -551,7 +551,7 @@ static int baycom_setmode(struct baycom_state *bc, const char *modestr)
/* --------------------------------------------------------------------- */
-static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr,
+static int baycom_ioctl(struct net_device *dev, void __user *data,
struct hdlcdrv_ioctl *hi, int cmd)
{
struct baycom_state *bc;
@@ -573,7 +573,7 @@ static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr,
strcpy(hi->data.modename, "ser12");
if (bc->opt_dcd <= 0)
strcat(hi->data.modename, (!bc->opt_dcd) ? "*" : (bc->opt_dcd == -2) ? "@" : "+");
- if (copy_to_user(ifr->ifr_data, hi, sizeof(struct hdlcdrv_ioctl)))
+ if (copy_to_user(data, hi, sizeof(struct hdlcdrv_ioctl)))
return -EFAULT;
return 0;
@@ -585,7 +585,7 @@ static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr,
case HDLCDRVCTL_MODELIST:
strcpy(hi->data.modename, "ser12");
- if (copy_to_user(ifr->ifr_data, hi, sizeof(struct hdlcdrv_ioctl)))
+ if (copy_to_user(data, hi, sizeof(struct hdlcdrv_ioctl)))
return -EFAULT;
return 0;
@@ -594,7 +594,7 @@ static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr,
}
- if (copy_from_user(&bi, ifr->ifr_data, sizeof(bi)))
+ if (copy_from_user(&bi, data, sizeof(bi)))
return -EFAULT;
switch (bi.cmd) {
default:
@@ -609,7 +609,7 @@ static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr,
#endif /* BAYCOM_DEBUG */
}
- if (copy_to_user(ifr->ifr_data, &bi, sizeof(bi)))
+ if (copy_to_user(data, &bi, sizeof(bi)))
return -EFAULT;
return 0;
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index 0e623c2e8b2d..d967b0748773 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -314,9 +314,10 @@ static int bpq_set_mac_address(struct net_device *dev, void *addr)
* source ethernet address (broadcast
* or multicast: accept all)
*/
-static int bpq_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+static int bpq_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
+ void __user *data, int cmd)
{
- struct bpq_ethaddr __user *ethaddr = ifr->ifr_data;
+ struct bpq_ethaddr __user *ethaddr = data;
struct bpqdev *bpq = netdev_priv(dev);
struct bpq_req req;
@@ -325,7 +326,7 @@ static int bpq_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
switch (cmd) {
case SIOCSBPQETHOPT:
- if (copy_from_user(&req, ifr->ifr_data, sizeof(struct bpq_req)))
+ if (copy_from_user(&req, data, sizeof(struct bpq_req)))
return -EFAULT;
switch (req.cmd) {
case SIOCGBPQETHPARAM:
@@ -448,7 +449,7 @@ static const struct net_device_ops bpq_netdev_ops = {
.ndo_stop = bpq_close,
.ndo_start_xmit = bpq_xmit,
.ndo_set_mac_address = bpq_set_mac_address,
- .ndo_do_ioctl = bpq_ioctl,
+ .ndo_siocdevprivate = bpq_siocdevprivate,
};
static void bpq_setup(struct net_device *dev)
diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c
index c25c8c99c5c7..b50b7fafd8d6 100644
--- a/drivers/net/hamradio/dmascc.c
+++ b/drivers/net/hamradio/dmascc.c
@@ -225,7 +225,8 @@ static int read_scc_data(struct scc_priv *priv);
static int scc_open(struct net_device *dev);
static int scc_close(struct net_device *dev);
-static int scc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
+static int scc_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
+ void __user *data, int cmd);
static int scc_send_packet(struct sk_buff *skb, struct net_device *dev);
static int scc_set_mac_address(struct net_device *dev, void *sa);
@@ -432,7 +433,7 @@ static const struct net_device_ops scc_netdev_ops = {
.ndo_open = scc_open,
.ndo_stop = scc_close,
.ndo_start_xmit = scc_send_packet,
- .ndo_do_ioctl = scc_ioctl,
+ .ndo_siocdevprivate = scc_siocdevprivate,
.ndo_set_mac_address = scc_set_mac_address,
};
@@ -881,15 +882,13 @@ static int scc_close(struct net_device *dev)
}
-static int scc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+static int scc_siocdevprivate(struct net_device *dev, struct ifreq *ifr, void __user *data, int cmd)
{
struct scc_priv *priv = dev->ml_priv;
switch (cmd) {
case SIOCGSCCPARAM:
- if (copy_to_user
- (ifr->ifr_data, &priv->param,
- sizeof(struct scc_param)))
+ if (copy_to_user(data, &priv->param, sizeof(struct scc_param)))
return -EFAULT;
return 0;
case SIOCSSCCPARAM:
@@ -897,13 +896,12 @@ static int scc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return -EPERM;
if (netif_running(dev))
return -EAGAIN;
- if (copy_from_user
- (&priv->param, ifr->ifr_data,
- sizeof(struct scc_param)))
+ if (copy_from_user(&priv->param, data,
+ sizeof(struct scc_param)))
return -EFAULT;
return 0;
default:
- return -EINVAL;
+ return -EOPNOTSUPP;
}
}
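Besides switching to the four-argument private hook, dmascc's default case now returns -EOPNOTSUPP rather than -EINVAL, matching what the core reports for devices without a private handler. The minimal handler shape after this kind of conversion, as a sketch (names are illustrative):

static int my_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
			     void __user *data, int cmd)
{
	switch (cmd) {
	case SIOCDEVPRIVATE:
		/* use @data with copy_{from,to}_user(), not ifr->ifr_data */
		return 0;
	default:
		return -EOPNOTSUPP;	/* was commonly -EINVAL before */
	}
}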
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
index cbaf1cdde7cb..5805cfc83854 100644
--- a/drivers/net/hamradio/hdlcdrv.c
+++ b/drivers/net/hamradio/hdlcdrv.c
@@ -483,23 +483,25 @@ static int hdlcdrv_close(struct net_device *dev)
/* --------------------------------------------------------------------- */
-static int hdlcdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+static int hdlcdrv_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
+ void __user *data, int cmd)
{
struct hdlcdrv_state *s = netdev_priv(dev);
struct hdlcdrv_ioctl bi;
- if (cmd != SIOCDEVPRIVATE) {
- if (s->ops && s->ops->ioctl)
- return s->ops->ioctl(dev, ifr, &bi, cmd);
+ if (cmd != SIOCDEVPRIVATE)
return -ENOIOCTLCMD;
- }
- if (copy_from_user(&bi, ifr->ifr_data, sizeof(bi)))
+
+ if (in_compat_syscall()) /* to be implemented */
+ return -ENOIOCTLCMD;
+
+ if (copy_from_user(&bi, data, sizeof(bi)))
return -EFAULT;
switch (bi.cmd) {
default:
if (s->ops && s->ops->ioctl)
- return s->ops->ioctl(dev, ifr, &bi, cmd);
+ return s->ops->ioctl(dev, data, &bi, cmd);
return -ENOIOCTLCMD;
case HDLCDRVCTL_GETCHANNELPAR:
@@ -605,7 +607,7 @@ static int hdlcdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
break;
}
- if (copy_to_user(ifr->ifr_data, &bi, sizeof(bi)))
+ if (copy_to_user(data, &bi, sizeof(bi)))
return -EFAULT;
return 0;
@@ -617,7 +619,7 @@ static const struct net_device_ops hdlcdrv_netdev = {
.ndo_open = hdlcdrv_open,
.ndo_stop = hdlcdrv_close,
.ndo_start_xmit = hdlcdrv_send_packet,
- .ndo_do_ioctl = hdlcdrv_ioctl,
+ .ndo_siocdevprivate = hdlcdrv_siocdevprivate,
.ndo_set_mac_address = hdlcdrv_set_mac_address,
};
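hdlcdrv is the mid-layer behind the baycom drivers above; it now passes the raw void __user pointer down to the hardware driver's ioctl hook, which is why those drivers stopped touching ifr->ifr_data themselves. The assumed shape of the hook table in include/linux/hdlcdrv.h after this change:

struct hdlcdrv_ops {
	const char *drvname;
	const char *drvinfo;

	int (*open)(struct net_device *);
	int (*close)(struct net_device *);
	int (*ioctl)(struct net_device *, void __user *data,
		     struct hdlcdrv_ioctl *hi, int cmd);	/* was struct ifreq * */
};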
diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c
index 3f1edd0526a4..e0bb131a33d7 100644
--- a/drivers/net/hamradio/scc.c
+++ b/drivers/net/hamradio/scc.c
@@ -210,7 +210,8 @@ static int scc_net_close(struct net_device *dev);
static void scc_net_rx(struct scc_channel *scc, struct sk_buff *skb);
static netdev_tx_t scc_net_tx(struct sk_buff *skb,
struct net_device *dev);
-static int scc_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
+static int scc_net_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
+ void __user *data, int cmd);
static int scc_net_set_mac_address(struct net_device *dev, void *addr);
static struct net_device_stats * scc_net_get_stats(struct net_device *dev);
@@ -1550,7 +1551,7 @@ static const struct net_device_ops scc_netdev_ops = {
.ndo_start_xmit = scc_net_tx,
.ndo_set_mac_address = scc_net_set_mac_address,
.ndo_get_stats = scc_net_get_stats,
- .ndo_do_ioctl = scc_net_ioctl,
+ .ndo_siocdevprivate = scc_net_siocdevprivate,
};
/* ----> Initialize device <----- */
@@ -1703,7 +1704,8 @@ static netdev_tx_t scc_net_tx(struct sk_buff *skb, struct net_device *dev)
* SIOCSCCCAL - send calib. pattern arg: (struct scc_calibrate *) arg
*/
-static int scc_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+static int scc_net_siocdevprivate(struct net_device *dev,
+ struct ifreq *ifr, void __user *arg, int cmd)
{
struct scc_kiss_cmd kiss_cmd;
struct scc_mem_config memcfg;
@@ -1712,8 +1714,6 @@ static int scc_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
struct scc_channel *scc = (struct scc_channel *) dev->ml_priv;
int chan;
unsigned char device_name[IFNAMSIZ];
- void __user *arg = ifr->ifr_data;
-
if (!Driver_Initialized)
{
@@ -1722,6 +1722,9 @@ static int scc_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
int found = 1;
if (!capable(CAP_SYS_RAWIO)) return -EPERM;
+ if (in_compat_syscall())
+ return -EOPNOTSUPP;
+
if (!arg) return -EFAULT;
if (Nchips >= SCC_MAXCHIPS)
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index d4911041596c..6ddacbdb224b 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -920,15 +920,15 @@ static int yam_close(struct net_device *dev)
/* --------------------------------------------------------------------- */
-static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+static int yam_siocdevprivate(struct net_device *dev, struct ifreq *ifr, void __user *data, int cmd)
{
struct yam_port *yp = netdev_priv(dev);
struct yamdrv_ioctl_cfg yi;
struct yamdrv_ioctl_mcs *ym;
int ioctl_cmd;
- if (copy_from_user(&ioctl_cmd, ifr->ifr_data, sizeof(int)))
- return -EFAULT;
+ if (copy_from_user(&ioctl_cmd, data, sizeof(int)))
+ return -EFAULT;
if (yp->magic != YAM_MAGIC)
return -EINVAL;
@@ -947,8 +947,7 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
case SIOCYAMSMCS:
if (netif_running(dev))
return -EINVAL; /* Cannot change this parameter when up */
- ym = memdup_user(ifr->ifr_data,
- sizeof(struct yamdrv_ioctl_mcs));
+ ym = memdup_user(data, sizeof(struct yamdrv_ioctl_mcs));
if (IS_ERR(ym))
return PTR_ERR(ym);
if (ym->cmd != SIOCYAMSMCS)
@@ -965,8 +964,8 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
case SIOCYAMSCFG:
if (!capable(CAP_SYS_RAWIO))
return -EPERM;
- if (copy_from_user(&yi, ifr->ifr_data, sizeof(struct yamdrv_ioctl_cfg)))
- return -EFAULT;
+ if (copy_from_user(&yi, data, sizeof(struct yamdrv_ioctl_cfg)))
+ return -EFAULT;
if (yi.cmd != SIOCYAMSCFG)
return -EINVAL;
@@ -1045,8 +1044,8 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
yi.cfg.txtail = yp->txtail;
yi.cfg.persist = yp->pers;
yi.cfg.slottime = yp->slot;
- if (copy_to_user(ifr->ifr_data, &yi, sizeof(struct yamdrv_ioctl_cfg)))
- return -EFAULT;
+ if (copy_to_user(data, &yi, sizeof(struct yamdrv_ioctl_cfg)))
+ return -EFAULT;
break;
default:
@@ -1074,7 +1073,7 @@ static const struct net_device_ops yam_netdev_ops = {
.ndo_open = yam_open,
.ndo_stop = yam_close,
.ndo_start_xmit = yam_send_packet,
- .ndo_do_ioctl = yam_ioctl,
+ .ndo_siocdevprivate = yam_siocdevprivate,
.ndo_set_mac_address = yam_set_mac_address,
};
diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c
index 22010384c4a3..7661dbb31162 100644
--- a/drivers/net/hippi/rrunner.c
+++ b/drivers/net/hippi/rrunner.c
@@ -63,7 +63,7 @@ static const char version[] =
static const struct net_device_ops rr_netdev_ops = {
.ndo_open = rr_open,
.ndo_stop = rr_close,
- .ndo_do_ioctl = rr_ioctl,
+ .ndo_siocdevprivate = rr_siocdevprivate,
.ndo_start_xmit = rr_start_xmit,
.ndo_set_mac_address = hippi_mac_addr,
};
@@ -1568,7 +1568,8 @@ out:
}
-static int rr_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+static int rr_siocdevprivate(struct net_device *dev, struct ifreq *rq,
+ void __user *data, int cmd)
{
struct rr_private *rrpriv;
unsigned char *image, *oldimage;
@@ -1603,7 +1604,7 @@ static int rr_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
error = -EFAULT;
goto gf_out;
}
- error = copy_to_user(rq->ifr_data, image, EEPROM_BYTES);
+ error = copy_to_user(data, image, EEPROM_BYTES);
if (error)
error = -EFAULT;
gf_out:
@@ -1615,7 +1616,7 @@ static int rr_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
return -EPERM;
}
- image = memdup_user(rq->ifr_data, EEPROM_BYTES);
+ image = memdup_user(data, EEPROM_BYTES);
if (IS_ERR(image))
return PTR_ERR(image);
@@ -1658,7 +1659,7 @@ static int rr_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
return error;
case SIOCRRID:
- return put_user(0x52523032, (int __user *)rq->ifr_data);
+ return put_user(0x52523032, (int __user *)data);
default:
return error;
}
diff --git a/drivers/net/hippi/rrunner.h b/drivers/net/hippi/rrunner.h
index 87533784604f..55377614e752 100644
--- a/drivers/net/hippi/rrunner.h
+++ b/drivers/net/hippi/rrunner.h
@@ -835,7 +835,8 @@ static int rr_open(struct net_device *dev);
static netdev_tx_t rr_start_xmit(struct sk_buff *skb,
struct net_device *dev);
static int rr_close(struct net_device *dev);
-static int rr_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int rr_siocdevprivate(struct net_device *dev, struct ifreq *rq,
+ void __user *data, int cmd);
static unsigned int rr_read_eeprom(struct rr_private *rrpriv,
unsigned long offset,
unsigned char *buf,
diff --git a/drivers/net/ipa/Makefile b/drivers/net/ipa/Makefile
index 506f8d5cd4ee..75435d40b920 100644
--- a/drivers/net/ipa/Makefile
+++ b/drivers/net/ipa/Makefile
@@ -1,6 +1,3 @@
-# Un-comment the next line if you want to validate configuration data
-#ccflags-y += -DIPA_VALIDATE
-
obj-$(CONFIG_QCOM_IPA) += ipa.o
ipa-y := ipa_main.o ipa_clock.o ipa_reg.o ipa_mem.o \
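The ipa Makefile drops the opt-in IPA_VALIDATE define; the gsi.c and gsi_trans.c hunks below make the corresponding checks unconditional or convert them to WARN_ON(), so there is no longer a build variant with validation compiled out. The removed pattern looked like this (sketch of the old conditional compilation):

/* previously: compiled out unless the Makefile defined IPA_VALIDATE,
 * which no build enabled by default */
#ifdef IPA_VALIDATE
	if (!size)
		return -EINVAL;
#endif /* IPA_VALIDATE */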
diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
index 427c68b2ad8f..a2fcdb1abdb9 100644
--- a/drivers/net/ipa/gsi.c
+++ b/drivers/net/ipa/gsi.c
@@ -198,77 +198,6 @@ static void gsi_irq_type_disable(struct gsi *gsi, enum gsi_irq_type_id type_id)
gsi_irq_type_update(gsi, gsi->type_enabled_bitmap & ~BIT(type_id));
}
-/* Turn off all GSI interrupts initially; there is no gsi_irq_teardown() */
-static void gsi_irq_setup(struct gsi *gsi)
-{
- /* Disable all interrupt types */
- gsi_irq_type_update(gsi, 0);
-
- /* Clear all type-specific interrupt masks */
- iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
- iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
- iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
- iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
-
- /* The inter-EE interrupts are not supported for IPA v3.0-v3.1 */
- if (gsi->version > IPA_VERSION_3_1) {
- u32 offset;
-
- /* These registers are in the non-adjusted address range */
- offset = GSI_INTER_EE_SRC_CH_IRQ_MSK_OFFSET;
- iowrite32(0, gsi->virt_raw + offset);
- offset = GSI_INTER_EE_SRC_EV_CH_IRQ_MSK_OFFSET;
- iowrite32(0, gsi->virt_raw + offset);
- }
-
- iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
-}
-
-/* Get # supported channel and event rings; there is no gsi_ring_teardown() */
-static int gsi_ring_setup(struct gsi *gsi)
-{
- struct device *dev = gsi->dev;
- u32 count;
- u32 val;
-
- if (gsi->version < IPA_VERSION_3_5_1) {
- /* No HW_PARAM_2 register prior to IPA v3.5.1, assume the max */
- gsi->channel_count = GSI_CHANNEL_COUNT_MAX;
- gsi->evt_ring_count = GSI_EVT_RING_COUNT_MAX;
-
- return 0;
- }
-
- val = ioread32(gsi->virt + GSI_GSI_HW_PARAM_2_OFFSET);
-
- count = u32_get_bits(val, NUM_CH_PER_EE_FMASK);
- if (!count) {
- dev_err(dev, "GSI reports zero channels supported\n");
- return -EINVAL;
- }
- if (count > GSI_CHANNEL_COUNT_MAX) {
- dev_warn(dev, "limiting to %u channels; hardware supports %u\n",
- GSI_CHANNEL_COUNT_MAX, count);
- count = GSI_CHANNEL_COUNT_MAX;
- }
- gsi->channel_count = count;
-
- count = u32_get_bits(val, NUM_EV_PER_EE_FMASK);
- if (!count) {
- dev_err(dev, "GSI reports zero event rings supported\n");
- return -EINVAL;
- }
- if (count > GSI_EVT_RING_COUNT_MAX) {
- dev_warn(dev,
- "limiting to %u event rings; hardware supports %u\n",
- GSI_EVT_RING_COUNT_MAX, count);
- count = GSI_EVT_RING_COUNT_MAX;
- }
- gsi->evt_ring_count = count;
-
- return 0;
-}
-
/* Event ring commands are performed one at a time. Their completion
* is signaled by the event ring control GSI interrupt type, which is
* only enabled when we issue an event ring command. Only the event
@@ -920,12 +849,13 @@ static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
/* All done! */
}
-static int __gsi_channel_start(struct gsi_channel *channel, bool start)
+static int __gsi_channel_start(struct gsi_channel *channel, bool resume)
{
struct gsi *gsi = channel->gsi;
int ret;
- if (!start)
+ /* Prior to IPA v4.0 suspend/resume is not implemented by GSI */
+ if (resume && gsi->version < IPA_VERSION_4_0)
return 0;
mutex_lock(&gsi->mutex);
@@ -947,7 +877,7 @@ int gsi_channel_start(struct gsi *gsi, u32 channel_id)
napi_enable(&channel->napi);
gsi_irq_ieob_enable_one(gsi, channel->evt_ring_id);
- ret = __gsi_channel_start(channel, true);
+ ret = __gsi_channel_start(channel, false);
if (ret) {
gsi_irq_ieob_disable_one(gsi, channel->evt_ring_id);
napi_disable(&channel->napi);
@@ -971,7 +901,7 @@ static int gsi_channel_stop_retry(struct gsi_channel *channel)
return ret;
}
-static int __gsi_channel_stop(struct gsi_channel *channel, bool stop)
+static int __gsi_channel_stop(struct gsi_channel *channel, bool suspend)
{
struct gsi *gsi = channel->gsi;
int ret;
@@ -979,7 +909,8 @@ static int __gsi_channel_stop(struct gsi_channel *channel, bool stop)
/* Wait for any underway transactions to complete before stopping. */
gsi_channel_trans_quiesce(channel);
- if (!stop)
+ /* Prior to IPA v4.0 suspend/resume is not implemented by GSI */
+ if (suspend && gsi->version < IPA_VERSION_4_0)
return 0;
mutex_lock(&gsi->mutex);
@@ -997,7 +928,7 @@ int gsi_channel_stop(struct gsi *gsi, u32 channel_id)
struct gsi_channel *channel = &gsi->channel[channel_id];
int ret;
- ret = __gsi_channel_stop(channel, true);
+ ret = __gsi_channel_stop(channel, false);
if (ret)
return ret;
@@ -1026,13 +957,13 @@ void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool doorbell)
mutex_unlock(&gsi->mutex);
}
-/* Stop a STARTED channel for suspend (using stop if requested) */
-int gsi_channel_suspend(struct gsi *gsi, u32 channel_id, bool stop)
+/* Stop a started channel for suspend */
+int gsi_channel_suspend(struct gsi *gsi, u32 channel_id)
{
struct gsi_channel *channel = &gsi->channel[channel_id];
int ret;
- ret = __gsi_channel_stop(channel, stop);
+ ret = __gsi_channel_stop(channel, true);
if (ret)
return ret;
@@ -1042,12 +973,24 @@ int gsi_channel_suspend(struct gsi *gsi, u32 channel_id, bool stop)
return 0;
}
-/* Resume a suspended channel (starting will be requested if STOPPED) */
-int gsi_channel_resume(struct gsi *gsi, u32 channel_id, bool start)
+/* Resume a suspended channel (starting if stopped) */
+int gsi_channel_resume(struct gsi *gsi, u32 channel_id)
{
struct gsi_channel *channel = &gsi->channel[channel_id];
- return __gsi_channel_start(channel, start);
+ return __gsi_channel_start(channel, true);
+}
+
+/* Prevent all GSI interrupts while suspended */
+void gsi_suspend(struct gsi *gsi)
+{
+ disable_irq(gsi->irq);
+}
+
+/* Allow all GSI interrupts again when resuming */
+void gsi_resume(struct gsi *gsi)
+{
+ enable_irq(gsi->irq);
}
/**
@@ -1372,33 +1315,20 @@ static irqreturn_t gsi_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
+/* Init function for GSI IRQ lookup; there is no gsi_irq_exit() */
static int gsi_irq_init(struct gsi *gsi, struct platform_device *pdev)
{
- struct device *dev = &pdev->dev;
- unsigned int irq;
int ret;
ret = platform_get_irq_byname(pdev, "gsi");
if (ret <= 0)
return ret ? : -EINVAL;
- irq = ret;
-
- ret = request_irq(irq, gsi_isr, 0, "gsi", gsi);
- if (ret) {
- dev_err(dev, "error %d requesting \"gsi\" IRQ\n", ret);
- return ret;
- }
- gsi->irq = irq;
+ gsi->irq = ret;
return 0;
}
-static void gsi_irq_exit(struct gsi *gsi)
-{
- free_irq(gsi->irq, gsi);
-}
-
/* Return the transaction associated with a transfer completion event */
static struct gsi_trans *gsi_event_trans(struct gsi_channel *channel,
struct gsi_event *event)
@@ -1876,6 +1806,93 @@ static void gsi_channel_teardown(struct gsi *gsi)
gsi_irq_disable(gsi);
}
+/* Turn off all GSI interrupts initially */
+static int gsi_irq_setup(struct gsi *gsi)
+{
+ int ret;
+
+ /* Writing 1 indicates IRQ interrupts; 0 would be MSI */
+ iowrite32(1, gsi->virt + GSI_CNTXT_INTSET_OFFSET);
+
+ /* Disable all interrupt types */
+ gsi_irq_type_update(gsi, 0);
+
+ /* Clear all type-specific interrupt masks */
+ iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
+ iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
+ iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
+ iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
+
+ /* The inter-EE interrupts are not supported for IPA v3.0-v3.1 */
+ if (gsi->version > IPA_VERSION_3_1) {
+ u32 offset;
+
+ /* These registers are in the non-adjusted address range */
+ offset = GSI_INTER_EE_SRC_CH_IRQ_MSK_OFFSET;
+ iowrite32(0, gsi->virt_raw + offset);
+ offset = GSI_INTER_EE_SRC_EV_CH_IRQ_MSK_OFFSET;
+ iowrite32(0, gsi->virt_raw + offset);
+ }
+
+ iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
+
+ ret = request_irq(gsi->irq, gsi_isr, 0, "gsi", gsi);
+ if (ret)
+ dev_err(gsi->dev, "error %d requesting \"gsi\" IRQ\n", ret);
+
+ return ret;
+}
+
+static void gsi_irq_teardown(struct gsi *gsi)
+{
+ free_irq(gsi->irq, gsi);
+}
+
+/* Get # supported channel and event rings; there is no gsi_ring_teardown() */
+static int gsi_ring_setup(struct gsi *gsi)
+{
+ struct device *dev = gsi->dev;
+ u32 count;
+ u32 val;
+
+ if (gsi->version < IPA_VERSION_3_5_1) {
+ /* No HW_PARAM_2 register prior to IPA v3.5.1, assume the max */
+ gsi->channel_count = GSI_CHANNEL_COUNT_MAX;
+ gsi->evt_ring_count = GSI_EVT_RING_COUNT_MAX;
+
+ return 0;
+ }
+
+ val = ioread32(gsi->virt + GSI_GSI_HW_PARAM_2_OFFSET);
+
+ count = u32_get_bits(val, NUM_CH_PER_EE_FMASK);
+ if (!count) {
+ dev_err(dev, "GSI reports zero channels supported\n");
+ return -EINVAL;
+ }
+ if (count > GSI_CHANNEL_COUNT_MAX) {
+ dev_warn(dev, "limiting to %u channels; hardware supports %u\n",
+ GSI_CHANNEL_COUNT_MAX, count);
+ count = GSI_CHANNEL_COUNT_MAX;
+ }
+ gsi->channel_count = count;
+
+ count = u32_get_bits(val, NUM_EV_PER_EE_FMASK);
+ if (!count) {
+ dev_err(dev, "GSI reports zero event rings supported\n");
+ return -EINVAL;
+ }
+ if (count > GSI_EVT_RING_COUNT_MAX) {
+ dev_warn(dev,
+ "limiting to %u event rings; hardware supports %u\n",
+ GSI_EVT_RING_COUNT_MAX, count);
+ count = GSI_EVT_RING_COUNT_MAX;
+ }
+ gsi->evt_ring_count = count;
+
+ return 0;
+}
+
/* Setup function for GSI. GSI firmware must be loaded and initialized */
int gsi_setup(struct gsi *gsi)
{
@@ -1889,25 +1906,34 @@ int gsi_setup(struct gsi *gsi)
return -EIO;
}
- gsi_irq_setup(gsi); /* No matching teardown required */
+ ret = gsi_irq_setup(gsi);
+ if (ret)
+ return ret;
ret = gsi_ring_setup(gsi); /* No matching teardown required */
if (ret)
- return ret;
+ goto err_irq_teardown;
/* Initialize the error log */
iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET);
- /* Writing 1 indicates IRQ interrupts; 0 would be MSI */
- iowrite32(1, gsi->virt + GSI_CNTXT_INTSET_OFFSET);
+ ret = gsi_channel_setup(gsi);
+ if (ret)
+ goto err_irq_teardown;
- return gsi_channel_setup(gsi);
+ return 0;
+
+err_irq_teardown:
+ gsi_irq_teardown(gsi);
+
+ return ret;
}
/* Inverse of gsi_setup() */
void gsi_teardown(struct gsi *gsi)
{
gsi_channel_teardown(gsi);
+ gsi_irq_teardown(gsi);
}
/* Initialize a channel's event ring */
@@ -1964,7 +1990,6 @@ static void gsi_evt_ring_init(struct gsi *gsi)
static bool gsi_channel_data_valid(struct gsi *gsi,
const struct ipa_gsi_endpoint_data *data)
{
-#ifdef IPA_VALIDATION
u32 channel_id = data->channel_id;
struct device *dev = gsi->dev;
@@ -2010,7 +2035,6 @@ static bool gsi_channel_data_valid(struct gsi *gsi,
channel_id, data->channel.event_count);
return false;
}
-#endif /* IPA_VALIDATION */
return true;
}
@@ -2206,20 +2230,18 @@ int gsi_init(struct gsi *gsi, struct platform_device *pdev,
init_completion(&gsi->completion);
- ret = gsi_irq_init(gsi, pdev);
+ ret = gsi_irq_init(gsi, pdev); /* No matching exit required */
if (ret)
goto err_iounmap;
ret = gsi_channel_init(gsi, count, data);
if (ret)
- goto err_irq_exit;
+ goto err_iounmap;
mutex_init(&gsi->mutex);
return 0;
-err_irq_exit:
- gsi_irq_exit(gsi);
err_iounmap:
iounmap(gsi->virt_raw);
@@ -2231,7 +2253,6 @@ void gsi_exit(struct gsi *gsi)
{
mutex_destroy(&gsi->mutex);
gsi_channel_exit(gsi);
- gsi_irq_exit(gsi);
iounmap(gsi->virt_raw);
}
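The IRQ lifecycle in gsi.c is rebalanced above: gsi_irq_init() now only looks up the IRQ number at probe time, while request_irq() moves into gsi_irq_setup(), which first selects IRQ (not MSI) mode via INTSET and masks every interrupt source, so the ISR cannot fire against an unconfigured controller. gsi_irq_teardown() is the new free_irq() counterpart on both the error and teardown paths. A condensed sketch of the resulting setup order (my_gsi_setup() is a stand-in; compare with the gsi_setup() hunk above):

static int my_gsi_setup(struct gsi *gsi)
{
	int ret;

	ret = gsi_irq_setup(gsi);	/* mask all sources, then request_irq() */
	if (ret)
		return ret;

	ret = gsi_ring_setup(gsi);	/* needs no teardown counterpart */
	if (ret)
		goto err_irq_teardown;

	ret = gsi_channel_setup(gsi);
	if (ret)
		goto err_irq_teardown;

	return 0;

err_irq_teardown:
	gsi_irq_teardown(gsi);		/* free_irq() on unwind */
	return ret;
}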
diff --git a/drivers/net/ipa/gsi.h b/drivers/net/ipa/gsi.h
index 81cd7b07f6e1..88b80dc3db79 100644
--- a/drivers/net/ipa/gsi.h
+++ b/drivers/net/ipa/gsi.h
@@ -232,8 +232,35 @@ int gsi_channel_stop(struct gsi *gsi, u32 channel_id);
*/
void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool doorbell);
-int gsi_channel_suspend(struct gsi *gsi, u32 channel_id, bool stop);
-int gsi_channel_resume(struct gsi *gsi, u32 channel_id, bool start);
+/**
+ * gsi_suspend() - Prepare the GSI subsystem for suspend
+ * @gsi: GSI pointer
+ */
+void gsi_suspend(struct gsi *gsi);
+
+/**
+ * gsi_resume() - Resume the GSI subsystem following suspend
+ * @gsi: GSI pointer
+ */
+void gsi_resume(struct gsi *gsi);
+
+/**
+ * gsi_channel_suspend() - Suspend a GSI channel
+ * @gsi: GSI pointer
+ * @channel_id: Channel to suspend
+ *
+ * For IPA v4.0+, suspend is implemented by stopping the channel.
+ */
+int gsi_channel_suspend(struct gsi *gsi, u32 channel_id);
+
+/**
+ * gsi_channel_resume() - Resume a suspended GSI channel
+ * @gsi: GSI pointer
+ * @channel_id: Channel to resume
+ *
+ * For IPA v4.0+, the stopped channel is started again.
+ */
+int gsi_channel_resume(struct gsi *gsi, u32 channel_id);
/**
* gsi_init() - Initialize the GSI subsystem
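
With the @stop and @start booleans gone from these prototypes, the version policy that callers used to compute moves inside GSI itself. A hedged sketch of what the suspend half plausibly becomes; __gsi_channel_stop() is an assumed internal helper, not something shown in this hunk:

/* Sketch, not the verified implementation */
int gsi_channel_suspend(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	/* For IPA v4.0+, suspend is implemented by stopping the channel */
	bool stop = gsi->version >= IPA_VERSION_4_0;

	return __gsi_channel_stop(channel, stop);	/* assumed helper */
}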
diff --git a/drivers/net/ipa/gsi_trans.c b/drivers/net/ipa/gsi_trans.c
index 8c795a6a8598..1544564bc283 100644
--- a/drivers/net/ipa/gsi_trans.c
+++ b/drivers/net/ipa/gsi_trans.c
@@ -90,14 +90,12 @@ int gsi_trans_pool_init(struct gsi_trans_pool *pool, size_t size, u32 count,
{
void *virt;
-#ifdef IPA_VALIDATE
if (!size)
return -EINVAL;
if (count < max_alloc)
return -EINVAL;
if (!max_alloc)
return -EINVAL;
-#endif /* IPA_VALIDATE */
/* By allocating a few extra entries in our pool (one less
* than the maximum number that will be requested in a
@@ -140,14 +138,12 @@ int gsi_trans_pool_init_dma(struct device *dev, struct gsi_trans_pool *pool,
dma_addr_t addr;
void *virt;
-#ifdef IPA_VALIDATE
if (!size)
return -EINVAL;
if (count < max_alloc)
return -EINVAL;
if (!max_alloc)
return -EINVAL;
-#endif /* IPA_VALIDATE */
/* Don't let allocations cross a power-of-two boundary */
size = __roundup_pow_of_two(size);
@@ -188,8 +184,8 @@ static u32 gsi_trans_pool_alloc_common(struct gsi_trans_pool *pool, u32 count)
{
u32 offset;
- /* assert(count > 0); */
- /* assert(count <= pool->max_alloc); */
+ WARN_ON(!count);
+ WARN_ON(count > pool->max_alloc);
/* Allocate from beginning if wrap would occur */
if (count > pool->count - pool->free)
@@ -225,9 +221,10 @@ void *gsi_trans_pool_next(struct gsi_trans_pool *pool, void *element)
{
void *end = pool->base + pool->count * pool->size;
- /* assert(element >= pool->base); */
- /* assert(element < end); */
- /* assert(pool->max_alloc == 1); */
+ WARN_ON(element < pool->base);
+ WARN_ON(element >= end);
+ WARN_ON(pool->max_alloc != 1);
+
element += pool->size;
return element < end ? element : pool->base;
@@ -332,7 +329,8 @@ struct gsi_trans *gsi_channel_trans_alloc(struct gsi *gsi, u32 channel_id,
struct gsi_trans_info *trans_info;
struct gsi_trans *trans;
- /* assert(tre_count <= gsi_channel_trans_tre_max(gsi, channel_id)); */
+ if (WARN_ON(tre_count > gsi_channel_trans_tre_max(gsi, channel_id)))
+ return NULL;
trans_info = &channel->trans_info;
@@ -408,7 +406,7 @@ void gsi_trans_cmd_add(struct gsi_trans *trans, void *buf, u32 size,
u32 which = trans->used++;
struct scatterlist *sg;
- /* assert(which < trans->tre_count); */
+ WARN_ON(which >= trans->tre_count);
/* Commands are quite different from data transfer requests.
* Their payloads come from a pool whose memory is allocated
@@ -441,8 +439,10 @@ int gsi_trans_page_add(struct gsi_trans *trans, struct page *page, u32 size,
struct scatterlist *sg = &trans->sgl[0];
int ret;
- /* assert(trans->tre_count == 1); */
- /* assert(!trans->used); */
+ if (WARN_ON(trans->tre_count != 1))
+ return -EINVAL;
+ if (WARN_ON(trans->used))
+ return -EINVAL;
sg_set_page(sg, page, size, offset);
ret = dma_map_sg(trans->gsi->dev, sg, 1, trans->direction);
@@ -461,8 +461,10 @@ int gsi_trans_skb_add(struct gsi_trans *trans, struct sk_buff *skb)
u32 used;
int ret;
- /* assert(trans->tre_count == 1); */
- /* assert(!trans->used); */
+ if (WARN_ON(trans->tre_count != 1))
+ return -EINVAL;
+ if (WARN_ON(trans->used))
+ return -EINVAL;
/* skb->len will not be 0 (checked early) */
ret = skb_to_sgvec(skb, sg, 0, skb->len);
@@ -550,7 +552,7 @@ static void __gsi_trans_commit(struct gsi_trans *trans, bool ring_db)
u32 avail;
u32 i;
- /* assert(trans->used > 0); */
+ WARN_ON(!trans->used);
/* Consume the entries. If we cross the end of the ring while
* filling them we'll switch to the beginning to finish.
diff --git a/drivers/net/ipa/ipa.h b/drivers/net/ipa/ipa.h
index 744406832a77..34152fe02963 100644
--- a/drivers/net/ipa/ipa.h
+++ b/drivers/net/ipa/ipa.h
@@ -28,19 +28,8 @@ struct ipa_smp2p;
struct ipa_interrupt;
/**
- * enum ipa_flag - IPA state flags
- * @IPA_FLAG_RESUMED: Whether resume from suspend has been signaled
- * @IPA_FLAG_COUNT: Number of defined IPA flags
- */
-enum ipa_flag {
- IPA_FLAG_RESUMED,
- IPA_FLAG_COUNT, /* Last; not a flag */
-};
-
-/**
* struct ipa - IPA information
* @gsi: Embedded GSI structure
- * @flags: Boolean state flags
* @version: IPA hardware version
* @pdev: Platform device
* @completion: Used to signal pipeline clear transfer complete
@@ -51,6 +40,7 @@ enum ipa_flag {
* @table_addr: DMA address of filter/route table content
* @table_virt: Virtual address of filter/route table content
* @interrupt: IPA Interrupt information
+ * @uc_clocked: true if clock is active by proxy for microcontroller
* @uc_loaded: true after microcontroller has reported it's ready
* @reg_addr: DMA address used for IPA register access
* @reg_virt: Virtual address used for IPA register access
@@ -82,7 +72,6 @@ enum ipa_flag {
*/
struct ipa {
struct gsi gsi;
- DECLARE_BITMAP(flags, IPA_FLAG_COUNT);
enum ipa_version version;
struct platform_device *pdev;
struct completion completion;
@@ -95,6 +84,7 @@ struct ipa {
__le64 *table_virt;
struct ipa_interrupt *interrupt;
+ bool uc_clocked;
bool uc_loaded;
dma_addr_t reg_addr;
diff --git a/drivers/net/ipa/ipa_clock.c b/drivers/net/ipa/ipa_clock.c
index 69ef6ea41e61..8f25107c1f1e 100644
--- a/drivers/net/ipa/ipa_clock.c
+++ b/drivers/net/ipa/ipa_clock.c
@@ -4,14 +4,16 @@
* Copyright (C) 2018-2021 Linaro Ltd.
*/
-#include <linux/refcount.h>
-#include <linux/mutex.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/interconnect.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/bitops.h>
#include "ipa.h"
#include "ipa_clock.h"
+#include "ipa_endpoint.h"
#include "ipa_modem.h"
#include "ipa_data.h"
@@ -43,17 +45,29 @@ struct ipa_interconnect {
};
/**
+ * enum ipa_power_flag - IPA power flags
+ * @IPA_POWER_FLAG_RESUMED: Whether resume from suspend has been signaled
+ * @IPA_POWER_FLAG_SYSTEM: Hardware is system (not runtime) suspended
+ * @IPA_POWER_FLAG_COUNT: Number of defined power flags
+ */
+enum ipa_power_flag {
+ IPA_POWER_FLAG_RESUMED,
+ IPA_POWER_FLAG_SYSTEM,
+ IPA_POWER_FLAG_COUNT, /* Last; not a flag */
+};
+
+/**
* struct ipa_clock - IPA clocking information
- * @count: Clocking reference count
- * @mutex: Protects clock enable/disable
+ * @dev: IPA device pointer
* @core: IPA core clock
+ * @flags: Boolean state flags
* @interconnect_count: Number of elements in interconnect[]
* @interconnect: Interconnect array
*/
struct ipa_clock {
- refcount_t count;
- struct mutex mutex; /* protects clock enable/disable */
+ struct device *dev;
struct clk *core;
+ DECLARE_BITMAP(flags, IPA_POWER_FLAG_COUNT);
u32 interconnect_count;
struct ipa_interconnect *interconnect;
};
@@ -144,8 +158,12 @@ static int ipa_interconnect_enable(struct ipa *ipa)
ret = icc_set_bw(interconnect->path,
interconnect->average_bandwidth,
interconnect->peak_bandwidth);
- if (ret)
+ if (ret) {
+ dev_err(&ipa->pdev->dev,
+ "error %d enabling %s interconnect\n",
+ ret, icc_get_name(interconnect->path));
goto out_unwind;
+ }
interconnect++;
}
@@ -159,10 +177,11 @@ out_unwind:
}
/* To disable an interconnect, we just set its bandwidth to 0 */
-static void ipa_interconnect_disable(struct ipa *ipa)
+static int ipa_interconnect_disable(struct ipa *ipa)
{
struct ipa_interconnect *interconnect;
struct ipa_clock *clock = ipa->clock;
+ struct device *dev = &ipa->pdev->dev;
int result = 0;
u32 count;
int ret;
@@ -172,13 +191,16 @@ static void ipa_interconnect_disable(struct ipa *ipa)
while (count--) {
interconnect--;
ret = icc_set_bw(interconnect->path, 0, 0);
- if (ret && !result)
- result = ret;
+ if (ret) {
+ dev_err(dev, "error %d disabling %s interconnect\n",
+ ret, icc_get_name(interconnect->path));
+ /* Try to disable all; record only the first error */
+ if (!result)
+ result = ret;
+ }
}
- if (result)
- dev_err(&ipa->pdev->dev,
- "error %d disabling IPA interconnects\n", ret);
+ return result;
}
/* Turn on IPA clocks, including interconnects */
@@ -191,78 +213,95 @@ static int ipa_clock_enable(struct ipa *ipa)
return ret;
ret = clk_prepare_enable(ipa->clock->core);
- if (ret)
- ipa_interconnect_disable(ipa);
+ if (ret) {
+ dev_err(&ipa->pdev->dev, "error %d enabling core clock\n", ret);
+ (void)ipa_interconnect_disable(ipa);
+ }
return ret;
}
/* Inverse of ipa_clock_enable() */
-static void ipa_clock_disable(struct ipa *ipa)
+static int ipa_clock_disable(struct ipa *ipa)
{
clk_disable_unprepare(ipa->clock->core);
- ipa_interconnect_disable(ipa);
-}
-/* Get an IPA clock reference, but only if the reference count is
- * already non-zero. Returns true if the additional reference was
- * added successfully, or false otherwise.
- */
-bool ipa_clock_get_additional(struct ipa *ipa)
-{
- return refcount_inc_not_zero(&ipa->clock->count);
+ return ipa_interconnect_disable(ipa);
}
-/* Get an IPA clock reference. If the reference count is non-zero, it is
- * incremented and return is immediate. Otherwise it is checked again
- * under protection of the mutex, and if appropriate the IPA clock
- * is enabled.
- *
- * Incrementing the reference count is intentionally deferred until
- * after the clock is running and endpoints are resumed.
- */
-void ipa_clock_get(struct ipa *ipa)
+static int ipa_runtime_suspend(struct device *dev)
{
- struct ipa_clock *clock = ipa->clock;
- int ret;
+ struct ipa *ipa = dev_get_drvdata(dev);
- /* If the clock is running, just bump the reference count */
- if (ipa_clock_get_additional(ipa))
- return;
+ /* Endpoints aren't usable until setup is complete */
+ if (ipa->setup_complete) {
+ __clear_bit(IPA_POWER_FLAG_RESUMED, ipa->clock->flags);
+ ipa_endpoint_suspend(ipa);
+ gsi_suspend(&ipa->gsi);
+ }
- /* Otherwise get the mutex and check again */
- mutex_lock(&clock->mutex);
+ return ipa_clock_disable(ipa);
+}
- /* A reference might have been added before we got the mutex. */
- if (ipa_clock_get_additional(ipa))
- goto out_mutex_unlock;
+static int ipa_runtime_resume(struct device *dev)
+{
+ struct ipa *ipa = dev_get_drvdata(dev);
+ int ret;
ret = ipa_clock_enable(ipa);
- if (ret) {
- dev_err(&ipa->pdev->dev, "error %d enabling IPA clock\n", ret);
- goto out_mutex_unlock;
+ if (WARN_ON(ret < 0))
+ return ret;
+
+ /* Endpoints aren't usable until setup is complete */
+ if (ipa->setup_complete) {
+ gsi_resume(&ipa->gsi);
+ ipa_endpoint_resume(ipa);
}
- refcount_set(&clock->count, 1);
+ return 0;
+}
+
+static int ipa_runtime_idle(struct device *dev)
+{
+ return -EAGAIN;
+}
-out_mutex_unlock:
- mutex_unlock(&clock->mutex);
+/* Get an IPA clock reference. If the reference count is non-zero, it is
+ * incremented and return is immediate. Otherwise the IPA clock is
+ * enabled.
+ */
+int ipa_clock_get(struct ipa *ipa)
+{
+ return pm_runtime_get_sync(&ipa->pdev->dev);
}
/* Attempt to remove an IPA clock reference. If this represents the
- * last reference, disable the IPA clock under protection of the mutex.
+ * last reference, disable the IPA clock.
*/
-void ipa_clock_put(struct ipa *ipa)
+int ipa_clock_put(struct ipa *ipa)
{
- struct ipa_clock *clock = ipa->clock;
+ return pm_runtime_put(&ipa->pdev->dev);
+}
+
+static int ipa_suspend(struct device *dev)
+{
+ struct ipa *ipa = dev_get_drvdata(dev);
+
+ __set_bit(IPA_POWER_FLAG_SYSTEM, ipa->clock->flags);
+
+ return pm_runtime_force_suspend(dev);
+}
+
+static int ipa_resume(struct device *dev)
+{
+ struct ipa *ipa = dev_get_drvdata(dev);
+ int ret;
- /* If this is not the last reference there's nothing more to do */
- if (!refcount_dec_and_mutex_lock(&clock->count, &clock->mutex))
- return;
+ ret = pm_runtime_force_resume(dev);
- ipa_clock_disable(ipa);
+ __clear_bit(IPA_POWER_FLAG_SYSTEM, ipa->clock->flags);
- mutex_unlock(&clock->mutex);
+ return ret;
}
/* Return the current IPA core clock rate */
@@ -271,6 +310,50 @@ u32 ipa_clock_rate(struct ipa *ipa)
return ipa->clock ? (u32)clk_get_rate(ipa->clock->core) : 0;
}
+/**
+ * ipa_suspend_handler() - Handle the suspend IPA interrupt
+ * @ipa: IPA pointer
+ * @irq_id: IPA interrupt type (unused)
+ *
+ * If an RX endpoint is suspended, and the IPA has a packet destined for
+ * that endpoint, the IPA generates a SUSPEND interrupt to inform the AP
+ * that it should resume the endpoint. If we get one of these interrupts
+ * we just wake up the system.
+ */
+static void ipa_suspend_handler(struct ipa *ipa, enum ipa_irq_id irq_id)
+{
+ /* To handle an IPA interrupt we will have resumed the hardware
+ * just to handle the interrupt, so we're done. If we are in a
+ * system suspend, trigger a system resume.
+ */
+ if (!__test_and_set_bit(IPA_POWER_FLAG_RESUMED, ipa->clock->flags))
+ if (test_bit(IPA_POWER_FLAG_SYSTEM, ipa->clock->flags))
+ pm_wakeup_dev_event(&ipa->pdev->dev, 0, true);
+
+ /* Acknowledge/clear the suspend interrupt on all endpoints */
+ ipa_interrupt_suspend_clear_all(ipa->interrupt);
+}
+
+int ipa_power_setup(struct ipa *ipa)
+{
+ int ret;
+
+ ipa_interrupt_add(ipa->interrupt, IPA_IRQ_TX_SUSPEND,
+ ipa_suspend_handler);
+
+ ret = device_init_wakeup(&ipa->pdev->dev, true);
+ if (ret)
+ ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_TX_SUSPEND);
+
+ return ret;
+}
+
+void ipa_power_teardown(struct ipa *ipa)
+{
+ (void)device_init_wakeup(&ipa->pdev->dev, false);
+ ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_TX_SUSPEND);
+}
+
/* Initialize IPA clocking */
struct ipa_clock *
ipa_clock_init(struct device *dev, const struct ipa_clock_data *data)
@@ -298,6 +381,7 @@ ipa_clock_init(struct device *dev, const struct ipa_clock_data *data)
ret = -ENOMEM;
goto err_clk_put;
}
+ clock->dev = dev;
clock->core = clk;
clock->interconnect_count = data->interconnect_count;
@@ -305,8 +389,8 @@ ipa_clock_init(struct device *dev, const struct ipa_clock_data *data)
if (ret)
goto err_kfree;
- mutex_init(&clock->mutex);
- refcount_set(&clock->count, 0);
+ pm_runtime_dont_use_autosuspend(dev);
+ pm_runtime_enable(dev);
return clock;
@@ -323,9 +407,16 @@ void ipa_clock_exit(struct ipa_clock *clock)
{
struct clk *clk = clock->core;
- WARN_ON(refcount_read(&clock->count) != 0);
- mutex_destroy(&clock->mutex);
+ pm_runtime_disable(clock->dev);
ipa_interconnect_exit(clock);
kfree(clock);
clk_put(clk);
}
+
+const struct dev_pm_ops ipa_pm_ops = {
+ .suspend = ipa_suspend,
+ .resume = ipa_resume,
+ .runtime_suspend = ipa_runtime_suspend,
+ .runtime_resume = ipa_runtime_resume,
+ .runtime_idle = ipa_runtime_idle,
+};
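
For these callbacks to run, ipa_pm_ops must be referenced from the platform driver definition in ipa_main.c; the hookup is essentially the following (other fields elided). Note also that returning -EAGAIN from .runtime_idle, as ipa_runtime_idle() does above, stops the PM core from turning an idle notification into a runtime suspend, so at this point in the series the hardware is only suspended via the system-sleep path.

static struct platform_driver ipa_driver = {
	.probe		= ipa_probe,
	.remove		= ipa_remove,
	.shutdown	= ipa_shutdown,
	.driver		= {
		.name		= "ipa",
		.pm		= &ipa_pm_ops,
		.of_match_table	= ipa_match,
	},
};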
diff --git a/drivers/net/ipa/ipa_clock.h b/drivers/net/ipa/ipa_clock.h
index 1fe634760e59..5c53241336a1 100644
--- a/drivers/net/ipa/ipa_clock.h
+++ b/drivers/net/ipa/ipa_clock.h
@@ -11,6 +11,9 @@ struct device;
struct ipa;
struct ipa_clock_data;
+/* IPA device power management function block */
+extern const struct dev_pm_ops ipa_pm_ops;
+
/**
* ipa_clock_rate() - Return the current IPA core clock rate
* @ipa: IPA structure
@@ -20,6 +23,20 @@ struct ipa_clock_data;
u32 ipa_clock_rate(struct ipa *ipa);
/**
+ * ipa_power_setup() - Set up IPA power management
+ * @ipa: IPA pointer
+ *
+ * Return: 0 if successful, or a negative error code
+ */
+int ipa_power_setup(struct ipa *ipa);
+
+/**
+ * ipa_power_teardown() - Inverse of ipa_power_setup()
+ * @ipa: IPA pointer
+ */
+void ipa_power_teardown(struct ipa *ipa);
+
+/**
* ipa_clock_init() - Initialize IPA clocking
* @dev: IPA device
* @data: Clock configuration data
@@ -39,26 +56,24 @@ void ipa_clock_exit(struct ipa_clock *clock);
* ipa_clock_get() - Get an IPA clock reference
* @ipa: IPA pointer
*
- * This call blocks if this is the first reference.
- */
-void ipa_clock_get(struct ipa *ipa);
-
-/**
- * ipa_clock_get_additional() - Get an IPA clock reference if not first
- * @ipa: IPA pointer
+ * Return: 0 if clock started, 1 if clock already running, or a negative
+ * error code
*
- * This returns immediately, and only takes a reference if not the first
+ * This call blocks if this is the first reference. A reference is
+ * taken even if an error occurs starting the IPA clock.
*/
-bool ipa_clock_get_additional(struct ipa *ipa);
+int ipa_clock_get(struct ipa *ipa);
/**
* ipa_clock_put() - Drop an IPA clock reference
* @ipa: IPA pointer
*
+ * Return: 0 if successful, or a negative error code
+ *
* This drops a clock reference. If the last reference is being dropped,
* the clock is stopped and RX endpoints are suspended. This call will
* not block unless the last reference is dropped.
*/
-void ipa_clock_put(struct ipa *ipa);
+int ipa_clock_put(struct ipa *ipa);
#endif /* _IPA_CLOCK_H_ */
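
The integer returns change the caller contract: as the comment above says, a reference is taken even when ipa_clock_get() fails, so every error path must still drop it. A sketch of the pattern used throughout this series (example_with_clock() is illustrative only):

static int example_with_clock(struct ipa *ipa)
{
	int ret;

	ret = ipa_clock_get(ipa);
	if (WARN_ON(ret < 0))
		goto out_clock_put;	/* reference held even on error */

	/* ...work that needs the IPA hardware clocked goes here... */

out_clock_put:
	(void)ipa_clock_put(ipa);

	return ret < 0 ? ret : 0;
}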
diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c
index af44ca41189e..cff51731195a 100644
--- a/drivers/net/ipa/ipa_cmd.c
+++ b/drivers/net/ipa/ipa_cmd.c
@@ -159,35 +159,49 @@ static void ipa_cmd_validate_build(void)
BUILD_BUG_ON(TABLE_SIZE > field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK));
#undef TABLE_COUNT_MAX
#undef TABLE_SIZE
-}
-#ifdef IPA_VALIDATE
+ /* Hashed and non-hashed fields are assumed to be the same size */
+ BUILD_BUG_ON(field_max(IP_FLTRT_FLAGS_HASH_SIZE_FMASK) !=
+ field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK));
+ BUILD_BUG_ON(field_max(IP_FLTRT_FLAGS_HASH_ADDR_FMASK) !=
+ field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK));
+
+ /* Valid endpoint numbers must fit in the IP packet init command */
+ BUILD_BUG_ON(field_max(IPA_PACKET_INIT_DEST_ENDPOINT_FMASK) <
+ IPA_ENDPOINT_MAX - 1);
+}
/* Validate a memory region holding a table */
-bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem,
- bool route, bool ipv6, bool hashed)
+bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem, bool route)
{
+ u32 offset_max = field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK);
+ u32 size_max = field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK);
+ const char *table = route ? "route" : "filter";
struct device *dev = &ipa->pdev->dev;
- u32 offset_max;
- offset_max = hashed ? field_max(IP_FLTRT_FLAGS_HASH_ADDR_FMASK)
- : field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK);
+ /* Size must fit in the immediate command field that holds it */
+ if (mem->size > size_max) {
+ dev_err(dev, "%s table region size too large\n", table);
+ dev_err(dev, " (0x%04x > 0x%04x)\n",
+ mem->size, size_max);
+
+ return false;
+ }
+
+ /* Offset must fit in the immediate command field that holds it */
if (mem->offset > offset_max ||
ipa->mem_offset > offset_max - mem->offset) {
- dev_err(dev, "IPv%c %s%s table region offset too large\n",
- ipv6 ? '6' : '4', hashed ? "hashed " : "",
- route ? "route" : "filter");
+ dev_err(dev, "%s table region offset too large\n", table);
dev_err(dev, " (0x%04x + 0x%04x > 0x%04x)\n",
ipa->mem_offset, mem->offset, offset_max);
return false;
}
+ /* Entire memory range must fit within IPA-local memory */
if (mem->offset > ipa->mem_size ||
mem->size > ipa->mem_size - mem->offset) {
- dev_err(dev, "IPv%c %s%s table region out of range\n",
- ipv6 ? '6' : '4', hashed ? "hashed " : "",
- route ? "route" : "filter");
+ dev_err(dev, "%s table region out of range\n", table);
dev_err(dev, " (0x%04x + 0x%04x > 0x%04x)\n",
mem->offset, mem->size, ipa->mem_size);
@@ -331,7 +345,6 @@ bool ipa_cmd_data_valid(struct ipa *ipa)
return true;
}
-#endif /* IPA_VALIDATE */
int ipa_cmd_pool_init(struct gsi_channel *channel, u32 tre_max)
{
@@ -522,9 +535,6 @@ static void ipa_cmd_ip_packet_init_add(struct gsi_trans *trans, u8 endpoint_id)
union ipa_cmd_payload *cmd_payload;
dma_addr_t payload_addr;
- /* assert(endpoint_id <
- field_max(IPA_PACKET_INIT_DEST_ENDPOINT_FMASK)); */
-
cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
payload = &cmd_payload->ip_packet_init;
@@ -548,8 +558,9 @@ void ipa_cmd_dma_shared_mem_add(struct gsi_trans *trans, u32 offset, u16 size,
u16 flags;
/* size and offset must fit in 16 bit fields */
- /* assert(size > 0 && size <= U16_MAX); */
- /* assert(offset <= U16_MAX && ipa->mem_offset <= U16_MAX - offset); */
+ WARN_ON(!size);
+ WARN_ON(size > U16_MAX);
+ WARN_ON(offset > U16_MAX || ipa->mem_offset > U16_MAX - offset);
offset += ipa->mem_offset;
@@ -588,8 +599,6 @@ static void ipa_cmd_ip_tag_status_add(struct gsi_trans *trans)
union ipa_cmd_payload *cmd_payload;
dma_addr_t payload_addr;
- /* assert(tag <= field_max(IP_PACKET_TAG_STATUS_TAG_FMASK)); */
-
cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
payload = &cmd_payload->ip_packet_tag_status;
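
The size and offset WARN_ON()s above guard values that are subsequently packed with the <linux/bitfield.h> helpers, which mask out-of-range values silently. A small illustration of how field_max() and u32_encode_bits() relate; the mask here is hypothetical, not one of the driver's:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/bug.h>

/* Hypothetical 16-bit size field within a 32-bit command word */
#define EXAMPLE_SIZE_FMASK	GENMASK(15, 0)

static u32 example_encode_size(u32 size)
{
	/* field_max(EXAMPLE_SIZE_FMASK) is 0xffff; a larger value
	 * would be silently masked off by u32_encode_bits(), hence
	 * the check before encoding.
	 */
	WARN_ON(size > field_max(EXAMPLE_SIZE_FMASK));

	return u32_encode_bits(size, EXAMPLE_SIZE_FMASK);
}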
diff --git a/drivers/net/ipa/ipa_cmd.h b/drivers/net/ipa/ipa_cmd.h
index b99262281f41..69cd085d427d 100644
--- a/drivers/net/ipa/ipa_cmd.h
+++ b/drivers/net/ipa/ipa_cmd.h
@@ -57,20 +57,16 @@ struct ipa_cmd_info {
enum dma_data_direction direction;
};
-#ifdef IPA_VALIDATE
-
/**
* ipa_cmd_table_valid() - Validate a memory region holding a table
* @ipa: - IPA pointer
* @mem: - IPA memory region descriptor
* @route: - Whether the region holds a route or filter table
- * @ipv6: - Whether the table is for IPv6 or IPv4
- * @hashed: - Whether the table is hashed or non-hashed
*
* Return: true if region is valid, false otherwise
*/
bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem,
- bool route, bool ipv6, bool hashed);
+ bool route);
/**
* ipa_cmd_data_valid() - Validate that command-related configuration is valid
@@ -80,22 +76,6 @@ bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem,
*/
bool ipa_cmd_data_valid(struct ipa *ipa);
-#else /* !IPA_VALIDATE */
-
-static inline bool ipa_cmd_table_valid(struct ipa *ipa,
- const struct ipa_mem *mem, bool route,
- bool ipv6, bool hashed)
-{
- return true;
-}
-
-static inline bool ipa_cmd_data_valid(struct ipa *ipa)
-{
- return true;
-}
-
-#endif /* !IPA_VALIDATE */
-
/**
* ipa_cmd_pool_init() - initialize command channel pools
* @channel: AP->IPA command TX GSI channel pointer
diff --git a/drivers/net/ipa/ipa_data-v4.11.c b/drivers/net/ipa/ipa_data-v4.11.c
index 9353efbd504f..782f67e3e079 100644
--- a/drivers/net/ipa/ipa_data-v4.11.c
+++ b/drivers/net/ipa/ipa_data-v4.11.c
@@ -105,6 +105,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.filter_support = true,
.config = {
.resource_group = IPA_RSRC_GROUP_SRC_UL_DL,
+ .checksum = true,
.qmap = true,
.status_enable = true,
.tx = {
@@ -128,6 +129,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.endpoint = {
.config = {
.resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL,
+ .checksum = true,
.qmap = true,
.aggregation = true,
.rx = {
@@ -368,18 +370,13 @@ static const struct ipa_mem_data ipa_mem_data = {
static const struct ipa_interconnect_data ipa_interconnect_data[] = {
{
.name = "memory",
- .peak_bandwidth = 465000, /* 465 MBps */
- .average_bandwidth = 80000, /* 80 MBps */
- },
- /* Average rate is unused for the next two interconnects */
- {
- .name = "imem",
- .peak_bandwidth = 68570, /* 68.57 MBps */
- .average_bandwidth = 80000, /* 80 MBps (unused?) */
+ .peak_bandwidth = 600000, /* 600 MBps */
+ .average_bandwidth = 150000, /* 150 MBps */
},
+ /* Average rate is unused for the next interconnect */
{
.name = "config",
- .peak_bandwidth = 30000, /* 30 MBps */
+ .peak_bandwidth = 74000, /* 74 MBps */
.average_bandwidth = 0, /* unused */
},
};
diff --git a/drivers/net/ipa/ipa_data-v4.5.c b/drivers/net/ipa/ipa_data-v4.5.c
index a99b6478fa3a..db6fda2fe43d 100644
--- a/drivers/net/ipa/ipa_data-v4.5.c
+++ b/drivers/net/ipa/ipa_data-v4.5.c
@@ -114,6 +114,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.filter_support = true,
.config = {
.resource_group = IPA_RSRC_GROUP_SRC_UL_DL,
+ .checksum = true,
.qmap = true,
.status_enable = true,
.tx = {
@@ -137,6 +138,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.endpoint = {
.config = {
.resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL,
+ .checksum = true,
.qmap = true,
.aggregation = true,
.rx = {
diff --git a/drivers/net/ipa/ipa_data-v4.9.c b/drivers/net/ipa/ipa_data-v4.9.c
index 798d43e1eb13..8d83e14819e2 100644
--- a/drivers/net/ipa/ipa_data-v4.9.c
+++ b/drivers/net/ipa/ipa_data-v4.9.c
@@ -106,6 +106,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.filter_support = true,
.config = {
.resource_group = IPA_RSRC_GROUP_SRC_UL_DL,
+ .checksum = true,
.qmap = true,
.status_enable = true,
.tx = {
@@ -129,6 +130,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.endpoint = {
.config = {
.resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL,
+ .checksum = true,
.qmap = true,
.aggregation = true,
.rx = {
@@ -416,18 +418,13 @@ static const struct ipa_mem_data ipa_mem_data = {
/* Interconnect rates are in 1000 byte/second units */
static const struct ipa_interconnect_data ipa_interconnect_data[] = {
{
- .name = "ipa_to_llcc",
+ .name = "memory",
.peak_bandwidth = 600000, /* 600 MBps */
.average_bandwidth = 150000, /* 150 MBps */
},
- {
- .name = "llcc_to_ebi1",
- .peak_bandwidth = 1804000, /* 1.804 GBps */
- .average_bandwidth = 150000, /* 150 MBps */
- },
/* Average rate is unused for the next interconnect */
{
- .name = "appss_to_ipa",
+ .name = "config",
.peak_bandwidth = 74000, /* 74 MBps */
.average_bandwidth = 0, /* unused */
},
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
index ab02669bae4e..08ee37ae2881 100644
--- a/drivers/net/ipa/ipa_endpoint.c
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -250,17 +250,18 @@ ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
/* Suspend is not supported for IPA v4.0+. Delay doesn't work
* correctly on IPA v4.2.
- *
- * if (endpoint->toward_ipa)
- * assert(ipa->version != IPA_VERSION_4.2);
- * else
- * assert(ipa->version < IPA_VERSION_4_0);
*/
+ if (endpoint->toward_ipa)
+ WARN_ON(ipa->version == IPA_VERSION_4_2);
+ else
+ WARN_ON(ipa->version >= IPA_VERSION_4_0);
+
mask = endpoint->toward_ipa ? ENDP_DELAY_FMASK : ENDP_SUSPEND_FMASK;
val = ioread32(ipa->reg_virt + offset);
- /* Don't bother if it's already in the requested state */
state = !!(val & mask);
+
+ /* Don't bother if it's already in the requested state */
if (suspend_delay != state) {
val ^= mask;
iowrite32(val, ipa->reg_virt + offset);
@@ -273,7 +274,7 @@ ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
static void
ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable)
{
- /* assert(endpoint->toward_ipa); */
+ WARN_ON(!endpoint->toward_ipa);
/* Delay mode doesn't work properly for IPA v4.2 */
if (endpoint->ipa->version != IPA_VERSION_4_2)
@@ -287,7 +288,8 @@ static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint)
u32 offset;
u32 val;
- /* assert(mask & ipa->available); */
+ WARN_ON(!(mask & ipa->available));
+
offset = ipa_reg_state_aggr_active_offset(ipa->version);
val = ioread32(ipa->reg_virt + offset);
@@ -299,7 +301,8 @@ static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint)
u32 mask = BIT(endpoint->endpoint_id);
struct ipa *ipa = endpoint->ipa;
- /* assert(mask & ipa->available); */
+ WARN_ON(!(mask & ipa->available));
+
iowrite32(mask, ipa->reg_virt + IPA_REG_AGGR_FORCE_CLOSE_OFFSET);
}
@@ -338,7 +341,7 @@ ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable)
if (endpoint->ipa->version >= IPA_VERSION_4_0)
return enable; /* For IPA v4.0+, no change made */
- /* assert(!endpoint->toward_ipa); */
+ WARN_ON(endpoint->toward_ipa);
suspended = ipa_endpoint_init_ctrl(endpoint, enable);
@@ -1156,7 +1159,8 @@ static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint,
if (!endpoint->netdev)
return false;
- /* assert(len <= SKB_WITH_OVERHEAD(IPA_RX_BUFFER_SIZE-NET_SKB_PAD)); */
+ WARN_ON(len > SKB_WITH_OVERHEAD(IPA_RX_BUFFER_SIZE - NET_SKB_PAD));
+
skb = build_skb(page_address(page), IPA_RX_BUFFER_SIZE);
if (skb) {
/* Reserve the headroom and account for the data */
@@ -1583,7 +1587,6 @@ void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
{
struct device *dev = &endpoint->ipa->pdev->dev;
struct gsi *gsi = &endpoint->ipa->gsi;
- bool stop_channel;
int ret;
if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
@@ -1594,11 +1597,7 @@ void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
(void)ipa_endpoint_program_suspend(endpoint, true);
}
- /* Starting with IPA v4.0, endpoints are suspended by stopping the
- * underlying GSI channel rather than using endpoint suspend mode.
- */
- stop_channel = endpoint->ipa->version >= IPA_VERSION_4_0;
- ret = gsi_channel_suspend(gsi, endpoint->channel_id, stop_channel);
+ ret = gsi_channel_suspend(gsi, endpoint->channel_id);
if (ret)
dev_err(dev, "error %d suspending channel %u\n", ret,
endpoint->channel_id);
@@ -1608,7 +1607,6 @@ void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint)
{
struct device *dev = &endpoint->ipa->pdev->dev;
struct gsi *gsi = &endpoint->ipa->gsi;
- bool start_channel;
int ret;
if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
@@ -1617,11 +1615,7 @@ void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint)
if (!endpoint->toward_ipa)
(void)ipa_endpoint_program_suspend(endpoint, false);
- /* Starting with IPA v4.0, the underlying GSI channel must be
- * restarted for resume.
- */
- start_channel = endpoint->ipa->version >= IPA_VERSION_4_0;
- ret = gsi_channel_resume(gsi, endpoint->channel_id, start_channel);
+ ret = gsi_channel_resume(gsi, endpoint->channel_id);
if (ret)
dev_err(dev, "error %d resuming channel %u\n", ret,
endpoint->channel_id);
diff --git a/drivers/net/ipa/ipa_interrupt.c b/drivers/net/ipa/ipa_interrupt.c
index c46df0b7c4e5..934c14e066a0 100644
--- a/drivers/net/ipa/ipa_interrupt.c
+++ b/drivers/net/ipa/ipa_interrupt.c
@@ -74,21 +74,28 @@ static void ipa_interrupt_process(struct ipa_interrupt *interrupt, u32 irq_id)
iowrite32(mask, ipa->reg_virt + offset);
}
-/* Process all IPA interrupt types that have been signaled */
-static void ipa_interrupt_process_all(struct ipa_interrupt *interrupt)
+/* IPA IRQ handler is threaded */
+static irqreturn_t ipa_isr_thread(int irq, void *dev_id)
{
+ struct ipa_interrupt *interrupt = dev_id;
struct ipa *ipa = interrupt->ipa;
u32 enabled = interrupt->enabled;
+ u32 pending;
u32 offset;
u32 mask;
+ int ret;
+
+ ret = ipa_clock_get(ipa);
+ if (WARN_ON(ret < 0))
+ goto out_clock_put;
/* The status register indicates which conditions are present,
* including conditions whose interrupt is not enabled. Handle
* only the enabled ones.
*/
offset = ipa_reg_irq_stts_offset(ipa->version);
- mask = ioread32(ipa->reg_virt + offset);
- while ((mask &= enabled)) {
+ pending = ioread32(ipa->reg_virt + offset);
+ while ((mask = pending & enabled)) {
do {
u32 irq_id = __ffs(mask);
@@ -96,43 +103,20 @@ static void ipa_interrupt_process_all(struct ipa_interrupt *interrupt)
ipa_interrupt_process(interrupt, irq_id);
} while (mask);
- mask = ioread32(ipa->reg_virt + offset);
+ pending = ioread32(ipa->reg_virt + offset);
}
-}
-
-/* Threaded part of the IPA IRQ handler */
-static irqreturn_t ipa_isr_thread(int irq, void *dev_id)
-{
- struct ipa_interrupt *interrupt = dev_id;
-
- ipa_clock_get(interrupt->ipa);
-
- ipa_interrupt_process_all(interrupt);
- ipa_clock_put(interrupt->ipa);
-
- return IRQ_HANDLED;
-}
+ /* If any disabled interrupts are pending, clear them */
+ if (pending) {
+ struct device *dev = &ipa->pdev->dev;
-/* Hard part (i.e., "real" IRQ handler) of the IRQ handler */
-static irqreturn_t ipa_isr(int irq, void *dev_id)
-{
- struct ipa_interrupt *interrupt = dev_id;
- struct ipa *ipa = interrupt->ipa;
- u32 offset;
- u32 mask;
-
- offset = ipa_reg_irq_stts_offset(ipa->version);
- mask = ioread32(ipa->reg_virt + offset);
- if (mask & interrupt->enabled)
- return IRQ_WAKE_THREAD;
-
- /* Nothing in the mask was supposed to cause an interrupt */
- offset = ipa_reg_irq_clr_offset(ipa->version);
- iowrite32(mask, ipa->reg_virt + offset);
-
- dev_err(&ipa->pdev->dev, "%s: unexpected interrupt, mask 0x%08x\n",
- __func__, mask);
+ dev_dbg(dev, "clearing disabled IPA interrupts 0x%08x\n",
+ pending);
+ offset = ipa_reg_irq_clr_offset(ipa->version);
+ iowrite32(pending, ipa->reg_virt + offset);
+ }
+out_clock_put:
+ (void)ipa_clock_put(ipa);
return IRQ_HANDLED;
}
@@ -146,7 +130,7 @@ static void ipa_interrupt_suspend_control(struct ipa_interrupt *interrupt,
u32 offset;
u32 val;
- /* assert(mask & ipa->available); */
+ WARN_ON(!(mask & ipa->available));
/* IPA version 3.0 does not support TX_SUSPEND interrupt control */
if (ipa->version == IPA_VERSION_3_0)
@@ -206,7 +190,8 @@ void ipa_interrupt_add(struct ipa_interrupt *interrupt,
struct ipa *ipa = interrupt->ipa;
u32 offset;
- /* assert(ipa_irq < IPA_IRQ_COUNT); */
+ WARN_ON(ipa_irq >= IPA_IRQ_COUNT);
+
interrupt->handler[ipa_irq] = handler;
/* Update the IPA interrupt mask to enable it */
@@ -222,7 +207,8 @@ ipa_interrupt_remove(struct ipa_interrupt *interrupt, enum ipa_irq_id ipa_irq)
struct ipa *ipa = interrupt->ipa;
u32 offset;
- /* assert(ipa_irq < IPA_IRQ_COUNT); */
+ WARN_ON(ipa_irq >= IPA_IRQ_COUNT);
+
/* Update the IPA interrupt mask to disable it */
interrupt->enabled &= ~BIT(ipa_irq);
offset = ipa_reg_irq_en_offset(ipa->version);
@@ -231,8 +217,8 @@ ipa_interrupt_remove(struct ipa_interrupt *interrupt, enum ipa_irq_id ipa_irq)
interrupt->handler[ipa_irq] = NULL;
}
-/* Set up the IPA interrupt framework */
-struct ipa_interrupt *ipa_interrupt_setup(struct ipa *ipa)
+/* Configure the IPA interrupt framework */
+struct ipa_interrupt *ipa_interrupt_config(struct ipa *ipa)
{
struct device *dev = &ipa->pdev->dev;
struct ipa_interrupt *interrupt;
@@ -258,7 +244,7 @@ struct ipa_interrupt *ipa_interrupt_setup(struct ipa *ipa)
offset = ipa_reg_irq_en_offset(ipa->version);
iowrite32(0, ipa->reg_virt + offset);
- ret = request_threaded_irq(irq, ipa_isr, ipa_isr_thread, IRQF_ONESHOT,
+ ret = request_threaded_irq(irq, NULL, ipa_isr_thread, IRQF_ONESHOT,
"ipa", interrupt);
if (ret) {
dev_err(dev, "error %d requesting \"ipa\" IRQ\n", ret);
@@ -281,8 +267,8 @@ err_kfree:
return ERR_PTR(ret);
}
-/* Tear down the IPA interrupt framework */
-void ipa_interrupt_teardown(struct ipa_interrupt *interrupt)
+/* Inverse of ipa_interrupt_config() */
+void ipa_interrupt_deconfig(struct ipa_interrupt *interrupt)
{
struct device *dev = &interrupt->ipa->pdev->dev;
int ret;
diff --git a/drivers/net/ipa/ipa_interrupt.h b/drivers/net/ipa/ipa_interrupt.h
index d5c486a6800d..231390cea52a 100644
--- a/drivers/net/ipa/ipa_interrupt.h
+++ b/drivers/net/ipa/ipa_interrupt.h
@@ -86,17 +86,17 @@ void ipa_interrupt_suspend_clear_all(struct ipa_interrupt *interrupt);
void ipa_interrupt_simulate_suspend(struct ipa_interrupt *interrupt);
/**
- * ipa_interrupt_setup() - Set up the IPA interrupt framework
+ * ipa_interrupt_config() - Configure the IPA interrupt framework
* @ipa: IPA pointer
*
* Return: Pointer to IPA SMP2P info, or a pointer-coded error
*/
-struct ipa_interrupt *ipa_interrupt_setup(struct ipa *ipa);
+struct ipa_interrupt *ipa_interrupt_config(struct ipa *ipa);
/**
- * ipa_interrupt_teardown() - Tear down the IPA interrupt framework
+ * ipa_interrupt_deconfig() - Inverse of ipa_interrupt_config()
* @interrupt: IPA interrupt structure
*/
-void ipa_interrupt_teardown(struct ipa_interrupt *interrupt);
+void ipa_interrupt_deconfig(struct ipa_interrupt *interrupt);
#endif /* _IPA_INTERRUPT_H_ */
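
The switch in ipa_interrupt.c to a threaded-only handler leans on two IRQ-core behaviors: a NULL hard handler makes the core install a default primary that simply returns IRQ_WAKE_THREAD, and the core then insists on IRQF_ONESHOT so the line stays masked until the thread completes. A generic sketch of that registration shape (example_*() names are illustrative):

#include <linux/interrupt.h>

static irqreturn_t example_thread_fn(int irq, void *dev_id)
{
	/* Process context: sleeping calls are allowed here, which is
	 * what lets ipa_isr_thread() take a clock reference directly
	 * instead of splitting work with a hard handler.
	 */
	return IRQ_HANDLED;
}

static int example_request(int irq, void *dev_id)
{
	/* NULL primary handler + IRQF_ONESHOT => threaded-only IRQ */
	return request_threaded_irq(irq, NULL, example_thread_fn,
				    IRQF_ONESHOT, "example", dev_id);
}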
diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c
index 9810c61a0320..69fa4b3120fd 100644
--- a/drivers/net/ipa/ipa_main.c
+++ b/drivers/net/ipa/ipa_main.c
@@ -80,29 +80,6 @@
#define IPA_XO_CLOCK_DIVIDER 192 /* 1 is subtracted where used */
/**
- * ipa_suspend_handler() - Handle the suspend IPA interrupt
- * @ipa: IPA pointer
- * @irq_id: IPA interrupt type (unused)
- *
- * If an RX endpoint is in suspend state, and the IPA has a packet
- * destined for that endpoint, the IPA generates a SUSPEND interrupt
- * to inform the AP that it should resume the endpoint. If we get
- * one of these interrupts we just resume everything.
- */
-static void ipa_suspend_handler(struct ipa *ipa, enum ipa_irq_id irq_id)
-{
- /* Just report the event, and let system resume handle the rest.
- * More than one endpoint could signal this; if so, ignore
- * all but the first.
- */
- if (!test_and_set_bit(IPA_FLAG_RESUMED, ipa->flags))
- pm_wakeup_dev_event(&ipa->pdev->dev, 0, true);
-
- /* Acknowledge/clear the suspend interrupt on all endpoints */
- ipa_interrupt_suspend_clear_all(ipa->interrupt);
-}
-
-/**
* ipa_setup() - Set up IPA hardware
* @ipa: IPA pointer
*
@@ -124,19 +101,9 @@ int ipa_setup(struct ipa *ipa)
if (ret)
return ret;
- ipa->interrupt = ipa_interrupt_setup(ipa);
- if (IS_ERR(ipa->interrupt)) {
- ret = PTR_ERR(ipa->interrupt);
- goto err_gsi_teardown;
- }
- ipa_interrupt_add(ipa->interrupt, IPA_IRQ_TX_SUSPEND,
- ipa_suspend_handler);
-
- ipa_uc_setup(ipa);
-
- ret = device_init_wakeup(dev, true);
+ ret = ipa_power_setup(ipa);
if (ret)
- goto err_uc_teardown;
+ goto err_gsi_teardown;
ipa_endpoint_setup(ipa);
@@ -167,7 +134,7 @@ int ipa_setup(struct ipa *ipa)
ipa_endpoint_default_route_set(ipa, exception_endpoint->endpoint_id);
/* We're all set. Now prepare for communication with the modem */
- ret = ipa_modem_setup(ipa);
+ ret = ipa_qmi_setup(ipa);
if (ret)
goto err_default_route_clear;
@@ -184,11 +151,7 @@ err_command_disable:
ipa_endpoint_disable_one(command_endpoint);
err_endpoint_teardown:
ipa_endpoint_teardown(ipa);
- (void)device_init_wakeup(dev, false);
-err_uc_teardown:
- ipa_uc_teardown(ipa);
- ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_TX_SUSPEND);
- ipa_interrupt_teardown(ipa->interrupt);
+ ipa_power_teardown(ipa);
err_gsi_teardown:
gsi_teardown(&ipa->gsi);
@@ -204,17 +167,17 @@ static void ipa_teardown(struct ipa *ipa)
struct ipa_endpoint *exception_endpoint;
struct ipa_endpoint *command_endpoint;
- ipa_modem_teardown(ipa);
+ /* We're going to tear everything down, as if setup never completed */
+ ipa->setup_complete = false;
+
+ ipa_qmi_teardown(ipa);
ipa_endpoint_default_route_clear(ipa);
exception_endpoint = ipa->name_map[IPA_ENDPOINT_AP_LAN_RX];
ipa_endpoint_disable_one(exception_endpoint);
command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
ipa_endpoint_disable_one(command_endpoint);
ipa_endpoint_teardown(ipa);
- (void)device_init_wakeup(&ipa->pdev->dev, false);
- ipa_uc_teardown(ipa);
- ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_TX_SUSPEND);
- ipa_interrupt_teardown(ipa->interrupt);
+ ipa_power_teardown(ipa);
gsi_teardown(&ipa->gsi);
}
@@ -253,9 +216,6 @@ ipa_hardware_config_qsb(struct ipa *ipa, const struct ipa_data *data)
const struct ipa_qsb_data *data1;
u32 val;
- /* assert(data->qsb_count > 0); */
- /* assert(data->qsb_count < 3); */
-
/* QMB 0 represents DDR; QMB 1 (if present) represents PCIe */
data0 = &data->qsb_data[IPA_QSB_MASTER_DDR];
if (data->qsb_count > 1)
@@ -289,12 +249,11 @@ ipa_hardware_config_qsb(struct ipa *ipa, const struct ipa_data *data)
/* Compute the value to use in the COUNTER_CFG register AGGR_GRANULARITY
* field to represent the given number of microseconds. The value is one
* less than the number of timer ticks in the requested period. 0 is not
- * a valid granularity value.
+ * a valid granularity value (so for example @usec must be at least 16 for
+ * a TIMER_FREQUENCY of 32000).
*/
-static u32 ipa_aggr_granularity_val(u32 usec)
+static __always_inline u32 ipa_aggr_granularity_val(u32 usec)
{
- /* assert(usec != 0); */
-
return DIV_ROUND_CLOSEST(usec * TIMER_FREQUENCY, USEC_PER_SEC) - 1;
}
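
A quick check of the arithmetic, assuming TIMER_FREQUENCY is 32000 (the 32 kHz rate the comment above implies) and the 500 microsecond period this driver has used for IPA_AGGR_GRANULARITY; the BUILD_BUG_ON() checks later in this file depend on the result being nonzero and fitting its register field:

/* ipa_aggr_granularity_val(500), with TIMER_FREQUENCY == 32000:
 *
 *	ticks = DIV_ROUND_CLOSEST(500 * 32000, USEC_PER_SEC)
 *	      = DIV_ROUND_CLOSEST(16000000, 1000000)
 *	      = 16
 *	value = ticks - 1 = 15
 *
 * For usec < 16 the closest tick count rounds to zero, and the
 * subtraction would wrap the u32; that is where the minimum of 16
 * comes from.
 */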
@@ -463,42 +422,48 @@ static int ipa_config(struct ipa *ipa, const struct ipa_data *data)
{
int ret;
- /* Get a clock reference to allow initialization. This reference
- * is held after initialization completes, and won't get dropped
- * unless/until a system suspend request arrives.
- */
- ipa_clock_get(ipa);
-
ipa_hardware_config(ipa, data);
- ret = ipa_endpoint_config(ipa);
+ ret = ipa_mem_config(ipa);
if (ret)
goto err_hardware_deconfig;
- ret = ipa_mem_config(ipa);
+ ipa->interrupt = ipa_interrupt_config(ipa);
+ if (IS_ERR(ipa->interrupt)) {
+ ret = PTR_ERR(ipa->interrupt);
+ ipa->interrupt = NULL;
+ goto err_mem_deconfig;
+ }
+
+ ipa_uc_config(ipa);
+
+ ret = ipa_endpoint_config(ipa);
if (ret)
- goto err_endpoint_deconfig;
+ goto err_uc_deconfig;
ipa_table_config(ipa); /* No deconfig required */
/* Assign resource limitation to each group; no deconfig required */
ret = ipa_resource_config(ipa, data->resource_data);
if (ret)
- goto err_mem_deconfig;
+ goto err_endpoint_deconfig;
ret = ipa_modem_config(ipa);
if (ret)
- goto err_mem_deconfig;
+ goto err_endpoint_deconfig;
return 0;
-err_mem_deconfig:
- ipa_mem_deconfig(ipa);
err_endpoint_deconfig:
ipa_endpoint_deconfig(ipa);
+err_uc_deconfig:
+ ipa_uc_deconfig(ipa);
+ ipa_interrupt_deconfig(ipa->interrupt);
+ ipa->interrupt = NULL;
+err_mem_deconfig:
+ ipa_mem_deconfig(ipa);
err_hardware_deconfig:
ipa_hardware_deconfig(ipa);
- ipa_clock_put(ipa);
return ret;
}
@@ -510,10 +475,12 @@ err_hardware_deconfig:
static void ipa_deconfig(struct ipa *ipa)
{
ipa_modem_deconfig(ipa);
- ipa_mem_deconfig(ipa);
ipa_endpoint_deconfig(ipa);
+ ipa_uc_deconfig(ipa);
+ ipa_interrupt_deconfig(ipa->interrupt);
+ ipa->interrupt = NULL;
+ ipa_mem_deconfig(ipa);
ipa_hardware_deconfig(ipa);
- ipa_clock_put(ipa);
}
static int ipa_firmware_load(struct device *dev)
@@ -612,7 +579,6 @@ MODULE_DEVICE_TABLE(of, ipa_match);
*/
static void ipa_validate_build(void)
{
-#ifdef IPA_VALIDATE
/* At one time we assumed a 64-bit build, allowing some do_div()
* calls to be replaced by simple division or modulo operations.
* We currently only perform divide and modulo operations on u32,
@@ -646,7 +612,6 @@ static void ipa_validate_build(void)
BUILD_BUG_ON(!ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY));
BUILD_BUG_ON(ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY) >
field_max(AGGR_GRANULARITY_FMASK));
-#endif /* IPA_VALIDATE */
}
static bool ipa_version_valid(enum ipa_version version)
@@ -771,18 +736,23 @@ static int ipa_probe(struct platform_device *pdev)
if (ret)
goto err_table_exit;
+ /* The clock needs to be active for config and setup */
+ ret = ipa_clock_get(ipa);
+ if (WARN_ON(ret < 0))
+ goto err_clock_put;
+
ret = ipa_config(ipa, data);
if (ret)
- goto err_modem_exit;
+ goto err_clock_put;
dev_info(dev, "IPA driver initialized");
/* If the modem is doing early initialization, it will trigger a
- * call to ipa_setup() call when it has finished. In that case
- * we're done here.
+ * call to ipa_setup() when it has finished. In that case we're
+ * done here.
*/
if (modem_init)
- return 0;
+ goto done;
/* Otherwise we need to load the firmware and have Trust Zone validate
* and install it. If that succeeds we can proceed with setup.
@@ -794,12 +764,15 @@ static int ipa_probe(struct platform_device *pdev)
ret = ipa_setup(ipa);
if (ret)
goto err_deconfig;
+done:
+ (void)ipa_clock_put(ipa);
return 0;
err_deconfig:
ipa_deconfig(ipa);
-err_modem_exit:
+err_clock_put:
+ (void)ipa_clock_put(ipa);
ipa_modem_exit(ipa);
err_table_exit:
ipa_table_exit(ipa);
@@ -825,6 +798,10 @@ static int ipa_remove(struct platform_device *pdev)
struct ipa_clock *clock = ipa->clock;
int ret;
+ ret = ipa_clock_get(ipa);
+ if (WARN_ON(ret < 0))
+ goto out_clock_put;
+
if (ipa->setup_complete) {
ret = ipa_modem_stop(ipa);
/* If starting or stopping is in progress, try once more */
@@ -839,6 +816,9 @@ static int ipa_remove(struct platform_device *pdev)
}
ipa_deconfig(ipa);
+out_clock_put:
+ (void)ipa_clock_put(ipa);
+
ipa_modem_exit(ipa);
ipa_table_exit(ipa);
ipa_endpoint_exit(ipa);
@@ -860,62 +840,6 @@ static void ipa_shutdown(struct platform_device *pdev)
dev_err(&pdev->dev, "shutdown: remove returned %d\n", ret);
}
-/**
- * ipa_suspend() - Power management system suspend callback
- * @dev: IPA device structure
- *
- * Return: Always returns zero
- *
- * Called by the PM framework when a system suspend operation is invoked.
- * Suspends endpoints and releases the clock reference held to keep
- * the IPA clock running until this point.
- */
-static int ipa_suspend(struct device *dev)
-{
- struct ipa *ipa = dev_get_drvdata(dev);
-
- /* When a suspended RX endpoint has a packet ready to receive, we
- * get an IPA SUSPEND interrupt. We trigger a system resume in
- * that case, but only on the first such interrupt since suspend.
- */
- __clear_bit(IPA_FLAG_RESUMED, ipa->flags);
-
- ipa_endpoint_suspend(ipa);
-
- ipa_clock_put(ipa);
-
- return 0;
-}
-
-/**
- * ipa_resume() - Power management system resume callback
- * @dev: IPA device structure
- *
- * Return: Always returns 0
- *
- * Called by the PM framework when a system resume operation is invoked.
- * Takes an IPA clock reference to keep the clock running until suspend,
- * and resumes endpoints.
- */
-static int ipa_resume(struct device *dev)
-{
- struct ipa *ipa = dev_get_drvdata(dev);
-
- /* This clock reference will keep the IPA out of suspend
- * until we get a power management suspend request.
- */
- ipa_clock_get(ipa);
-
- ipa_endpoint_resume(ipa);
-
- return 0;
-}
-
-static const struct dev_pm_ops ipa_pm_ops = {
- .suspend = ipa_suspend,
- .resume = ipa_resume,
-};
-
static const struct attribute_group *ipa_attribute_groups[] = {
&ipa_attribute_group,
&ipa_feature_attribute_group,
diff --git a/drivers/net/ipa/ipa_modem.c b/drivers/net/ipa/ipa_modem.c
index af9aedbde717..c8724af935b8 100644
--- a/drivers/net/ipa/ipa_modem.c
+++ b/drivers/net/ipa/ipa_modem.c
@@ -9,6 +9,7 @@
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_rmnet.h>
+#include <linux/pm_runtime.h>
#include <linux/remoteproc/qcom_rproc.h>
#include "ipa.h"
@@ -19,6 +20,8 @@
#include "ipa_modem.h"
#include "ipa_smp2p.h"
#include "ipa_qmi.h"
+#include "ipa_uc.h"
+#include "ipa_clock.h"
#define IPA_NETDEV_NAME "rmnet_ipa%d"
#define IPA_NETDEV_TAILROOM 0 /* for padding by mux layer */
@@ -31,9 +34,14 @@ enum ipa_modem_state {
IPA_MODEM_STATE_STOPPING,
};
-/** struct ipa_priv - IPA network device private data */
+/**
+ * struct ipa_priv - IPA network device private data
+ * @ipa: IPA pointer
+ * @work: Work structure used to wake the modem netdev TX queue
+ */
struct ipa_priv {
struct ipa *ipa;
+ struct work_struct work;
};
/** ipa_open() - Opens the modem network interface */
@@ -43,19 +51,28 @@ static int ipa_open(struct net_device *netdev)
struct ipa *ipa = priv->ipa;
int ret;
+ ret = ipa_clock_get(ipa);
+ if (WARN_ON(ret < 0))
+ goto err_clock_put;
+
ret = ipa_endpoint_enable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]);
if (ret)
- return ret;
+ goto err_clock_put;
+
ret = ipa_endpoint_enable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]);
if (ret)
goto err_disable_tx;
netif_start_queue(netdev);
+ (void)ipa_clock_put(ipa);
+
return 0;
err_disable_tx:
ipa_endpoint_disable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]);
+err_clock_put:
+ (void)ipa_clock_put(ipa);
return ret;
}
@@ -65,11 +82,18 @@ static int ipa_stop(struct net_device *netdev)
{
struct ipa_priv *priv = netdev_priv(netdev);
struct ipa *ipa = priv->ipa;
+ int ret;
+
+ ret = ipa_clock_get(ipa);
+ if (WARN_ON(ret < 0))
+ goto out_clock_put;
netif_stop_queue(netdev);
ipa_endpoint_disable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]);
ipa_endpoint_disable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]);
+out_clock_put:
+ (void)ipa_clock_put(ipa);
return 0;
}
@@ -82,13 +106,15 @@ static int ipa_stop(struct net_device *netdev)
* NETDEV_TX_OK: Success
* NETDEV_TX_BUSY: Error while transmitting the skb. Try again later
*/
-static int ipa_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+static netdev_tx_t
+ipa_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct net_device_stats *stats = &netdev->stats;
struct ipa_priv *priv = netdev_priv(netdev);
struct ipa_endpoint *endpoint;
struct ipa *ipa = priv->ipa;
u32 skb_len = skb->len;
+ struct device *dev;
int ret;
if (!skb_len)
@@ -98,7 +124,31 @@ static int ipa_start_xmit(struct sk_buff *skb, struct net_device *netdev)
if (endpoint->data->qmap && skb->protocol != htons(ETH_P_MAP))
goto err_drop_skb;
+ /* The hardware must be powered for us to transmit */
+ dev = &ipa->pdev->dev;
+ ret = pm_runtime_get(dev);
+ if (ret < 1) {
+ /* If a resume won't happen, just drop the packet */
+ if (ret < 0 && ret != -EINPROGRESS) {
+ pm_runtime_put_noidle(dev);
+ goto err_drop_skb;
+ }
+
+ /* No power (yet). Stop the network stack from transmitting
+ * until we're resumed; ipa_modem_resume() arranges for the
+ * TX queue to be started again.
+ */
+ netif_stop_queue(netdev);
+
+ (void)pm_runtime_put(dev);
+
+ return NETDEV_TX_BUSY;
+ }
+
ret = ipa_endpoint_skb_tx(endpoint, skb);
+
+ (void)pm_runtime_put(dev);
+
if (ret) {
if (ret != -E2BIG)
return NETDEV_TX_BUSY;
@@ -169,12 +219,31 @@ void ipa_modem_suspend(struct net_device *netdev)
struct ipa_priv *priv = netdev_priv(netdev);
struct ipa *ipa = priv->ipa;
- netif_stop_queue(netdev);
+ if (!(netdev->flags & IFF_UP))
+ return;
ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]);
ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]);
}
+/**
+ * ipa_modem_wake_queue_work() - enable modem netdev queue
+ * @work: Work structure
+ *
+ * Re-enable transmit on the modem network device. This is called
+ * in (power management) work queue context, scheduled when resuming
+ * the modem. We can't enable the queue directly in ipa_modem_resume()
+ * because transmits restart the instant the queue is awakened; but the
+ * device power state won't be ACTIVE until *after* ipa_modem_resume()
+ * returns.
+ */
+static void ipa_modem_wake_queue_work(struct work_struct *work)
+{
+ struct ipa_priv *priv = container_of(work, struct ipa_priv, work);
+
+ netif_wake_queue(priv->ipa->modem_netdev);
+}
+
/** ipa_modem_resume() - resume callback for runtime_pm
* @dev: pointer to device
*
@@ -185,10 +254,14 @@ void ipa_modem_resume(struct net_device *netdev)
struct ipa_priv *priv = netdev_priv(netdev);
struct ipa *ipa = priv->ipa;
+ if (!(netdev->flags & IFF_UP))
+ return;
+
ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]);
ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]);
- netif_wake_queue(netdev);
+ /* Arrange for the TX queue to be restarted */
+ (void)queue_pm_work(&priv->work);
}
int ipa_modem_start(struct ipa *ipa)
@@ -216,13 +289,16 @@ int ipa_modem_start(struct ipa *ipa)
SET_NETDEV_DEV(netdev, &ipa->pdev->dev);
priv = netdev_priv(netdev);
priv->ipa = ipa;
+ INIT_WORK(&priv->work, ipa_modem_wake_queue_work);
+ ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]->netdev = netdev;
+ ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]->netdev = netdev;
+ ipa->modem_netdev = netdev;
ret = register_netdev(netdev);
- if (!ret) {
- ipa->modem_netdev = netdev;
- ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]->netdev = netdev;
- ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]->netdev = netdev;
- } else {
+ if (ret) {
+ ipa->modem_netdev = NULL;
+ ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]->netdev = NULL;
+ ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]->netdev = NULL;
free_netdev(netdev);
}
@@ -256,13 +332,18 @@ int ipa_modem_stop(struct ipa *ipa)
/* Prevent the modem from triggering a call to ipa_setup() */
ipa_smp2p_disable(ipa);
- /* Stop the queue and disable the endpoints if it's open */
+ /* Clean up the netdev and endpoints if the netdev was started */
if (netdev) {
- (void)ipa_stop(netdev);
+ struct ipa_priv *priv = netdev_priv(netdev);
+
+ cancel_work_sync(&priv->work);
+ /* If it was opened, stop it first */
+ if (netdev->flags & IFF_UP)
+ (void)ipa_stop(netdev);
+ unregister_netdev(netdev);
+ ipa->modem_netdev = NULL;
ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]->netdev = NULL;
ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]->netdev = NULL;
- ipa->modem_netdev = NULL;
- unregister_netdev(netdev);
free_netdev(netdev);
}
@@ -278,6 +359,10 @@ static void ipa_modem_crashed(struct ipa *ipa)
struct device *dev = &ipa->pdev->dev;
int ret;
+ ret = ipa_clock_get(ipa);
+ if (WARN_ON(ret < 0))
+ goto out_clock_put;
+
ipa_endpoint_modem_pause_all(ipa, true);
ipa_endpoint_modem_hol_block_clear_all(ipa);
@@ -302,6 +387,9 @@ static void ipa_modem_crashed(struct ipa *ipa)
ret = ipa_mem_zero_modem(ipa);
if (ret)
dev_err(dev, "error %d zeroing modem memory regions\n", ret);
+
+out_clock_put:
+ (void)ipa_clock_put(ipa);
}
static int ipa_modem_notify(struct notifier_block *nb, unsigned long action,
@@ -314,6 +402,7 @@ static int ipa_modem_notify(struct notifier_block *nb, unsigned long action,
switch (action) {
case QCOM_SSR_BEFORE_POWERUP:
dev_info(dev, "received modem starting event\n");
+ ipa_uc_clock(ipa);
ipa_smp2p_notify_reset(ipa);
break;
@@ -377,13 +466,3 @@ void ipa_modem_deconfig(struct ipa *ipa)
ipa->notifier = NULL;
memset(&ipa->nb, 0, sizeof(ipa->nb));
}
-
-int ipa_modem_setup(struct ipa *ipa)
-{
- return ipa_qmi_setup(ipa);
-}
-
-void ipa_modem_teardown(struct ipa *ipa)
-{
- ipa_qmi_teardown(ipa);
-}
diff --git a/drivers/net/ipa/ipa_modem.h b/drivers/net/ipa/ipa_modem.h
index 2de3e216d1d4..5e6e3d234454 100644
--- a/drivers/net/ipa/ipa_modem.h
+++ b/drivers/net/ipa/ipa_modem.h
@@ -7,7 +7,6 @@
#define _IPA_MODEM_H_
struct ipa;
-struct ipa_endpoint;
struct net_device;
struct sk_buff;
@@ -25,7 +24,4 @@ void ipa_modem_exit(struct ipa *ipa);
int ipa_modem_config(struct ipa *ipa);
void ipa_modem_deconfig(struct ipa *ipa);
-int ipa_modem_setup(struct ipa *ipa);
-void ipa_modem_teardown(struct ipa *ipa);
-
#endif /* _IPA_MODEM_H_ */
diff --git a/drivers/net/ipa/ipa_qmi.c b/drivers/net/ipa/ipa_qmi.c
index 4661105ce7ab..90f3aec55b36 100644
--- a/drivers/net/ipa/ipa_qmi.c
+++ b/drivers/net/ipa/ipa_qmi.c
@@ -467,10 +467,7 @@ static const struct qmi_ops ipa_client_ops = {
.new_server = ipa_client_new_server,
};
-/* This is called by ipa_setup(). We can be informed via remoteproc that
- * the modem has shut down, in which case this function will be called
- * again to prepare for it coming back up again.
- */
+/* Set up for QMI message exchange */
int ipa_qmi_setup(struct ipa *ipa)
{
struct ipa_qmi *ipa_qmi = &ipa->qmi;
@@ -526,6 +523,7 @@ err_server_handle_release:
return ret;
}
+/* Tear down IPA QMI handles */
void ipa_qmi_teardown(struct ipa *ipa)
{
cancel_work_sync(&ipa->qmi.init_driver_work);
diff --git a/drivers/net/ipa/ipa_qmi.h b/drivers/net/ipa/ipa_qmi.h
index b6f2055d35a6..856ef629ccc8 100644
--- a/drivers/net/ipa/ipa_qmi.h
+++ b/drivers/net/ipa/ipa_qmi.h
@@ -39,7 +39,26 @@ struct ipa_qmi {
bool indication_sent;
};
+/**
+ * ipa_qmi_setup() - Set up for QMI message exchange
+ * @ipa: IPA pointer
+ *
+ * This is called at the end of ipa_setup(), to prepare for the exchange
+ * of QMI messages that perform a "handshake" between the AP and modem.
+ * When the modem QMI server announces its presence, an AP request message
+ * supplies operating parameters to the modem, and the modem
+ * acknowledges receipt of those parameters. The modem will not touch the
+ * IPA hardware until this handshake is complete.
+ *
+ * If the modem crashes (or shuts down) a new handshake begins when the
+ * modem's QMI server is started again.
+ */
int ipa_qmi_setup(struct ipa *ipa);
+
+/**
+ * ipa_qmi_teardown() - Tear down IPA QMI handles
+ * @ipa: IPA pointer
+ */
void ipa_qmi_teardown(struct ipa *ipa);
#endif /* !_IPA_QMI_H_ */
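
For context, the handshake described above rides on the kernel's common QMI interface. A hedged sketch of the client-side plumbing it implies; the buffer size, the service/version/instance numbers, the handlers array, and the client_handle field name are placeholders here, not the driver's verified values:

static int example_qmi_client_setup(struct ipa_qmi *ipa_qmi)
{
	int ret;

	ret = qmi_handle_init(&ipa_qmi->client_handle, 4096,
			      &ipa_client_ops, example_client_handlers);
	if (ret)
		return ret;

	/* Ask to be notified when the modem's server appears; the
	 * .new_server callback then sends the AP's parameters.
	 */
	return qmi_add_lookup(&ipa_qmi->client_handle, 0x31, 0x1, 0x2);
}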
diff --git a/drivers/net/ipa/ipa_reg.h b/drivers/net/ipa/ipa_reg.h
index b89dec5865a5..a5b355384d4a 100644
--- a/drivers/net/ipa/ipa_reg.h
+++ b/drivers/net/ipa/ipa_reg.h
@@ -99,7 +99,7 @@ struct ipa;
static inline u32 arbitration_lock_disable_encoded(enum ipa_version version,
u32 mask)
{
- /* assert(version >= IPA_VERSION_4_0); */
+ WARN_ON(version < IPA_VERSION_4_0);
if (version < IPA_VERSION_4_9)
return u32_encode_bits(mask, GENMASK(20, 17));
@@ -116,7 +116,7 @@ static inline u32 full_flush_rsc_closure_en_encoded(enum ipa_version version,
{
u32 val = enable ? 1 : 0;
- /* assert(version >= IPA_VERSION_4_5); */
+ WARN_ON(version < IPA_VERSION_4_5);
if (version == IPA_VERSION_4_5 || version == IPA_VERSION_4_7)
return u32_encode_bits(val, GENMASK(21, 21));
@@ -409,7 +409,7 @@ static inline u32 ipa_header_size_encoded(enum ipa_version version,
val = u32_encode_bits(size, HDR_LEN_FMASK);
if (version < IPA_VERSION_4_5) {
- /* ipa_assert(header_size == size); */
+ WARN_ON(header_size != size);
return val;
}
@@ -429,7 +429,7 @@ static inline u32 ipa_metadata_offset_encoded(enum ipa_version version,
val = u32_encode_bits(off, HDR_OFST_METADATA_FMASK);
if (version < IPA_VERSION_4_5) {
- /* ipa_assert(offset == off); */
+ WARN_ON(offset != off);
return val;
}
@@ -812,7 +812,7 @@ ipa_reg_irq_suspend_info_offset(enum ipa_version version)
static inline u32
ipa_reg_irq_suspend_en_ee_n_offset(enum ipa_version version, u32 ee)
{
- /* assert(version != IPA_VERSION_3_0); */
+ WARN_ON(version == IPA_VERSION_3_0);
if (version < IPA_VERSION_4_9)
return 0x00003034 + 0x1000 * ee;
@@ -830,7 +830,7 @@ ipa_reg_irq_suspend_en_offset(enum ipa_version version)
static inline u32
ipa_reg_irq_suspend_clr_ee_n_offset(enum ipa_version version, u32 ee)
{
- /* assert(version != IPA_VERSION_3_0); */
+ WARN_ON(version == IPA_VERSION_3_0);
if (version < IPA_VERSION_4_9)
return 0x00003038 + 0x1000 * ee;
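
The recurring pattern in this file converts commented-out design-time assertions into checks that actually run. A standalone userspace sketch of the semantics (simplified; the kernel's WARN_ON() also dumps a backtrace):

#include <stdio.h>

/* Simplified stand-in for the kernel's WARN_ON(): evaluate the
 * condition, complain if it holds, and return it so callers can
 * gate on the result.
 */
#define WARN_ON(cond) \
	({ int __ret = !!(cond); \
	   if (__ret) \
		fprintf(stderr, "WARNING at %s:%d\n", __FILE__, __LINE__); \
	   __ret; })

static unsigned int encode_mask(unsigned int version, unsigned int mask)
{
	WARN_ON(version < 40);	/* was a commented-out assert() */
	return mask << 17;	/* roughly u32_encode_bits(mask, GENMASK(20, 17)) */
}

int main(void)
{
	printf("0x%x\n", encode_mask(45, 0x3));
	return 0;
}
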
diff --git a/drivers/net/ipa/ipa_resource.c b/drivers/net/ipa/ipa_resource.c
index 3b2dc216d3a6..e3da95d69409 100644
--- a/drivers/net/ipa/ipa_resource.c
+++ b/drivers/net/ipa/ipa_resource.c
@@ -29,7 +29,6 @@
static bool ipa_resource_limits_valid(struct ipa *ipa,
const struct ipa_resource_data *data)
{
-#ifdef IPA_VALIDATION
u32 group_count;
u32 i;
u32 j;
@@ -65,7 +64,7 @@ static bool ipa_resource_limits_valid(struct ipa *ipa,
if (resource->limits[j].min || resource->limits[j].max)
return false;
}
-#endif /* !IPA_VALIDATION */
+
return true;
}
diff --git a/drivers/net/ipa/ipa_smp2p.c b/drivers/net/ipa/ipa_smp2p.c
index 93270e50b6b3..04b977cf9159 100644
--- a/drivers/net/ipa/ipa_smp2p.c
+++ b/drivers/net/ipa/ipa_smp2p.c
@@ -9,6 +9,7 @@
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/panic_notifier.h>
+#include <linux/pm_runtime.h>
#include <linux/soc/qcom/smem.h>
#include <linux/soc/qcom/smem_state.h>
@@ -84,13 +85,15 @@ struct ipa_smp2p {
*/
static void ipa_smp2p_notify(struct ipa_smp2p *smp2p)
{
+ struct device *dev;
u32 value;
u32 mask;
if (smp2p->notified)
return;
- smp2p->clock_on = ipa_clock_get_additional(smp2p->ipa);
+ dev = &smp2p->ipa->pdev->dev;
+ smp2p->clock_on = pm_runtime_get_if_active(dev, true) > 0;
/* Signal whether the clock is enabled */
mask = BIT(smp2p->enabled_bit);
@@ -150,19 +153,26 @@ static void ipa_smp2p_panic_notifier_unregister(struct ipa_smp2p *smp2p)
static irqreturn_t ipa_smp2p_modem_setup_ready_isr(int irq, void *dev_id)
{
struct ipa_smp2p *smp2p = dev_id;
+ int ret;
mutex_lock(&smp2p->mutex);
- if (!smp2p->disabled) {
- int ret;
+ if (smp2p->disabled)
+ goto out_mutex_unlock;
+ smp2p->disabled = true; /* If any others arrive, ignore them */
- ret = ipa_setup(smp2p->ipa);
- if (ret)
- dev_err(&smp2p->ipa->pdev->dev,
- "error %d from ipa_setup()\n", ret);
- smp2p->disabled = true;
- }
+ /* The clock needs to be active for setup */
+ ret = ipa_clock_get(smp2p->ipa);
+ if (WARN_ON(ret < 0))
+ goto out_clock_put;
+
+ /* An error here won't cause driver shutdown, so warn if one occurs */
+ ret = ipa_setup(smp2p->ipa);
+ WARN(ret != 0, "error %d from ipa_setup()\n", ret);
+out_clock_put:
+ (void)ipa_clock_put(smp2p->ipa);
+out_mutex_unlock:
mutex_unlock(&smp2p->mutex);
return IRQ_HANDLED;
@@ -201,7 +211,7 @@ static void ipa_smp2p_clock_release(struct ipa *ipa)
if (!ipa->smp2p->clock_on)
return;
- ipa_clock_put(ipa);
+ (void)ipa_clock_put(ipa);
ipa->smp2p->clock_on = false;
}
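
For context on the pm_runtime_get_if_active() call above: it takes a runtime-PM reference only if the device is already RPM_ACTIVE, returning a positive value in that case, 0 if the device is not active (no reference taken), or a negative errno. A hedged sketch of the idiom:

/* Sketch: probe whether the hardware is powered without waking it.
 * A positive return means we now hold a reference that must later
 * be dropped with pm_runtime_put().
 */
static bool example_clock_is_on(struct device *dev)
{
	return pm_runtime_get_if_active(dev, true) > 0;
}
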
diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c
index c617a9156f26..2324e1b93e37 100644
--- a/drivers/net/ipa/ipa_table.c
+++ b/drivers/net/ipa/ipa_table.c
@@ -120,8 +120,6 @@
*/
#define IPA_ZERO_RULE_SIZE (2 * sizeof(__le32))
-#ifdef IPA_VALIDATE
-
/* Check things that can be validated at build time. */
static void ipa_table_validate_build(void)
{
@@ -161,7 +159,7 @@ ipa_table_valid_one(struct ipa *ipa, enum ipa_mem_id mem_id, bool route)
else
size = (1 + IPA_FILTER_COUNT_MAX) * sizeof(__le64);
- if (!ipa_cmd_table_valid(ipa, mem, route, ipv6, hashed))
+ if (!ipa_cmd_table_valid(ipa, mem, route))
return false;
/* mem->size >= size is sufficient, but we'll demand more */
@@ -169,7 +167,7 @@ ipa_table_valid_one(struct ipa *ipa, enum ipa_mem_id mem_id, bool route)
return true;
/* Hashed table regions can be zero size if hashing is not supported */
- if (hashed && !mem->size)
+ if (ipa_table_hash_support(ipa) && !mem->size)
return true;
dev_err(dev, "%s table region %u size 0x%02x, expected 0x%02x\n",
@@ -183,14 +181,22 @@ bool ipa_table_valid(struct ipa *ipa)
{
bool valid;
- valid = ipa_table_valid_one(IPA_MEM_V4_FILTER, false);
- valid = valid && ipa_table_valid_one(IPA_MEM_V4_FILTER_HASHED, false);
- valid = valid && ipa_table_valid_one(IPA_MEM_V6_FILTER, false);
- valid = valid && ipa_table_valid_one(IPA_MEM_V6_FILTER_HASHED, false);
- valid = valid && ipa_table_valid_one(IPA_MEM_V4_ROUTE, true);
- valid = valid && ipa_table_valid_one(IPA_MEM_V4_ROUTE_HASHED, true);
- valid = valid && ipa_table_valid_one(IPA_MEM_V6_ROUTE, true);
- valid = valid && ipa_table_valid_one(IPA_MEM_V6_ROUTE_HASHED, true);
+ valid = ipa_table_valid_one(ipa, IPA_MEM_V4_FILTER, false);
+ valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V6_FILTER, false);
+ valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V4_ROUTE, true);
+ valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V6_ROUTE, true);
+
+ if (!ipa_table_hash_support(ipa))
+ return valid;
+
+ valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V4_FILTER_HASHED,
+ false);
+ valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V6_FILTER_HASHED,
+ false);
+ valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V4_ROUTE_HASHED,
+ true);
+ valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V6_ROUTE_HASHED,
+ true);
return valid;
}
@@ -217,14 +223,6 @@ bool ipa_filter_map_valid(struct ipa *ipa, u32 filter_map)
return true;
}
-#else /* !IPA_VALIDATE */
-static void ipa_table_validate_build(void)
-
-{
-}
-
-#endif /* !IPA_VALIDATE */
-
/* Zero entry count means no table, so just return a 0 address */
static dma_addr_t ipa_table_addr(struct ipa *ipa, bool filter_mask, u16 count)
{
@@ -233,7 +231,7 @@ static dma_addr_t ipa_table_addr(struct ipa *ipa, bool filter_mask, u16 count)
if (!count)
return 0;
-/* assert(count <= max_t(u32, IPA_FILTER_COUNT_MAX, IPA_ROUTE_COUNT_MAX)); */
+ WARN_ON(count > max_t(u32, IPA_FILTER_COUNT_MAX, IPA_ROUTE_COUNT_MAX));
/* Skip over the zero rule and possibly the filter mask */
skip = filter_mask ? 1 : 2;
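
A small standalone illustration of the "valid = valid && ..." accumulation used in ipa_table_valid() above: once one region fails, the remaining checks are skipped entirely (names are illustrative only):

#include <stdbool.h>
#include <stdio.h>

static bool check(const char *name, bool ok)
{
	printf("checking %s\n", name);
	return ok;
}

int main(void)
{
	bool valid;

	valid = check("v4-filter", true);
	valid = valid && check("v6-filter", false);
	valid = valid && check("v4-route", true);	/* never evaluated */

	printf("valid=%d\n", valid);
	return 0;
}
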
diff --git a/drivers/net/ipa/ipa_table.h b/drivers/net/ipa/ipa_table.h
index 1e2be9fce2f8..b6a9a0d79d68 100644
--- a/drivers/net/ipa/ipa_table.h
+++ b/drivers/net/ipa/ipa_table.h
@@ -16,8 +16,6 @@ struct ipa;
/* The maximum number of route table entries (IPv4, IPv6; hashed or not) */
#define IPA_ROUTE_COUNT_MAX 15
-#ifdef IPA_VALIDATE
-
/**
* ipa_table_valid() - Validate route and filter table memory regions
* @ipa: IPA pointer
@@ -35,20 +33,6 @@ bool ipa_table_valid(struct ipa *ipa);
*/
bool ipa_filter_map_valid(struct ipa *ipa, u32 filter_mask);
-#else /* !IPA_VALIDATE */
-
-static inline bool ipa_table_valid(struct ipa *ipa)
-{
- return true;
-}
-
-static inline bool ipa_filter_map_valid(struct ipa *ipa, u32 filter_mask)
-{
- return true;
-}
-
-#endif /* !IPA_VALIDATE */
-
/**
* ipa_table_hash_support() - Return true if hashed tables are supported
* @ipa: IPA pointer
diff --git a/drivers/net/ipa/ipa_uc.c b/drivers/net/ipa/ipa_uc.c
index fd9219863234..9c8818c39073 100644
--- a/drivers/net/ipa/ipa_uc.c
+++ b/drivers/net/ipa/ipa_uc.c
@@ -131,7 +131,7 @@ static void ipa_uc_event_handler(struct ipa *ipa, enum ipa_irq_id irq_id)
if (shared->event == IPA_UC_EVENT_ERROR)
dev_err(dev, "microcontroller error event\n");
else if (shared->event != IPA_UC_EVENT_LOG_INFO)
- dev_err(dev, "unsupported microcontroller event %hhu\n",
+ dev_err(dev, "unsupported microcontroller event %u\n",
shared->event);
/* The LOG_INFO event can be safely ignored */
}
@@ -140,53 +140,67 @@ static void ipa_uc_event_handler(struct ipa *ipa, enum ipa_irq_id irq_id)
static void ipa_uc_response_hdlr(struct ipa *ipa, enum ipa_irq_id irq_id)
{
struct ipa_uc_mem_area *shared = ipa_uc_shared(ipa);
+ struct device *dev = &ipa->pdev->dev;
/* An INIT_COMPLETED response message is sent to the AP by the
* microcontroller when it is operational. Other than this, the AP
* should only receive responses from the microcontroller when it has
* sent it a request message.
*
- * We can drop the clock reference taken in ipa_uc_setup() once we
+ * We can drop the clock reference taken in ipa_uc_clock() once we
* know the microcontroller has finished its initialization.
*/
switch (shared->response) {
case IPA_UC_RESPONSE_INIT_COMPLETED:
- ipa->uc_loaded = true;
- ipa_clock_put(ipa);
+ if (ipa->uc_clocked) {
+ ipa->uc_loaded = true;
+ (void)ipa_clock_put(ipa);
+ ipa->uc_clocked = false;
+ } else {
+ dev_warn(dev, "unexpected init_completed response\n");
+ }
break;
default:
- dev_warn(&ipa->pdev->dev,
- "unsupported microcontroller response %hhu\n",
+ dev_warn(dev, "unsupported microcontroller response %u\n",
shared->response);
break;
}
}
-/* ipa_uc_setup() - Set up the microcontroller */
-void ipa_uc_setup(struct ipa *ipa)
+/* Configure the IPA microcontroller subsystem */
+void ipa_uc_config(struct ipa *ipa)
{
- /* The microcontroller needs the IPA clock running until it has
- * completed its initialization. It signals this by sending an
- * INIT_COMPLETED response message to the AP. This could occur after
- * we have finished doing the rest of the IPA initialization, so we
- * need to take an extra "proxy" reference, and hold it until we've
- * received that signal. (This reference is dropped in
- * ipa_uc_response_hdlr(), above.)
- */
- ipa_clock_get(ipa);
-
+ ipa->uc_clocked = false;
ipa->uc_loaded = false;
ipa_interrupt_add(ipa->interrupt, IPA_IRQ_UC_0, ipa_uc_event_handler);
ipa_interrupt_add(ipa->interrupt, IPA_IRQ_UC_1, ipa_uc_response_hdlr);
}
-/* Inverse of ipa_uc_setup() */
-void ipa_uc_teardown(struct ipa *ipa)
+/* Inverse of ipa_uc_config() */
+void ipa_uc_deconfig(struct ipa *ipa)
{
ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_UC_1);
ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_UC_0);
- if (!ipa->uc_loaded)
- ipa_clock_put(ipa);
+ if (ipa->uc_clocked)
+ (void)ipa_clock_put(ipa);
+}
+
+/* Take a proxy clock reference for the microcontroller */
+void ipa_uc_clock(struct ipa *ipa)
+{
+ static bool already;
+ int ret;
+
+ if (already)
+ return;
+ already = true; /* Only do this on first boot */
+
+	/* This clock reference is dropped in ipa_uc_response_hdlr() above */
+ ret = ipa_clock_get(ipa);
+ if (WARN(ret < 0, "error %d getting proxy clock\n", ret))
+ (void)ipa_clock_put(ipa);
+
+ ipa->uc_clocked = ret >= 0;
}
/* Send a command to the microcontroller */
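
The "static bool already" in ipa_uc_clock() is a one-shot latch: the proxy reference is only needed for the first modem boot, since the microcontroller stays loaded afterwards. The idiom in isolation, as a sketch:

/* Function-local static: runs its body on the first call only.
 * Safe here because the caller is serialized; an unserialized
 * context would want a once-primitive or an atomic instead.
 */
static void do_first_boot_work(void)
{
	static bool already;

	if (already)
		return;
	already = true;

	/* ... first-call-only work ... */
}
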
diff --git a/drivers/net/ipa/ipa_uc.h b/drivers/net/ipa/ipa_uc.h
index e8510899a3f0..14e4e1115aa7 100644
--- a/drivers/net/ipa/ipa_uc.h
+++ b/drivers/net/ipa/ipa_uc.h
@@ -9,16 +9,30 @@
struct ipa;
/**
- * ipa_uc_setup() - set up the IPA microcontroller subsystem
+ * ipa_uc_config() - Configure the IPA microcontroller subsystem
* @ipa: IPA pointer
*/
-void ipa_uc_setup(struct ipa *ipa);
+void ipa_uc_config(struct ipa *ipa);
/**
- * ipa_uc_teardown() - inverse of ipa_uc_setup()
+ * ipa_uc_deconfig() - Inverse of ipa_uc_config()
* @ipa: IPA pointer
*/
-void ipa_uc_teardown(struct ipa *ipa);
+void ipa_uc_deconfig(struct ipa *ipa);
+
+/**
+ * ipa_uc_clock() - Take a proxy clock reference for the microcontroller
+ * @ipa: IPA pointer
+ *
+ * The first time the modem boots, it loads firmware for and starts the
+ * IPA-resident microcontroller. The microcontroller signals that it
+ * has completed its initialization by sending an INIT_COMPLETED response
+ * message to the AP. The AP must ensure the IPA core clock is operating
+ * until it receives this message, and to do so we take a "proxy" clock
+ * reference on its behalf here. Once we receive the INIT_COMPLETED
+ * message (in ipa_uc_response_hdlr()) we drop this clock reference.
+ */
+void ipa_uc_clock(struct ipa *ipa);
/**
* ipa_uc_panic_notifier()
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index a707502a0c0f..c0b21a5580d5 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -732,6 +732,7 @@ static int ipvlan_device_event(struct notifier_block *unused,
port = ipvlan_port_get_rtnl(dev);
switch (event) {
+ case NETDEV_UP:
case NETDEV_CHANGE:
list_for_each_entry(ipvlan, &port->ipvlans, pnode)
netif_stacked_transfer_operstate(ipvlan->phy_dev,
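
The shape of the notifier change above: NETDEV_UP now shares the NETDEV_CHANGE handling, so operstate is propagated to the ipvlan slaves when the lower device comes up, not only when its state changes. A minimal sketch (hypothetical handler name):

static int example_device_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	switch (event) {
	case NETDEV_UP:		/* newly handled: lower device came up */
	case NETDEV_CHANGE:	/* existing: lower device state changed */
		/* propagate operstate to each stacked device */
		break;
	}
	return NOTIFY_DONE;
}
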
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 80de9768ecd4..35f46ad040b0 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -829,7 +829,7 @@ static int macvlan_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
-static int macvlan_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+static int macvlan_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct net_device *real_dev = macvlan_dev_real_dev(dev);
const struct net_device_ops *ops = real_dev->netdev_ops;
@@ -845,8 +845,8 @@ static int macvlan_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
break;
fallthrough;
case SIOCGHWTSTAMP:
- if (netif_device_present(real_dev) && ops->ndo_do_ioctl)
- err = ops->ndo_do_ioctl(real_dev, &ifrr, cmd);
+ if (netif_device_present(real_dev) && ops->ndo_eth_ioctl)
+ err = ops->ndo_eth_ioctl(real_dev, &ifrr, cmd);
break;
}
@@ -1151,7 +1151,7 @@ static const struct net_device_ops macvlan_netdev_ops = {
.ndo_stop = macvlan_stop,
.ndo_start_xmit = macvlan_start_xmit,
.ndo_change_mtu = macvlan_change_mtu,
- .ndo_do_ioctl = macvlan_do_ioctl,
+ .ndo_eth_ioctl = macvlan_eth_ioctl,
.ndo_fix_features = macvlan_fix_features,
.ndo_change_rx_flags = macvlan_change_rx_flags,
.ndo_set_mac_address = macvlan_set_mac_address,
diff --git a/drivers/net/mctp/Kconfig b/drivers/net/mctp/Kconfig
new file mode 100644
index 000000000000..d8f966cedc89
--- /dev/null
+++ b/drivers/net/mctp/Kconfig
@@ -0,0 +1,8 @@
+
+if MCTP
+
+menu "MCTP Device Drivers"
+
+endmenu
+
+endif
diff --git a/drivers/net/mctp/Makefile b/drivers/net/mctp/Makefile
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/drivers/net/mctp/Makefile
diff --git a/drivers/net/mdio/Kconfig b/drivers/net/mdio/Kconfig
index 99a6c13a11af..6da1fcb25847 100644
--- a/drivers/net/mdio/Kconfig
+++ b/drivers/net/mdio/Kconfig
@@ -169,9 +169,10 @@ config MDIO_OCTEON
config MDIO_IPQ4019
tristate "Qualcomm IPQ4019 MDIO interface support"
depends on HAS_IOMEM && OF_MDIO
+ depends on COMMON_CLK
help
This driver supports the MDIO interface found in Qualcomm
- IPQ40xx series Soc-s.
+	  IPQ40xx, IPQ60xx, IPQ807x and IPQ50xx series SoCs.
config MDIO_IPQ8064
tristate "Qualcomm IPQ8064 MDIO interface support"
diff --git a/drivers/net/mdio/mdio-ipq4019.c b/drivers/net/mdio/mdio-ipq4019.c
index 9cd71d896963..14e08b786334 100644
--- a/drivers/net/mdio/mdio-ipq4019.c
+++ b/drivers/net/mdio/mdio-ipq4019.c
@@ -11,6 +11,7 @@
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
+#include <linux/clk.h>
#define MDIO_MODE_REG 0x40
#define MDIO_ADDR_REG 0x44
@@ -31,8 +32,15 @@
#define IPQ4019_MDIO_TIMEOUT 10000
#define IPQ4019_MDIO_SLEEP 10
+/* The MDIO clock source frequency is fixed at 100 MHz */
+#define IPQ_MDIO_CLK_RATE 100000000
+
+#define IPQ_PHY_SET_DELAY_US 100000
+
struct ipq4019_mdio_data {
void __iomem *membase;
+ void __iomem *eth_ldo_rdy;
+ struct clk *mdio_clk;
};
static int ipq4019_mdio_wait_busy(struct mii_bus *bus)
@@ -171,10 +179,35 @@ static int ipq4019_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
return 0;
}
+static int ipq_mdio_reset(struct mii_bus *bus)
+{
+ struct ipq4019_mdio_data *priv = bus->priv;
+ u32 val;
+ int ret;
+
+	/* Indicate to the CMN_PLL block that the ethernet LDO is ready,
+	 * if platform resource 1 is specified in the device tree.
+	 */
+ if (priv->eth_ldo_rdy) {
+ val = readl(priv->eth_ldo_rdy);
+ val |= BIT(0);
+ writel(val, priv->eth_ldo_rdy);
+ fsleep(IPQ_PHY_SET_DELAY_US);
+ }
+
+ /* Configure MDIO clock source frequency if clock is specified in the device tree */
+ ret = clk_set_rate(priv->mdio_clk, IPQ_MDIO_CLK_RATE);
+ if (ret)
+ return ret;
+
+ return clk_prepare_enable(priv->mdio_clk);
+}
+
static int ipq4019_mdio_probe(struct platform_device *pdev)
{
struct ipq4019_mdio_data *priv;
struct mii_bus *bus;
+ struct resource *res;
int ret;
bus = devm_mdiobus_alloc_size(&pdev->dev, sizeof(*priv));
@@ -187,9 +220,19 @@ static int ipq4019_mdio_probe(struct platform_device *pdev)
if (IS_ERR(priv->membase))
return PTR_ERR(priv->membase);
+ priv->mdio_clk = devm_clk_get_optional(&pdev->dev, "gcc_mdio_ahb_clk");
+ if (IS_ERR(priv->mdio_clk))
+ return PTR_ERR(priv->mdio_clk);
+
+	/* This platform resource is provided on the IPQ5018 chipset */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (res)
+ priv->eth_ldo_rdy = devm_ioremap_resource(&pdev->dev, res);
+
bus->name = "ipq4019_mdio";
bus->read = ipq4019_mdio_read;
bus->write = ipq4019_mdio_write;
+ bus->reset = ipq_mdio_reset;
bus->parent = &pdev->dev;
snprintf(bus->id, MII_BUS_ID_SIZE, "%s%d", pdev->name, pdev->id);
@@ -215,6 +258,7 @@ static int ipq4019_mdio_remove(struct platform_device *pdev)
static const struct of_device_id ipq4019_mdio_dt_ids[] = {
{ .compatible = "qcom,ipq4019-mdio" },
+ { .compatible = "qcom,ipq5018-mdio" },
{ }
};
MODULE_DEVICE_TABLE(of, ipq4019_mdio_dt_ids);
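
One detail worth noting in ipq_mdio_reset() above: devm_clk_get_optional() returns NULL rather than an error when the clock is absent from the device tree, and the clk API treats a NULL clk as a no-op, so clk_set_rate() and clk_prepare_enable() can be called unconditionally. The pairing, in brief:

/* clk may legitimately be NULL on SoCs whose device tree omits it;
 * clk_set_rate(NULL, ...) and clk_prepare_enable(NULL) then succeed
 * as no-ops.
 */
priv->mdio_clk = devm_clk_get_optional(&pdev->dev, "gcc_mdio_ahb_clk");
if (IS_ERR(priv->mdio_clk))
	return PTR_ERR(priv->mdio_clk);	/* e.g. -EPROBE_DEFER */
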
diff --git a/drivers/net/mhi/Makefile b/drivers/net/mhi/Makefile
deleted file mode 100644
index f71b9f8f3c4f..000000000000
--- a/drivers/net/mhi/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-obj-$(CONFIG_MHI_NET) += mhi_net.o
-
-mhi_net-y := net.o proto_mbim.o
diff --git a/drivers/net/mhi/mhi.h b/drivers/net/mhi/mhi.h
deleted file mode 100644
index 1d0c499d27a3..000000000000
--- a/drivers/net/mhi/mhi.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/* MHI Network driver - Network over MHI bus
- *
- * Copyright (C) 2021 Linaro Ltd <loic.poulain@linaro.org>
- */
-
-struct mhi_net_stats {
- u64_stats_t rx_packets;
- u64_stats_t rx_bytes;
- u64_stats_t rx_errors;
- u64_stats_t rx_dropped;
- u64_stats_t rx_length_errors;
- u64_stats_t tx_packets;
- u64_stats_t tx_bytes;
- u64_stats_t tx_errors;
- u64_stats_t tx_dropped;
- struct u64_stats_sync tx_syncp;
- struct u64_stats_sync rx_syncp;
-};
-
-struct mhi_net_dev {
- struct mhi_device *mdev;
- struct net_device *ndev;
- struct sk_buff *skbagg_head;
- struct sk_buff *skbagg_tail;
- const struct mhi_net_proto *proto;
- void *proto_data;
- struct delayed_work rx_refill;
- struct mhi_net_stats stats;
- u32 rx_queue_sz;
- int msg_enable;
- unsigned int mru;
-};
-
-struct mhi_net_proto {
- int (*init)(struct mhi_net_dev *mhi_netdev);
- struct sk_buff * (*tx_fixup)(struct mhi_net_dev *mhi_netdev, struct sk_buff *skb);
- void (*rx)(struct mhi_net_dev *mhi_netdev, struct sk_buff *skb);
-};
-
-extern const struct mhi_net_proto proto_mbim;
diff --git a/drivers/net/mhi/proto_mbim.c b/drivers/net/mhi/proto_mbim.c
deleted file mode 100644
index bf1ad863237d..000000000000
--- a/drivers/net/mhi/proto_mbim.c
+++ /dev/null
@@ -1,304 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* MHI Network driver - Network over MHI bus
- *
- * Copyright (C) 2021 Linaro Ltd <loic.poulain@linaro.org>
- *
- * This driver copy some code from cdc_ncm, which is:
- * Copyright (C) ST-Ericsson 2010-2012
- * and cdc_mbim, which is:
- * Copyright (c) 2012 Smith Micro Software, Inc.
- * Copyright (c) 2012 Bjørn Mork <bjorn@mork.no>
- *
- */
-
-#include <linux/ethtool.h>
-#include <linux/if_vlan.h>
-#include <linux/ip.h>
-#include <linux/mii.h>
-#include <linux/netdevice.h>
-#include <linux/wwan.h>
-#include <linux/skbuff.h>
-#include <linux/usb.h>
-#include <linux/usb/cdc.h>
-#include <linux/usb/usbnet.h>
-#include <linux/usb/cdc_ncm.h>
-
-#include "mhi.h"
-
-#define MBIM_NDP16_SIGN_MASK 0x00ffffff
-
-/* Usual WWAN MTU */
-#define MHI_MBIM_DEFAULT_MTU 1500
-
-/* 3500 allows to optimize skb allocation, the skbs will basically fit in
- * one 4K page. Large MBIM packets will simply be split over several MHI
- * transfers and chained by the MHI net layer (zerocopy).
- */
-#define MHI_MBIM_DEFAULT_MRU 3500
-
-struct mbim_context {
- u16 rx_seq;
- u16 tx_seq;
-};
-
-static void __mbim_length_errors_inc(struct mhi_net_dev *dev)
-{
- u64_stats_update_begin(&dev->stats.rx_syncp);
- u64_stats_inc(&dev->stats.rx_length_errors);
- u64_stats_update_end(&dev->stats.rx_syncp);
-}
-
-static void __mbim_errors_inc(struct mhi_net_dev *dev)
-{
- u64_stats_update_begin(&dev->stats.rx_syncp);
- u64_stats_inc(&dev->stats.rx_errors);
- u64_stats_update_end(&dev->stats.rx_syncp);
-}
-
-static int mbim_rx_verify_nth16(struct sk_buff *skb)
-{
- struct mhi_net_dev *dev = wwan_netdev_drvpriv(skb->dev);
- struct mbim_context *ctx = dev->proto_data;
- struct usb_cdc_ncm_nth16 *nth16;
- int len;
-
- if (skb->len < sizeof(struct usb_cdc_ncm_nth16) +
- sizeof(struct usb_cdc_ncm_ndp16)) {
- netif_dbg(dev, rx_err, dev->ndev, "frame too short\n");
- __mbim_length_errors_inc(dev);
- return -EINVAL;
- }
-
- nth16 = (struct usb_cdc_ncm_nth16 *)skb->data;
-
- if (nth16->dwSignature != cpu_to_le32(USB_CDC_NCM_NTH16_SIGN)) {
- netif_dbg(dev, rx_err, dev->ndev,
- "invalid NTH16 signature <%#010x>\n",
- le32_to_cpu(nth16->dwSignature));
- __mbim_errors_inc(dev);
- return -EINVAL;
- }
-
- /* No limit on the block length, except the size of the data pkt */
- len = le16_to_cpu(nth16->wBlockLength);
- if (len > skb->len) {
- netif_dbg(dev, rx_err, dev->ndev,
- "NTB does not fit into the skb %u/%u\n", len,
- skb->len);
- __mbim_length_errors_inc(dev);
- return -EINVAL;
- }
-
- if (ctx->rx_seq + 1 != le16_to_cpu(nth16->wSequence) &&
- (ctx->rx_seq || le16_to_cpu(nth16->wSequence)) &&
- !(ctx->rx_seq == 0xffff && !le16_to_cpu(nth16->wSequence))) {
- netif_dbg(dev, rx_err, dev->ndev,
- "sequence number glitch prev=%d curr=%d\n",
- ctx->rx_seq, le16_to_cpu(nth16->wSequence));
- }
- ctx->rx_seq = le16_to_cpu(nth16->wSequence);
-
- return le16_to_cpu(nth16->wNdpIndex);
-}
-
-static int mbim_rx_verify_ndp16(struct sk_buff *skb, struct usb_cdc_ncm_ndp16 *ndp16)
-{
- struct mhi_net_dev *dev = wwan_netdev_drvpriv(skb->dev);
- int ret;
-
- if (le16_to_cpu(ndp16->wLength) < USB_CDC_NCM_NDP16_LENGTH_MIN) {
- netif_dbg(dev, rx_err, dev->ndev, "invalid DPT16 length <%u>\n",
- le16_to_cpu(ndp16->wLength));
- return -EINVAL;
- }
-
- ret = ((le16_to_cpu(ndp16->wLength) - sizeof(struct usb_cdc_ncm_ndp16))
- / sizeof(struct usb_cdc_ncm_dpe16));
- ret--; /* Last entry is always a NULL terminator */
-
- if (sizeof(struct usb_cdc_ncm_ndp16) +
- ret * sizeof(struct usb_cdc_ncm_dpe16) > skb->len) {
- netif_dbg(dev, rx_err, dev->ndev,
- "Invalid nframes = %d\n", ret);
- return -EINVAL;
- }
-
- return ret;
-}
-
-static void mbim_rx(struct mhi_net_dev *mhi_netdev, struct sk_buff *skb)
-{
- struct net_device *ndev = mhi_netdev->ndev;
- int ndpoffset;
-
- /* Check NTB header and retrieve first NDP offset */
- ndpoffset = mbim_rx_verify_nth16(skb);
- if (ndpoffset < 0) {
- net_err_ratelimited("%s: Incorrect NTB header\n", ndev->name);
- goto error;
- }
-
- /* Process each NDP */
- while (1) {
- struct usb_cdc_ncm_ndp16 ndp16;
- struct usb_cdc_ncm_dpe16 dpe16;
- int nframes, n, dpeoffset;
-
- if (skb_copy_bits(skb, ndpoffset, &ndp16, sizeof(ndp16))) {
- net_err_ratelimited("%s: Incorrect NDP offset (%u)\n",
- ndev->name, ndpoffset);
- __mbim_length_errors_inc(mhi_netdev);
- goto error;
- }
-
- /* Check NDP header and retrieve number of datagrams */
- nframes = mbim_rx_verify_ndp16(skb, &ndp16);
- if (nframes < 0) {
- net_err_ratelimited("%s: Incorrect NDP16\n", ndev->name);
- __mbim_length_errors_inc(mhi_netdev);
- goto error;
- }
-
- /* Only IP data type supported, no DSS in MHI context */
- if ((ndp16.dwSignature & cpu_to_le32(MBIM_NDP16_SIGN_MASK))
- != cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN)) {
- net_err_ratelimited("%s: Unsupported NDP type\n", ndev->name);
- __mbim_errors_inc(mhi_netdev);
- goto next_ndp;
- }
-
- /* Only primary IP session 0 (0x00) supported for now */
- if (ndp16.dwSignature & ~cpu_to_le32(MBIM_NDP16_SIGN_MASK)) {
- net_err_ratelimited("%s: bad packet session\n", ndev->name);
- __mbim_errors_inc(mhi_netdev);
- goto next_ndp;
- }
-
- /* de-aggregate and deliver IP packets */
- dpeoffset = ndpoffset + sizeof(struct usb_cdc_ncm_ndp16);
- for (n = 0; n < nframes; n++, dpeoffset += sizeof(dpe16)) {
- u16 dgram_offset, dgram_len;
- struct sk_buff *skbn;
-
- if (skb_copy_bits(skb, dpeoffset, &dpe16, sizeof(dpe16)))
- break;
-
- dgram_offset = le16_to_cpu(dpe16.wDatagramIndex);
- dgram_len = le16_to_cpu(dpe16.wDatagramLength);
-
- if (!dgram_offset || !dgram_len)
- break; /* null terminator */
-
- skbn = netdev_alloc_skb(ndev, dgram_len);
- if (!skbn)
- continue;
-
- skb_put(skbn, dgram_len);
- skb_copy_bits(skb, dgram_offset, skbn->data, dgram_len);
-
- switch (skbn->data[0] & 0xf0) {
- case 0x40:
- skbn->protocol = htons(ETH_P_IP);
- break;
- case 0x60:
- skbn->protocol = htons(ETH_P_IPV6);
- break;
- default:
- net_err_ratelimited("%s: unknown protocol\n",
- ndev->name);
- __mbim_errors_inc(mhi_netdev);
- dev_kfree_skb_any(skbn);
- continue;
- }
-
- netif_rx(skbn);
- }
-next_ndp:
- /* Other NDP to process? */
- ndpoffset = (int)le16_to_cpu(ndp16.wNextNdpIndex);
- if (!ndpoffset)
- break;
- }
-
- /* free skb */
- dev_consume_skb_any(skb);
- return;
-error:
- dev_kfree_skb_any(skb);
-}
-
-struct mbim_tx_hdr {
- struct usb_cdc_ncm_nth16 nth16;
- struct usb_cdc_ncm_ndp16 ndp16;
- struct usb_cdc_ncm_dpe16 dpe16[2];
-} __packed;
-
-static struct sk_buff *mbim_tx_fixup(struct mhi_net_dev *mhi_netdev,
- struct sk_buff *skb)
-{
- struct mbim_context *ctx = mhi_netdev->proto_data;
- unsigned int dgram_size = skb->len;
- struct usb_cdc_ncm_nth16 *nth16;
- struct usb_cdc_ncm_ndp16 *ndp16;
- struct mbim_tx_hdr *mbim_hdr;
-
- /* For now, this is a partial implementation of CDC MBIM, only one NDP
- * is sent, containing the IP packet (no aggregation).
- */
-
- /* Ensure we have enough headroom for crafting MBIM header */
- if (skb_cow_head(skb, sizeof(struct mbim_tx_hdr))) {
- dev_kfree_skb_any(skb);
- return NULL;
- }
-
- mbim_hdr = skb_push(skb, sizeof(struct mbim_tx_hdr));
-
- /* Fill NTB header */
- nth16 = &mbim_hdr->nth16;
- nth16->dwSignature = cpu_to_le32(USB_CDC_NCM_NTH16_SIGN);
- nth16->wHeaderLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_nth16));
- nth16->wSequence = cpu_to_le16(ctx->tx_seq++);
- nth16->wBlockLength = cpu_to_le16(skb->len);
- nth16->wNdpIndex = cpu_to_le16(sizeof(struct usb_cdc_ncm_nth16));
-
- /* Fill the unique NDP */
- ndp16 = &mbim_hdr->ndp16;
- ndp16->dwSignature = cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN);
- ndp16->wLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_ndp16)
- + sizeof(struct usb_cdc_ncm_dpe16) * 2);
- ndp16->wNextNdpIndex = 0;
-
- /* Datagram follows the mbim header */
- ndp16->dpe16[0].wDatagramIndex = cpu_to_le16(sizeof(struct mbim_tx_hdr));
- ndp16->dpe16[0].wDatagramLength = cpu_to_le16(dgram_size);
-
- /* null termination */
- ndp16->dpe16[1].wDatagramIndex = 0;
- ndp16->dpe16[1].wDatagramLength = 0;
-
- return skb;
-}
-
-static int mbim_init(struct mhi_net_dev *mhi_netdev)
-{
- struct net_device *ndev = mhi_netdev->ndev;
-
- mhi_netdev->proto_data = devm_kzalloc(&ndev->dev,
- sizeof(struct mbim_context),
- GFP_KERNEL);
- if (!mhi_netdev->proto_data)
- return -ENOMEM;
-
- ndev->needed_headroom = sizeof(struct mbim_tx_hdr);
- ndev->mtu = MHI_MBIM_DEFAULT_MTU;
- mhi_netdev->mru = MHI_MBIM_DEFAULT_MRU;
-
- return 0;
-}
-
-const struct mhi_net_proto proto_mbim = {
- .init = mbim_init,
- .rx = mbim_rx,
- .tx_fixup = mbim_tx_fixup,
-};
diff --git a/drivers/net/mhi/net.c b/drivers/net/mhi_net.c
index 11be6bcdd551..975f7f9bdf4c 100644
--- a/drivers/net/mhi/net.c
+++ b/drivers/net/mhi_net.c
@@ -11,28 +11,42 @@
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/u64_stats_sync.h>
-#include <linux/wwan.h>
-
-#include "mhi.h"
#define MHI_NET_MIN_MTU ETH_MIN_MTU
#define MHI_NET_MAX_MTU 0xffff
#define MHI_NET_DEFAULT_MTU 0x4000
-/* When set to false, the default netdev (link 0) is not created, and it's up
- * to user to create the link (via wwan rtnetlink).
- */
-static bool create_default_iface = true;
-module_param(create_default_iface, bool, 0);
+struct mhi_net_stats {
+ u64_stats_t rx_packets;
+ u64_stats_t rx_bytes;
+ u64_stats_t rx_errors;
+ u64_stats_t tx_packets;
+ u64_stats_t tx_bytes;
+ u64_stats_t tx_errors;
+ u64_stats_t tx_dropped;
+ struct u64_stats_sync tx_syncp;
+ struct u64_stats_sync rx_syncp;
+};
+
+struct mhi_net_dev {
+ struct mhi_device *mdev;
+ struct net_device *ndev;
+ struct sk_buff *skbagg_head;
+ struct sk_buff *skbagg_tail;
+ struct delayed_work rx_refill;
+ struct mhi_net_stats stats;
+ u32 rx_queue_sz;
+ int msg_enable;
+ unsigned int mru;
+};
struct mhi_device_info {
const char *netname;
- const struct mhi_net_proto *proto;
};
static int mhi_ndo_open(struct net_device *ndev)
{
- struct mhi_net_dev *mhi_netdev = wwan_netdev_drvpriv(ndev);
+ struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
/* Feed the rx buffer pool */
schedule_delayed_work(&mhi_netdev->rx_refill, 0);
@@ -47,7 +61,7 @@ static int mhi_ndo_open(struct net_device *ndev)
static int mhi_ndo_stop(struct net_device *ndev)
{
- struct mhi_net_dev *mhi_netdev = wwan_netdev_drvpriv(ndev);
+ struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
netif_stop_queue(ndev);
netif_carrier_off(ndev);
@@ -58,17 +72,10 @@ static int mhi_ndo_stop(struct net_device *ndev)
static netdev_tx_t mhi_ndo_xmit(struct sk_buff *skb, struct net_device *ndev)
{
- struct mhi_net_dev *mhi_netdev = wwan_netdev_drvpriv(ndev);
- const struct mhi_net_proto *proto = mhi_netdev->proto;
+ struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
struct mhi_device *mdev = mhi_netdev->mdev;
int err;
- if (proto && proto->tx_fixup) {
- skb = proto->tx_fixup(mhi_netdev, skb);
- if (unlikely(!skb))
- goto exit_drop;
- }
-
err = mhi_queue_skb(mdev, DMA_TO_DEVICE, skb, skb->len, MHI_EOT);
if (unlikely(err)) {
net_err_ratelimited("%s: Failed to queue TX buf (%d)\n",
@@ -93,7 +100,7 @@ exit_drop:
static void mhi_ndo_get_stats64(struct net_device *ndev,
struct rtnl_link_stats64 *stats)
{
- struct mhi_net_dev *mhi_netdev = wwan_netdev_drvpriv(ndev);
+ struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
unsigned int start;
do {
@@ -101,8 +108,6 @@ static void mhi_ndo_get_stats64(struct net_device *ndev,
stats->rx_packets = u64_stats_read(&mhi_netdev->stats.rx_packets);
stats->rx_bytes = u64_stats_read(&mhi_netdev->stats.rx_bytes);
stats->rx_errors = u64_stats_read(&mhi_netdev->stats.rx_errors);
- stats->rx_dropped = u64_stats_read(&mhi_netdev->stats.rx_dropped);
- stats->rx_length_errors = u64_stats_read(&mhi_netdev->stats.rx_length_errors);
} while (u64_stats_fetch_retry_irq(&mhi_netdev->stats.rx_syncp, start));
do {
@@ -165,7 +170,6 @@ static void mhi_net_dl_callback(struct mhi_device *mhi_dev,
struct mhi_result *mhi_res)
{
struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);
- const struct mhi_net_proto *proto = mhi_netdev->proto;
struct sk_buff *skb = mhi_res->buf_addr;
int free_desc_count;
@@ -205,11 +209,6 @@ static void mhi_net_dl_callback(struct mhi_device *mhi_dev,
mhi_netdev->skbagg_head = NULL;
}
- u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
- u64_stats_inc(&mhi_netdev->stats.rx_packets);
- u64_stats_add(&mhi_netdev->stats.rx_bytes, skb->len);
- u64_stats_update_end(&mhi_netdev->stats.rx_syncp);
-
switch (skb->data[0] & 0xf0) {
case 0x40:
skb->protocol = htons(ETH_P_IP);
@@ -222,10 +221,11 @@ static void mhi_net_dl_callback(struct mhi_device *mhi_dev,
break;
}
- if (proto && proto->rx)
- proto->rx(mhi_netdev, skb);
- else
- netif_rx(skb);
+ u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
+ u64_stats_inc(&mhi_netdev->stats.rx_packets);
+ u64_stats_add(&mhi_netdev->stats.rx_bytes, skb->len);
+ u64_stats_update_end(&mhi_netdev->stats.rx_syncp);
+ netif_rx(skb);
}
/* Refill if RX buffers queue becomes low */
@@ -248,7 +248,6 @@ static void mhi_net_ul_callback(struct mhi_device *mhi_dev,
u64_stats_update_begin(&mhi_netdev->stats.tx_syncp);
if (unlikely(mhi_res->transaction_status)) {
-
/* MHI layer stopping/resetting the UL channel */
if (mhi_res->transaction_status == -ENOTCONN) {
u64_stats_update_end(&mhi_netdev->stats.tx_syncp);
@@ -302,33 +301,18 @@ static void mhi_net_rx_refill_work(struct work_struct *work)
schedule_delayed_work(&mhi_netdev->rx_refill, HZ / 2);
}
-static int mhi_net_newlink(void *ctxt, struct net_device *ndev, u32 if_id,
- struct netlink_ext_ack *extack)
+static int mhi_net_newlink(struct mhi_device *mhi_dev, struct net_device *ndev)
{
- const struct mhi_device_info *info;
- struct mhi_device *mhi_dev = ctxt;
struct mhi_net_dev *mhi_netdev;
int err;
- info = (struct mhi_device_info *)mhi_dev->id->driver_data;
-
- /* For now we only support one link (link context 0), driver must be
- * reworked to break 1:1 relationship for net MBIM and to forward setup
- * call to rmnet(QMAP) otherwise.
- */
- if (if_id != 0)
- return -EINVAL;
-
- if (dev_get_drvdata(&mhi_dev->dev))
- return -EBUSY;
-
- mhi_netdev = wwan_netdev_drvpriv(ndev);
+ mhi_netdev = netdev_priv(ndev);
dev_set_drvdata(&mhi_dev->dev, mhi_netdev);
mhi_netdev->ndev = ndev;
mhi_netdev->mdev = mhi_dev;
mhi_netdev->skbagg_head = NULL;
- mhi_netdev->proto = info->proto;
+ mhi_netdev->mru = mhi_dev->mhi_cntrl->mru;
INIT_DELAYED_WORK(&mhi_netdev->rx_refill, mhi_net_rx_refill_work);
u64_stats_init(&mhi_netdev->stats.rx_syncp);
@@ -342,38 +326,22 @@ static int mhi_net_newlink(void *ctxt, struct net_device *ndev, u32 if_id,
/* Number of transfer descriptors determines size of the queue */
mhi_netdev->rx_queue_sz = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);
- if (extack)
- err = register_netdevice(ndev);
- else
- err = register_netdev(ndev);
+ err = register_netdev(ndev);
if (err)
- goto out_err;
-
- if (mhi_netdev->proto) {
- err = mhi_netdev->proto->init(mhi_netdev);
- if (err)
- goto out_err_proto;
- }
+ return err;
return 0;
-out_err_proto:
- unregister_netdevice(ndev);
out_err:
free_netdev(ndev);
return err;
}
-static void mhi_net_dellink(void *ctxt, struct net_device *ndev,
- struct list_head *head)
+static void mhi_net_dellink(struct mhi_device *mhi_dev, struct net_device *ndev)
{
- struct mhi_net_dev *mhi_netdev = wwan_netdev_drvpriv(ndev);
- struct mhi_device *mhi_dev = ctxt;
+ struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
- if (head)
- unregister_netdevice_queue(ndev, head);
- else
- unregister_netdev(ndev);
+ unregister_netdev(ndev);
mhi_unprepare_from_transfer(mhi_dev);
@@ -382,65 +350,34 @@ static void mhi_net_dellink(void *ctxt, struct net_device *ndev,
dev_set_drvdata(&mhi_dev->dev, NULL);
}
-static const struct wwan_ops mhi_wwan_ops = {
- .priv_size = sizeof(struct mhi_net_dev),
- .setup = mhi_net_setup,
- .newlink = mhi_net_newlink,
- .dellink = mhi_net_dellink,
-};
-
static int mhi_net_probe(struct mhi_device *mhi_dev,
const struct mhi_device_id *id)
{
const struct mhi_device_info *info = (struct mhi_device_info *)id->driver_data;
- struct mhi_controller *cntrl = mhi_dev->mhi_cntrl;
struct net_device *ndev;
int err;
- err = wwan_register_ops(&cntrl->mhi_dev->dev, &mhi_wwan_ops, mhi_dev,
- WWAN_NO_DEFAULT_LINK);
- if (err)
- return err;
-
- if (!create_default_iface)
- return 0;
-
- /* Create a default interface which is used as either RMNET real-dev,
- * MBIM link 0 or ip link 0)
- */
ndev = alloc_netdev(sizeof(struct mhi_net_dev), info->netname,
NET_NAME_PREDICTABLE, mhi_net_setup);
- if (!ndev) {
- err = -ENOMEM;
- goto err_unregister;
- }
+ if (!ndev)
+ return -ENOMEM;
SET_NETDEV_DEV(ndev, &mhi_dev->dev);
- err = mhi_net_newlink(mhi_dev, ndev, 0, NULL);
- if (err)
- goto err_release;
+ err = mhi_net_newlink(mhi_dev, ndev);
+ if (err) {
+ free_netdev(ndev);
+ return err;
+ }
return 0;
-
-err_release:
- free_netdev(ndev);
-err_unregister:
- wwan_unregister_ops(&cntrl->mhi_dev->dev);
-
- return err;
}
static void mhi_net_remove(struct mhi_device *mhi_dev)
{
struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);
- struct mhi_controller *cntrl = mhi_dev->mhi_cntrl;
-
- /* WWAN core takes care of removing remaining links */
- wwan_unregister_ops(&cntrl->mhi_dev->dev);
- if (create_default_iface)
- mhi_net_dellink(mhi_dev, mhi_netdev->ndev, NULL);
+ mhi_net_dellink(mhi_dev, mhi_netdev->ndev);
}
static const struct mhi_device_info mhi_hwip0 = {
@@ -451,18 +388,11 @@ static const struct mhi_device_info mhi_swip0 = {
.netname = "mhi_swip%d",
};
-static const struct mhi_device_info mhi_hwip0_mbim = {
- .netname = "mhi_mbim%d",
- .proto = &proto_mbim,
-};
-
static const struct mhi_device_id mhi_net_id_table[] = {
/* Hardware accelerated data PATH (to modem IPA), protocol agnostic */
{ .chan = "IP_HW0", .driver_data = (kernel_ulong_t)&mhi_hwip0 },
/* Software data PATH (to modem CPU) */
{ .chan = "IP_SW0", .driver_data = (kernel_ulong_t)&mhi_swip0 },
- /* Hardware accelerated data PATH (to modem IPA), MBIM protocol */
- { .chan = "IP_HW0_MBIM", .driver_data = (kernel_ulong_t)&mhi_hwip0_mbim },
{}
};
MODULE_DEVICE_TABLE(mhi, mhi_net_id_table);
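
With the WWAN framework out of the picture, the driver's private data lives in the netdev allocation itself, which is why the accessor changes from wwan_netdev_drvpriv() to netdev_priv() throughout. A sketch of the pairing:

/* netdev_priv() returns the extra sizeof(struct mhi_net_dev) bytes
 * requested from alloc_netdev(), i.e. part of the same allocation.
 */
ndev = alloc_netdev(sizeof(struct mhi_net_dev), info->netname,
		    NET_NAME_PREDICTABLE, mhi_net_setup);
mhi_netdev = netdev_priv(ndev);
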
diff --git a/drivers/net/mii.c b/drivers/net/mii.c
index 779c3a96dba7..22680f47385d 100644
--- a/drivers/net/mii.c
+++ b/drivers/net/mii.c
@@ -49,10 +49,8 @@ static u32 mii_get_an(struct mii_if_info *mii, u16 addr)
*
* The @ecmd parameter is expected to have been cleared before calling
* mii_ethtool_gset().
- *
- * Returns 0 for success, negative on error.
*/
-int mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
+void mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
{
struct net_device *dev = mii->dev;
u16 bmcr, bmsr, ctrl1000 = 0, stat1000 = 0;
@@ -131,8 +129,6 @@ int mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
mii->full_duplex = ecmd->duplex;
/* ignore maxtxpkt, maxrxpkt for now */
-
- return 0;
}
/**
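
Since mii_ethtool_gset() turns out to be infallible, its return type becomes void; any caller still written against the old int-returning signature simply drops the error check (hypothetical caller sketch; priv and ecmd are illustrative):

/* before: err = mii_ethtool_gset(&priv->mii, &ecmd);
 *         if (err) ...
 * after (the call cannot fail):
 */
mii_ethtool_gset(&priv->mii, &ecmd);
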
diff --git a/drivers/net/netdevsim/bus.c b/drivers/net/netdevsim/bus.c
index ccec29970d5b..62d033a1a557 100644
--- a/drivers/net/netdevsim/bus.c
+++ b/drivers/net/netdevsim/bus.c
@@ -183,8 +183,6 @@ new_port_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct nsim_bus_dev *nsim_bus_dev = to_nsim_bus_dev(dev);
- struct nsim_dev *nsim_dev = dev_get_drvdata(dev);
- struct devlink *devlink;
unsigned int port_index;
int ret;
@@ -195,12 +193,15 @@ new_port_store(struct device *dev, struct device_attribute *attr,
if (ret)
return ret;
- devlink = priv_to_devlink(nsim_dev);
+ if (!mutex_trylock(&nsim_bus_dev->nsim_bus_reload_lock))
+ return -EBUSY;
+
+ if (nsim_bus_dev->in_reload) {
+ mutex_unlock(&nsim_bus_dev->nsim_bus_reload_lock);
+ return -EBUSY;
+ }
- mutex_lock(&nsim_bus_dev->nsim_bus_reload_lock);
- devlink_reload_disable(devlink);
ret = nsim_dev_port_add(nsim_bus_dev, NSIM_DEV_PORT_TYPE_PF, port_index);
- devlink_reload_enable(devlink);
mutex_unlock(&nsim_bus_dev->nsim_bus_reload_lock);
return ret ? ret : count;
}
@@ -212,8 +213,6 @@ del_port_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct nsim_bus_dev *nsim_bus_dev = to_nsim_bus_dev(dev);
- struct nsim_dev *nsim_dev = dev_get_drvdata(dev);
- struct devlink *devlink;
unsigned int port_index;
int ret;
@@ -224,12 +223,15 @@ del_port_store(struct device *dev, struct device_attribute *attr,
if (ret)
return ret;
- devlink = priv_to_devlink(nsim_dev);
+ if (!mutex_trylock(&nsim_bus_dev->nsim_bus_reload_lock))
+ return -EBUSY;
+
+ if (nsim_bus_dev->in_reload) {
+ mutex_unlock(&nsim_bus_dev->nsim_bus_reload_lock);
+ return -EBUSY;
+ }
- mutex_lock(&nsim_bus_dev->nsim_bus_reload_lock);
- devlink_reload_disable(devlink);
ret = nsim_dev_port_del(nsim_bus_dev, NSIM_DEV_PORT_TYPE_PF, port_index);
- devlink_reload_enable(devlink);
mutex_unlock(&nsim_bus_dev->nsim_bus_reload_lock);
return ret ? ret : count;
}
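
Both sysfs stores above now use the same trylock-or-EBUSY idiom, which avoids blocking (and potentially deadlocking) against an in-flight devlink reload that also takes nsim_bus_reload_lock. The shape in isolation:

/* Refuse rather than wait: the lock holder may be a devlink reload,
 * which could in turn be waiting on us.
 */
if (!mutex_trylock(&nsim_bus_dev->nsim_bus_reload_lock))
	return -EBUSY;
if (nsim_bus_dev->in_reload) {
	mutex_unlock(&nsim_bus_dev->nsim_bus_reload_lock);
	return -EBUSY;
}
/* ... add or delete the port ... */
mutex_unlock(&nsim_bus_dev->nsim_bus_reload_lock);
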
@@ -262,29 +264,31 @@ static struct device_type nsim_bus_dev_type = {
};
static struct nsim_bus_dev *
-nsim_bus_dev_new(unsigned int id, unsigned int port_count);
+nsim_bus_dev_new(unsigned int id, unsigned int port_count, unsigned int num_queues);
static ssize_t
new_device_store(struct bus_type *bus, const char *buf, size_t count)
{
+ unsigned int id, port_count, num_queues;
struct nsim_bus_dev *nsim_bus_dev;
- unsigned int port_count;
- unsigned int id;
int err;
- err = sscanf(buf, "%u %u", &id, &port_count);
+ err = sscanf(buf, "%u %u %u", &id, &port_count, &num_queues);
switch (err) {
case 1:
port_count = 1;
fallthrough;
case 2:
+ num_queues = 1;
+ fallthrough;
+ case 3:
if (id > INT_MAX) {
pr_err("Value of \"id\" is too big.\n");
return -EINVAL;
}
break;
default:
- pr_err("Format for adding new device is \"id port_count\" (uint uint).\n");
+		pr_err("Format for adding new device is \"id port_count num_queues\" (uint uint uint).\n");
return -EINVAL;
}
@@ -295,7 +299,7 @@ new_device_store(struct bus_type *bus, const char *buf, size_t count)
goto err;
}
- nsim_bus_dev = nsim_bus_dev_new(id, port_count);
+ nsim_bus_dev = nsim_bus_dev_new(id, port_count, num_queues);
if (IS_ERR(nsim_bus_dev)) {
err = PTR_ERR(nsim_bus_dev);
goto err;
@@ -397,7 +401,7 @@ static struct bus_type nsim_bus = {
#define NSIM_BUS_DEV_MAX_VFS 4
static struct nsim_bus_dev *
-nsim_bus_dev_new(unsigned int id, unsigned int port_count)
+nsim_bus_dev_new(unsigned int id, unsigned int port_count, unsigned int num_queues)
{
struct nsim_bus_dev *nsim_bus_dev;
int err;
@@ -413,6 +417,7 @@ nsim_bus_dev_new(unsigned int id, unsigned int port_count)
nsim_bus_dev->dev.bus = &nsim_bus;
nsim_bus_dev->dev.type = &nsim_bus_dev_type;
nsim_bus_dev->port_count = port_count;
+ nsim_bus_dev->num_queues = num_queues;
nsim_bus_dev->initial_net = current->nsproxy->net_ns;
nsim_bus_dev->max_vfs = NSIM_BUS_DEV_MAX_VFS;
mutex_init(&nsim_bus_dev->nsim_bus_reload_lock);
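
The parsing in new_device_store() leans on sscanf() returning the number of fields matched, with the switch falling through to default any missing trailing fields. A standalone illustration:

#include <stdio.h>

int main(void)
{
	unsigned int id, port_count, num_queues;
	int n = sscanf("10 2", "%u %u %u", &id, &port_count, &num_queues);

	switch (n) {
	case 1:
		port_count = 1;
		/* fall through */
	case 2:
		num_queues = 1;
		/* fall through */
	case 3:
		break;
	default:
		return 1;	/* bad format */
	}
	printf("id=%u ports=%u queues=%u\n", id, port_count, num_queues);
	return 0;
}
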
diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
index 6348307bfa84..54313bd57797 100644
--- a/drivers/net/netdevsim/dev.c
+++ b/drivers/net/netdevsim/dev.c
@@ -864,16 +864,24 @@ static int nsim_dev_reload_down(struct devlink *devlink, bool netns_change,
struct netlink_ext_ack *extack)
{
struct nsim_dev *nsim_dev = devlink_priv(devlink);
+ struct nsim_bus_dev *nsim_bus_dev;
+
+ nsim_bus_dev = nsim_dev->nsim_bus_dev;
+ if (!mutex_trylock(&nsim_bus_dev->nsim_bus_reload_lock))
+ return -EOPNOTSUPP;
if (nsim_dev->dont_allow_reload) {
/* For testing purposes, user set debugfs dont_allow_reload
* value to true. So forbid it.
*/
NL_SET_ERR_MSG_MOD(extack, "User forbid the reload for testing purposes");
+ mutex_unlock(&nsim_bus_dev->nsim_bus_reload_lock);
return -EOPNOTSUPP;
}
+ nsim_bus_dev->in_reload = true;
nsim_dev_reload_destroy(nsim_dev);
+ mutex_unlock(&nsim_bus_dev->nsim_bus_reload_lock);
return 0;
}
@@ -882,17 +890,26 @@ static int nsim_dev_reload_up(struct devlink *devlink, enum devlink_reload_actio
struct netlink_ext_ack *extack)
{
struct nsim_dev *nsim_dev = devlink_priv(devlink);
+ struct nsim_bus_dev *nsim_bus_dev;
+ int ret;
+
+ nsim_bus_dev = nsim_dev->nsim_bus_dev;
+ mutex_lock(&nsim_bus_dev->nsim_bus_reload_lock);
+ nsim_bus_dev->in_reload = false;
if (nsim_dev->fail_reload) {
/* For testing purposes, user set debugfs fail_reload
* value to true. Fail right away.
*/
NL_SET_ERR_MSG_MOD(extack, "User setup the reload to fail for testing purposes");
+ mutex_unlock(&nsim_bus_dev->nsim_bus_reload_lock);
return -EINVAL;
}
*actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
- return nsim_dev_reload_create(nsim_dev, extack);
+ ret = nsim_dev_reload_create(nsim_dev, extack);
+ mutex_unlock(&nsim_bus_dev->nsim_bus_reload_lock);
+ return ret;
}
static int nsim_dev_info_get(struct devlink *devlink,
@@ -1431,10 +1448,10 @@ int nsim_dev_probe(struct nsim_bus_dev *nsim_bus_dev)
struct devlink *devlink;
int err;
- devlink = devlink_alloc(&nsim_dev_devlink_ops, sizeof(*nsim_dev));
+ devlink = devlink_alloc_ns(&nsim_dev_devlink_ops, sizeof(*nsim_dev),
+ nsim_bus_dev->initial_net, &nsim_bus_dev->dev);
if (!devlink)
return -ENOMEM;
- devlink_net_set(devlink, nsim_bus_dev->initial_net);
nsim_dev = devlink_priv(devlink);
nsim_dev->nsim_bus_dev = nsim_bus_dev;
nsim_dev->switch_id.id_len = sizeof(nsim_dev->switch_id.id);
@@ -1453,7 +1470,7 @@ int nsim_dev_probe(struct nsim_bus_dev *nsim_bus_dev)
if (err)
goto err_devlink_free;
- err = devlink_register(devlink, &nsim_bus_dev->dev);
+ err = devlink_register(devlink);
if (err)
goto err_resources_unregister;
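
The devlink conversion above folds the namespace and parent device into allocation time: devlink_alloc_ns() replaces the devlink_alloc() + devlink_net_set() two-step, and devlink_register() correspondingly loses its device argument. In outline:

devlink = devlink_alloc_ns(&nsim_dev_devlink_ops, sizeof(*nsim_dev),
			   nsim_bus_dev->initial_net, &nsim_bus_dev->dev);
if (!devlink)
	return -ENOMEM;
/* ... fill in devlink_priv(devlink) ... */
err = devlink_register(devlink);
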
diff --git a/drivers/net/netdevsim/fib.c b/drivers/net/netdevsim/fib.c
index 213d3e5056c8..4300261e2f9e 100644
--- a/drivers/net/netdevsim/fib.c
+++ b/drivers/net/netdevsim/fib.c
@@ -1441,7 +1441,7 @@ static u64 nsim_fib_nexthops_res_occ_get(void *priv)
static void nsim_fib_set_max_all(struct nsim_fib_data *data,
struct devlink *devlink)
{
- enum nsim_resource_id res_ids[] = {
+ static const enum nsim_resource_id res_ids[] = {
NSIM_RESOURCE_IPV4_FIB, NSIM_RESOURCE_IPV4_FIB_RULES,
NSIM_RESOURCE_IPV6_FIB, NSIM_RESOURCE_IPV6_FIB_RULES,
NSIM_RESOURCE_NEXTHOPS,
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index c3aeb15843e2..50572e0f1f52 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -347,7 +347,8 @@ nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port)
struct netdevsim *ns;
int err;
- dev = alloc_netdev(sizeof(*ns), "eth%d", NET_NAME_UNKNOWN, nsim_setup);
+ dev = alloc_netdev_mq(sizeof(*ns), "eth%d", NET_NAME_UNKNOWN, nsim_setup,
+ nsim_dev->nsim_bus_dev->num_queues);
if (!dev)
return ERR_PTR(-ENOMEM);
@@ -392,7 +393,8 @@ void nsim_destroy(struct netdevsim *ns)
static int nsim_validate(struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
- NL_SET_ERR_MSG_MOD(extack, "Please use: echo \"[ID] [PORT_COUNT]\" > /sys/bus/netdevsim/new_device");
+ NL_SET_ERR_MSG_MOD(extack,
+ "Please use: echo \"[ID] [PORT_COUNT] [NUM_QUEUES]\" > /sys/bus/netdevsim/new_device");
return -EOPNOTSUPP;
}
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
index ae462957dcee..793c86dc5a9c 100644
--- a/drivers/net/netdevsim/netdevsim.h
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -352,6 +352,7 @@ struct nsim_bus_dev {
struct device dev;
struct list_head list;
unsigned int port_count;
+ unsigned int num_queues; /* Number of queues for each port on this bus */
struct net *initial_net; /* Purpose of this is to carry net pointer
* during the probe time only.
*/
@@ -361,6 +362,7 @@ struct nsim_bus_dev {
struct nsim_vf_config *vfconfigs;
/* Lock for devlink->reload_enabled in netdevsim module */
struct mutex nsim_bus_reload_lock;
+ bool in_reload;
bool init;
};
diff --git a/drivers/net/pcs/pcs-xpcs.c b/drivers/net/pcs/pcs-xpcs.c
index 4bd61339823c..fb0a83dc09ac 100644
--- a/drivers/net/pcs/pcs-xpcs.c
+++ b/drivers/net/pcs/pcs-xpcs.c
@@ -65,6 +65,9 @@ static const int xpcs_xlgmii_features[] = {
};
static const int xpcs_sgmii_features[] = {
+ ETHTOOL_LINK_MODE_Pause_BIT,
+ ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ ETHTOOL_LINK_MODE_Autoneg_BIT,
ETHTOOL_LINK_MODE_10baseT_Half_BIT,
ETHTOOL_LINK_MODE_10baseT_Full_BIT,
ETHTOOL_LINK_MODE_100baseT_Half_BIT,
@@ -75,6 +78,7 @@ static const int xpcs_sgmii_features[] = {
};
static const int xpcs_2500basex_features[] = {
+ ETHTOOL_LINK_MODE_Pause_BIT,
ETHTOOL_LINK_MODE_Asym_Pause_BIT,
ETHTOOL_LINK_MODE_Autoneg_BIT,
ETHTOOL_LINK_MODE_2500baseX_Full_BIT,
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index c56f703ae998..902495afcb38 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -207,6 +207,12 @@ config MARVELL_88X2222_PHY
Support for the Marvell 88X2222 Dual-port Multi-speed Ethernet
Transceiver.
+config MAXLINEAR_GPHY
+ tristate "Maxlinear Ethernet PHYs"
+ help
+ Support for the Maxlinear GPY115, GPY211, GPY212, GPY215,
+ GPY241, GPY245 PHYs.
+
config MEDIATEK_GE_PHY
tristate "MediaTek Gigabit Ethernet PHYs"
help
@@ -230,6 +236,7 @@ config MICROCHIP_T1_PHY
config MICROSEMI_PHY
tristate "Microsemi PHYs"
depends on MACSEC || MACSEC=n
+ depends on PTP_1588_CLOCK_OPTIONAL || !NETWORK_PHY_TIMESTAMPING
select CRYPTO_LIB_AES if MACSEC
help
Currently supports VSC8514, VSC8530, VSC8531, VSC8540 and VSC8541 PHYs
@@ -247,6 +254,7 @@ config NATIONAL_PHY
config NXP_C45_TJA11XX_PHY
tristate "NXP C45 TJA11XX PHYs"
+ depends on PTP_1588_CLOCK_OPTIONAL
help
Enable support for NXP C45 TJA11XX PHYs.
Currently supports only the TJA1103 PHY.
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 172bb193ae6a..b2728d00fc9a 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -64,6 +64,7 @@ obj-$(CONFIG_LXT_PHY) += lxt.o
obj-$(CONFIG_MARVELL_10G_PHY) += marvell10g.o
obj-$(CONFIG_MARVELL_PHY) += marvell.o
obj-$(CONFIG_MARVELL_88X2222_PHY) += marvell-88x2222.o
+obj-$(CONFIG_MAXLINEAR_GPHY) += mxl-gpy.o
obj-$(CONFIG_MEDIATEK_GE_PHY) += mediatek-ge.o
obj-$(CONFIG_MESON_GXL_PHY) += meson-gxl.o
obj-$(CONFIG_MICREL_KS8995MA) += spi_ks8995.o
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index 5d62b85a4024..bdac087058b2 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -532,12 +532,6 @@ static int at8031_register_regulators(struct phy_device *phydev)
return 0;
}
-static bool at803x_match_phy_id(struct phy_device *phydev, u32 phy_id)
-{
- return (phydev->phy_id & phydev->drv->phy_id_mask)
- == (phy_id & phydev->drv->phy_id_mask);
-}
-
static int at803x_parse_dt(struct phy_device *phydev)
{
struct device_node *node = phydev->mdio.dev.of_node;
@@ -602,8 +596,8 @@ static int at803x_parse_dt(struct phy_device *phydev)
* to the AR8030 so there might be a good chance it works on
* the AR8030 too.
*/
- if (at803x_match_phy_id(phydev, ATH8030_PHY_ID) ||
- at803x_match_phy_id(phydev, ATH8035_PHY_ID)) {
+ if (phydev->drv->phy_id == ATH8030_PHY_ID ||
+ phydev->drv->phy_id == ATH8035_PHY_ID) {
priv->clk_25m_reg &= AT8035_CLK_OUT_MASK;
priv->clk_25m_mask &= AT8035_CLK_OUT_MASK;
}
@@ -631,7 +625,7 @@ static int at803x_parse_dt(struct phy_device *phydev)
/* Only supported on AR8031/AR8033, the AR8030/AR8035 use strapping
* options.
*/
- if (at803x_match_phy_id(phydev, ATH8031_PHY_ID)) {
+ if (phydev->drv->phy_id == ATH8031_PHY_ID) {
if (of_property_read_bool(node, "qca,keep-pll-enabled"))
priv->flags |= AT803X_KEEP_PLL_ENABLED;
@@ -676,7 +670,7 @@ static int at803x_probe(struct phy_device *phydev)
* Switch to the copper page, as otherwise we read
* the PHY capabilities from the fiber side.
*/
- if (at803x_match_phy_id(phydev, ATH8031_PHY_ID)) {
+ if (phydev->drv->phy_id == ATH8031_PHY_ID) {
phy_lock_mdio_bus(phydev);
ret = at803x_write_page(phydev, AT803X_PAGE_COPPER);
phy_unlock_mdio_bus(phydev);
@@ -709,7 +703,7 @@ static int at803x_get_features(struct phy_device *phydev)
if (err)
return err;
- if (!at803x_match_phy_id(phydev, ATH8031_PHY_ID))
+ if (phydev->drv->phy_id != ATH8031_PHY_ID)
return 0;
/* AR8031/AR8033 have different status registers
@@ -820,7 +814,7 @@ static int at803x_config_init(struct phy_device *phydev)
if (ret < 0)
return ret;
- if (at803x_match_phy_id(phydev, ATH8031_PHY_ID)) {
+ if (phydev->drv->phy_id == ATH8031_PHY_ID) {
ret = at8031_pll_config(phydev);
if (ret < 0)
return ret;
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index f7a2ec150e54..211b5476a6f5 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -326,11 +326,9 @@ static irqreturn_t dp83822_handle_interrupt(struct phy_device *phydev)
static int dp8382x_disable_wol(struct phy_device *phydev)
{
- int value = DP83822_WOL_EN | DP83822_WOL_MAGIC_EN |
- DP83822_WOL_SECURE_ON;
-
- return phy_clear_bits_mmd(phydev, DP83822_DEVADDR,
- MII_DP83822_WOL_CFG, value);
+ return phy_clear_bits_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_CFG,
+ DP83822_WOL_EN | DP83822_WOL_MAGIC_EN |
+ DP83822_WOL_SECURE_ON);
}
static int dp83822_read_status(struct phy_device *phydev)
diff --git a/drivers/net/phy/intel-xway.c b/drivers/net/phy/intel-xway.c
index d453ec016168..3c032868ef04 100644
--- a/drivers/net/phy/intel-xway.c
+++ b/drivers/net/phy/intel-xway.c
@@ -8,11 +8,16 @@
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/of.h>
+#include <linux/bitfield.h>
+#define XWAY_MDIO_MIICTRL 0x17 /* mii control */
#define XWAY_MDIO_IMASK 0x19 /* interrupt mask */
#define XWAY_MDIO_ISTAT 0x1A /* interrupt status */
#define XWAY_MDIO_LED 0x1B /* led control */
+#define XWAY_MDIO_MIICTRL_RXSKEW_MASK GENMASK(14, 12)
+#define XWAY_MDIO_MIICTRL_TXSKEW_MASK GENMASK(10, 8)
+
/* bit 15:12 are reserved */
#define XWAY_MDIO_LED_LED3_EN BIT(11) /* Enable the integrated function of LED3 */
#define XWAY_MDIO_LED_LED2_EN BIT(10) /* Enable the integrated function of LED2 */
@@ -157,6 +162,73 @@
#define PHY_ID_PHY11G_VR9_1_2 0xD565A409
#define PHY_ID_PHY22F_VR9_1_2 0xD565A419
+static const int xway_internal_delay[] = {0, 500, 1000, 1500, 2000, 2500,
+ 3000, 3500};
+
+static int xway_gphy_rgmii_init(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ unsigned int delay_size = ARRAY_SIZE(xway_internal_delay);
+ s32 int_delay;
+ int val = 0;
+
+ if (!phy_interface_is_rgmii(phydev))
+ return 0;
+
+ /* Existing behavior was to use default pin strapping delay in rgmii
+ * mode, but rgmii should have meant no delay. Warn existing users,
+ * but do not change anything at the moment.
+ */
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII) {
+ u16 txskew, rxskew;
+
+ val = phy_read(phydev, XWAY_MDIO_MIICTRL);
+ if (val < 0)
+ return val;
+
+ txskew = FIELD_GET(XWAY_MDIO_MIICTRL_TXSKEW_MASK, val);
+ rxskew = FIELD_GET(XWAY_MDIO_MIICTRL_RXSKEW_MASK, val);
+
+ if (txskew > 0 || rxskew > 0)
+ phydev_warn(phydev,
+ "PHY has delays (e.g. via pin strapping), but phy-mode = 'rgmii'\n"
+ "Should be 'rgmii-id' to use internal delays txskew:%d ps rxskew:%d ps\n",
+ xway_internal_delay[txskew],
+ xway_internal_delay[rxskew]);
+ return 0;
+ }
+
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
+ int_delay = phy_get_internal_delay(phydev, dev,
+ xway_internal_delay,
+ delay_size, true);
+
+ /* if rx-internal-delay-ps is missing, use default of 2.0 ns */
+ if (int_delay < 0)
+ int_delay = 4; /* 2000 ps */
+
+ val |= FIELD_PREP(XWAY_MDIO_MIICTRL_RXSKEW_MASK, int_delay);
+ }
+
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
+ int_delay = phy_get_internal_delay(phydev, dev,
+ xway_internal_delay,
+ delay_size, false);
+
+ /* if tx-internal-delay-ps is missing, use default of 2.0 ns */
+ if (int_delay < 0)
+ int_delay = 4; /* 2000 ps */
+
+ val |= FIELD_PREP(XWAY_MDIO_MIICTRL_TXSKEW_MASK, int_delay);
+ }
+
+ return phy_modify(phydev, XWAY_MDIO_MIICTRL,
+ XWAY_MDIO_MIICTRL_RXSKEW_MASK |
+ XWAY_MDIO_MIICTRL_TXSKEW_MASK, val);
+}
+
static int xway_gphy_config_init(struct phy_device *phydev)
{
int err;
@@ -204,6 +276,10 @@ static int xway_gphy_config_init(struct phy_device *phydev)
phy_write_mmd(phydev, MDIO_MMD_VEND2, XWAY_MMD_LED2H, ledxh);
phy_write_mmd(phydev, MDIO_MMD_VEND2, XWAY_MMD_LED2L, ledxl);
+ err = xway_gphy_rgmii_init(phydev);
+ if (err)
+ return err;
+
return 0;
}
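
A standalone sketch of the bitfield arithmetic used above: the RX/TX skew fields are 3-bit indexes into xway_internal_delay[], so index 4 selects 2000 ps. (Simplified 32-bit GENMASK; the kernel macros are equivalent in effect here.)

#include <stdio.h>

#define GENMASK32(h, l)	((~0u >> (31 - (h))) & (~0u << (l)))
#define RXSKEW_MASK	GENMASK32(14, 12)

int main(void)
{
	unsigned int val = 4u << 12;			/* FIELD_PREP(RXSKEW_MASK, 4) */
	unsigned int idx = (val & RXSKEW_MASK) >> 12;	/* FIELD_GET(RXSKEW_MASK, val) */

	printf("skew index %u -> 2000 ps\n", idx);	/* xway_internal_delay[4] */
	return 0;
}
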
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 3de93c9f2744..4fcfca4e1702 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -32,6 +32,7 @@
#include <linux/marvell_phy.h>
#include <linux/bitfield.h>
#include <linux/of.h>
+#include <linux/sfp.h>
#include <linux/io.h>
#include <asm/irq.h>
@@ -46,6 +47,7 @@
#define MII_MARVELL_MISC_TEST_PAGE 0x06
#define MII_MARVELL_VCT7_PAGE 0x07
#define MII_MARVELL_WOL_PAGE 0x11
+#define MII_MARVELL_MODE_PAGE 0x12
#define MII_M1011_IEVENT 0x13
#define MII_M1011_IEVENT_CLEAR 0x0000
@@ -155,6 +157,7 @@
#define MII_88E1318S_PHY_WOL_CTRL 0x10
#define MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS BIT(12)
+#define MII_88E1318S_PHY_WOL_CTRL_LINK_UP_ENABLE BIT(13)
#define MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE BIT(14)
#define MII_PHY_LED_CTRL 16
@@ -176,7 +179,14 @@
#define MII_88E1510_GEN_CTRL_REG_1 0x14
#define MII_88E1510_GEN_CTRL_REG_1_MODE_MASK 0x7
+#define MII_88E1510_GEN_CTRL_REG_1_MODE_RGMII 0x0 /* RGMII to copper */
#define MII_88E1510_GEN_CTRL_REG_1_MODE_SGMII 0x1 /* SGMII to copper */
+/* RGMII to 1000BASE-X */
+#define MII_88E1510_GEN_CTRL_REG_1_MODE_RGMII_1000X 0x2
+/* RGMII to 100BASE-FX */
+#define MII_88E1510_GEN_CTRL_REG_1_MODE_RGMII_100FX 0x3
+/* RGMII to SGMII */
+#define MII_88E1510_GEN_CTRL_REG_1_MODE_RGMII_SGMII 0x4
#define MII_88E1510_GEN_CTRL_REG_1_RESET 0x8000 /* Soft reset */
#define MII_VCT5_TX_RX_MDI0_COUPLING 0x10
@@ -1746,13 +1756,19 @@ static void m88e1318_get_wol(struct phy_device *phydev,
{
int ret;
- wol->supported = WAKE_MAGIC;
+ wol->supported = WAKE_MAGIC | WAKE_PHY;
wol->wolopts = 0;
ret = phy_read_paged(phydev, MII_MARVELL_WOL_PAGE,
MII_88E1318S_PHY_WOL_CTRL);
- if (ret >= 0 && ret & MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE)
+ if (ret < 0)
+ return;
+
+ if (ret & MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE)
wol->wolopts |= WAKE_MAGIC;
+
+ if (ret & MII_88E1318S_PHY_WOL_CTRL_LINK_UP_ENABLE)
+ wol->wolopts |= WAKE_PHY;
}
static int m88e1318_set_wol(struct phy_device *phydev,
@@ -1764,7 +1780,7 @@ static int m88e1318_set_wol(struct phy_device *phydev,
if (oldpage < 0)
goto error;
- if (wol->wolopts & WAKE_MAGIC) {
+ if (wol->wolopts & (WAKE_MAGIC | WAKE_PHY)) {
/* Explicitly switch to page 0x00, just to be sure */
err = marvell_write_page(phydev, MII_MARVELL_COPPER_PAGE);
if (err < 0)
@@ -1796,7 +1812,9 @@ static int m88e1318_set_wol(struct phy_device *phydev,
MII_88E1318S_PHY_LED_TCR_INT_ACTIVE_LOW);
if (err < 0)
goto error;
+ }
+ if (wol->wolopts & WAKE_MAGIC) {
err = marvell_write_page(phydev, MII_MARVELL_WOL_PAGE);
if (err < 0)
goto error;
@@ -1837,6 +1855,30 @@ static int m88e1318_set_wol(struct phy_device *phydev,
goto error;
}
+ if (wol->wolopts & WAKE_PHY) {
+ err = marvell_write_page(phydev, MII_MARVELL_WOL_PAGE);
+ if (err < 0)
+ goto error;
+
+ /* Clear WOL status and enable link up event */
+ err = __phy_modify(phydev, MII_88E1318S_PHY_WOL_CTRL, 0,
+ MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS |
+ MII_88E1318S_PHY_WOL_CTRL_LINK_UP_ENABLE);
+ if (err < 0)
+ goto error;
+ } else {
+ err = marvell_write_page(phydev, MII_MARVELL_WOL_PAGE);
+ if (err < 0)
+ goto error;
+
+ /* Clear WOL status and disable link up event */
+ err = __phy_modify(phydev, MII_88E1318S_PHY_WOL_CTRL,
+ MII_88E1318S_PHY_WOL_CTRL_LINK_UP_ENABLE,
+ MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS);
+ if (err < 0)
+ goto error;
+ }
+
error:
return phy_restore_page(phydev, oldpage, err);
}
@@ -2701,6 +2743,100 @@ static int marvell_probe(struct phy_device *phydev)
return marvell_hwmon_probe(phydev);
}
+static int m88e1510_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
+{
+ struct phy_device *phydev = upstream;
+ phy_interface_t interface;
+ struct device *dev;
+ int oldpage;
+ int ret = 0;
+ u16 mode;
+
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, };
+
+ dev = &phydev->mdio.dev;
+
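+ /* Parse the module EEPROM into a supported-modes mask and let the
+ * SFP core pick the interface mode to use towards the module.
+ */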
+ sfp_parse_support(phydev->sfp_bus, id, supported);
+ interface = sfp_select_interface(phydev->sfp_bus, supported);
+
+ dev_info(dev, "%s SFP module inserted\n", phy_modes(interface));
+
+ switch (interface) {
+ case PHY_INTERFACE_MODE_1000BASEX:
+ mode = MII_88E1510_GEN_CTRL_REG_1_MODE_RGMII_1000X;
+
+ break;
+ case PHY_INTERFACE_MODE_100BASEX:
+ mode = MII_88E1510_GEN_CTRL_REG_1_MODE_RGMII_100FX;
+
+ break;
+ case PHY_INTERFACE_MODE_SGMII:
+ mode = MII_88E1510_GEN_CTRL_REG_1_MODE_RGMII_SGMII;
+
+ break;
+ default:
+ dev_err(dev, "Incompatible SFP module inserted\n");
+
+ return -EINVAL;
+ }
+
+ oldpage = phy_select_page(phydev, MII_MARVELL_MODE_PAGE);
+ if (oldpage < 0)
+ goto error;
+
+ ret = __phy_modify(phydev, MII_88E1510_GEN_CTRL_REG_1,
+ MII_88E1510_GEN_CTRL_REG_1_MODE_MASK, mode);
+ if (ret < 0)
+ goto error;
+
+ ret = __phy_set_bits(phydev, MII_88E1510_GEN_CTRL_REG_1,
+ MII_88E1510_GEN_CTRL_REG_1_RESET);
+
+error:
+ return phy_restore_page(phydev, oldpage, ret);
+}
+
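+/* On module removal, fall back to the default RGMII-to-copper mode. */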
+static void m88e1510_sfp_remove(void *upstream)
+{
+ struct phy_device *phydev = upstream;
+ int oldpage;
+ int ret = 0;
+
+ oldpage = phy_select_page(phydev, MII_MARVELL_MODE_PAGE);
+ if (oldpage < 0)
+ goto error;
+
+ ret = __phy_modify(phydev, MII_88E1510_GEN_CTRL_REG_1,
+ MII_88E1510_GEN_CTRL_REG_1_MODE_MASK,
+ MII_88E1510_GEN_CTRL_REG_1_MODE_RGMII);
+ if (ret < 0)
+ goto error;
+
+ ret = __phy_set_bits(phydev, MII_88E1510_GEN_CTRL_REG_1,
+ MII_88E1510_GEN_CTRL_REG_1_RESET);
+
+error:
+ phy_restore_page(phydev, oldpage, ret);
+}
+
+static const struct sfp_upstream_ops m88e1510_sfp_ops = {
+ .module_insert = m88e1510_sfp_insert,
+ .module_remove = m88e1510_sfp_remove,
+ .attach = phy_sfp_attach,
+ .detach = phy_sfp_detach,
+};
+
+static int m88e1510_probe(struct phy_device *phydev)
+{
+ int err;
+
+ err = marvell_probe(phydev);
+ if (err)
+ return err;
+
+ return phy_sfp_probe(phydev, &m88e1510_sfp_ops);
+}
+
static struct phy_driver marvell_drivers[] = {
{
.phy_id = MARVELL_PHY_ID_88E1101,
@@ -2927,7 +3063,7 @@ static struct phy_driver marvell_drivers[] = {
.driver_data = DEF_MARVELL_HWMON_OPS(m88e1510_hwmon_ops),
.features = PHY_GBIT_FIBRE_FEATURES,
.flags = PHY_POLL_CABLE_TEST,
- .probe = marvell_probe,
+ .probe = m88e1510_probe,
.config_init = m88e1510_config_init,
.config_aneg = m88e1510_config_aneg,
.read_status = marvell_read_status,
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index 53a433442803..0b7cae118ad7 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -28,6 +28,7 @@
#include <linux/marvell_phy.h>
#include <linux/phy.h>
#include <linux/sfp.h>
+#include <linux/netdevice.h>
#define MV_PHY_ALASKA_NBT_QUIRK_MASK 0xfffffffe
#define MV_PHY_ALASKA_NBT_QUIRK_REV (MARVELL_PHY_ID_88X3310 | 0xa)
@@ -104,6 +105,16 @@ enum {
MV_V2_33X0_PORT_CTRL_MACTYPE_10GBASER_NO_SGMII_AN = 0x5,
MV_V2_33X0_PORT_CTRL_MACTYPE_10GBASER_RATE_MATCH = 0x6,
MV_V2_33X0_PORT_CTRL_MACTYPE_USXGMII = 0x7,
+ MV_V2_PORT_INTR_STS = 0xf040,
+ MV_V2_PORT_INTR_MASK = 0xf043,
+ MV_V2_PORT_INTR_STS_WOL_EN = BIT(8),
+ MV_V2_MAGIC_PKT_WORD0 = 0xf06b,
+ MV_V2_MAGIC_PKT_WORD1 = 0xf06c,
+ MV_V2_MAGIC_PKT_WORD2 = 0xf06d,
+ /* Wake on LAN registers */
+ MV_V2_WOL_CTRL = 0xf06e,
+ MV_V2_WOL_CTRL_CLEAR_STS = BIT(15),
+ MV_V2_WOL_CTRL_MAGIC_PKT_EN = BIT(0),
/* Temperature control/read registers (88X3310 only) */
MV_V2_TEMP_CTRL = 0xf08a,
MV_V2_TEMP_CTRL_MASK = 0xc000,
@@ -1020,6 +1031,80 @@ static int mv2111_match_phy_device(struct phy_device *phydev)
return mv211x_match_phy_device(phydev, false);
}
+static void mv3110_get_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol)
+{
+ int ret;
+
+ wol->supported = WAKE_MAGIC;
+ wol->wolopts = 0;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, MV_V2_WOL_CTRL);
+ if (ret < 0)
+ return;
+
+ if (ret & MV_V2_WOL_CTRL_MAGIC_PKT_EN)
+ wol->wolopts |= WAKE_MAGIC;
+}
+
+static int mv3110_set_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol)
+{
+ int ret;
+
+ if (wol->wolopts & WAKE_MAGIC) {
+ /* Enable the WOL interrupt */
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND2,
+ MV_V2_PORT_INTR_MASK,
+ MV_V2_PORT_INTR_STS_WOL_EN);
+ if (ret < 0)
+ return ret;
+
+ /* Store the device address for the magic packet */
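+ /* The 6-byte MAC address is split across three 16-bit registers:
+ * WORD2 = addr[5]:addr[4], WORD1 = addr[3]:addr[2],
+ * WORD0 = addr[1]:addr[0].
+ */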
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND2,
+ MV_V2_MAGIC_PKT_WORD2,
+ ((phydev->attached_dev->dev_addr[5] << 8) |
+ phydev->attached_dev->dev_addr[4]));
+ if (ret < 0)
+ return ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND2,
+ MV_V2_MAGIC_PKT_WORD1,
+ ((phydev->attached_dev->dev_addr[3] << 8) |
+ phydev->attached_dev->dev_addr[2]));
+ if (ret < 0)
+ return ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND2,
+ MV_V2_MAGIC_PKT_WORD0,
+ ((phydev->attached_dev->dev_addr[1] << 8) |
+ phydev->attached_dev->dev_addr[0]));
+ if (ret < 0)
+ return ret;
+
+ /* Clear WOL status and enable magic packet matching */
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND2,
+ MV_V2_WOL_CTRL,
+ MV_V2_WOL_CTRL_MAGIC_PKT_EN |
+ MV_V2_WOL_CTRL_CLEAR_STS);
+ if (ret < 0)
+ return ret;
+ } else {
+ /* Disable magic packet matching & reset WOL status bit */
+ ret = phy_modify_mmd(phydev, MDIO_MMD_VEND2,
+ MV_V2_WOL_CTRL,
+ MV_V2_WOL_CTRL_MAGIC_PKT_EN,
+ MV_V2_WOL_CTRL_CLEAR_STS);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Reset the clear WOL status bit as it does not self-clear */
+ return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2,
+ MV_V2_WOL_CTRL,
+ MV_V2_WOL_CTRL_CLEAR_STS);
+}
+
static struct phy_driver mv3310_drivers[] = {
{
.phy_id = MARVELL_PHY_ID_88X3310,
@@ -1039,6 +1124,8 @@ static struct phy_driver mv3310_drivers[] = {
.set_tunable = mv3310_set_tunable,
.remove = mv3310_remove,
.set_loopback = genphy_c45_loopback,
+ .get_wol = mv3110_get_wol,
+ .set_wol = mv3110_set_wol,
},
{
.phy_id = MARVELL_PHY_ID_88X3310,
@@ -1076,6 +1163,8 @@ static struct phy_driver mv3310_drivers[] = {
.set_tunable = mv3310_set_tunable,
.remove = mv3310_remove,
.set_loopback = genphy_c45_loopback,
+ .get_wol = mv3110_get_wol,
+ .set_wol = mv3110_set_wol,
},
{
.phy_id = MARVELL_PHY_ID_88E2110,
diff --git a/drivers/net/phy/mscc/mscc_ptp.c b/drivers/net/phy/mscc/mscc_ptp.c
index 924ed5b034a4..edb951695b13 100644
--- a/drivers/net/phy/mscc/mscc_ptp.c
+++ b/drivers/net/phy/mscc/mscc_ptp.c
@@ -506,7 +506,7 @@ static int vsc85xx_ptp_cmp_init(struct phy_device *phydev, enum ts_blk blk)
{
struct vsc8531_private *vsc8531 = phydev->priv;
bool base = phydev->mdio.addr == vsc8531->ts_base_addr;
- u8 msgs[] = {
+ static const u8 msgs[] = {
PTP_MSGTYPE_SYNC,
PTP_MSGTYPE_DELAY_REQ
};
@@ -847,7 +847,7 @@ static int vsc85xx_ts_ptp_action_flow(struct phy_device *phydev, enum ts_blk blk
static int vsc85xx_ptp_conf(struct phy_device *phydev, enum ts_blk blk,
bool one_step, bool enable)
{
- u8 msgs[] = {
+ static const u8 msgs[] = {
PTP_MSGTYPE_SYNC,
PTP_MSGTYPE_DELAY_REQ
};
@@ -1268,8 +1268,8 @@ static void vsc8584_set_input_clk_configured(struct phy_device *phydev)
static int __vsc8584_init_ptp(struct phy_device *phydev)
{
struct vsc8531_private *vsc8531 = phydev->priv;
- u32 ltc_seq_e[] = { 0, 400000, 0, 0, 0 };
- u8 ltc_seq_a[] = { 8, 6, 5, 4, 2 };
+ static const u32 ltc_seq_e[] = { 0, 400000, 0, 0, 0 };
+ static const u8 ltc_seq_a[] = { 8, 6, 5, 4, 2 };
u32 val;
if (!vsc8584_is_1588_input_clk_configured(phydev)) {
diff --git a/drivers/net/phy/mxl-gpy.c b/drivers/net/phy/mxl-gpy.c
new file mode 100644
index 000000000000..2d5d5081c3b6
--- /dev/null
+++ b/drivers/net/phy/mxl-gpy.c
@@ -0,0 +1,727 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (C) 2021 Maxlinear Corporation
+ * Copyright (C) 2020 Intel Corporation
+ *
+ * Driver for Maxlinear Ethernet GPY PHYs
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/bitfield.h>
+#include <linux/phy.h>
+#include <linux/netdevice.h>
+
+/* PHY ID */
+#define PHY_ID_GPYx15B_MASK 0xFFFFFFFC
+#define PHY_ID_GPY21xB_MASK 0xFFFFFFF9
+#define PHY_ID_GPY2xx 0x67C9DC00
+#define PHY_ID_GPY115B 0x67C9DF00
+#define PHY_ID_GPY115C 0x67C9DF10
+#define PHY_ID_GPY211B 0x67C9DE08
+#define PHY_ID_GPY211C 0x67C9DE10
+#define PHY_ID_GPY212B 0x67C9DE09
+#define PHY_ID_GPY212C 0x67C9DE20
+#define PHY_ID_GPY215B 0x67C9DF04
+#define PHY_ID_GPY215C 0x67C9DF20
+#define PHY_ID_GPY241B 0x67C9DE40
+#define PHY_ID_GPY241BM 0x67C9DE80
+#define PHY_ID_GPY245B 0x67C9DEC0
+
+#define PHY_MIISTAT 0x18 /* MII state */
+#define PHY_IMASK 0x19 /* interrupt mask */
+#define PHY_ISTAT 0x1A /* interrupt status */
+#define PHY_FWV 0x1E /* firmware version */
+
+#define PHY_MIISTAT_SPD_MASK GENMASK(2, 0)
+#define PHY_MIISTAT_DPX BIT(3)
+#define PHY_MIISTAT_LS BIT(10)
+
+#define PHY_MIISTAT_SPD_10 0
+#define PHY_MIISTAT_SPD_100 1
+#define PHY_MIISTAT_SPD_1000 2
+#define PHY_MIISTAT_SPD_2500 4
+
+#define PHY_IMASK_WOL BIT(15) /* Wake-on-LAN */
+#define PHY_IMASK_ANC BIT(10) /* Auto-Neg complete */
+#define PHY_IMASK_ADSC BIT(5) /* Link auto-downspeed detect */
+#define PHY_IMASK_DXMC BIT(2) /* Duplex mode change */
+#define PHY_IMASK_LSPC BIT(1) /* Link speed change */
+#define PHY_IMASK_LSTC BIT(0) /* Link state change */
+#define PHY_IMASK_MASK (PHY_IMASK_LSTC | \
+ PHY_IMASK_LSPC | \
+ PHY_IMASK_DXMC | \
+ PHY_IMASK_ADSC | \
+ PHY_IMASK_ANC)
+
+#define PHY_FWV_REL_MASK BIT(15)
+#define PHY_FWV_TYPE_MASK GENMASK(11, 8)
+#define PHY_FWV_MINOR_MASK GENMASK(7, 0)
+
+/* SGMII */
+#define VSPEC1_SGMII_CTRL 0x08
+#define VSPEC1_SGMII_CTRL_ANEN BIT(12) /* Aneg enable */
+#define VSPEC1_SGMII_CTRL_ANRS BIT(9) /* Restart Aneg */
+#define VSPEC1_SGMII_ANEN_ANRS (VSPEC1_SGMII_CTRL_ANEN | \
+ VSPEC1_SGMII_CTRL_ANRS)
+
+/* WoL */
+#define VPSPEC2_WOL_CTL 0x0E06
+#define VPSPEC2_WOL_AD01 0x0E08
+#define VPSPEC2_WOL_AD23 0x0E09
+#define VPSPEC2_WOL_AD45 0x0E0A
+#define WOL_EN BIT(0)
+
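+/* Firmware builds (matched by type) with a minor version below these
+ * levels need the SGMII re-autoneg workaround in gpy_config_aneg().
+ */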
+static const struct {
+ int type;
+ int minor;
+} ver_need_sgmii_reaneg[] = {
+ {7, 0x6D},
+ {8, 0x6D},
+ {9, 0x73},
+};
+
+static int gpy_config_init(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Mask all interrupts */
+ ret = phy_write(phydev, PHY_IMASK, 0);
+ if (ret)
+ return ret;
+
+ /* Clear all pending interrupts */
+ ret = phy_read(phydev, PHY_ISTAT);
+ return ret < 0 ? ret : 0;
+}
+
+static int gpy_probe(struct phy_device *phydev)
+{
+ int ret;
+
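+ /* When the PHY was probed as a C22 device, read out the C45 IDs
+ * explicitly so the C45-based helpers used by this driver have
+ * valid IDs to work with.
+ */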
+ if (!phydev->is_c45) {
+ ret = phy_get_c45_ids(phydev);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Show GPY PHY FW version in dmesg */
+ ret = phy_read(phydev, PHY_FWV);
+ if (ret < 0)
+ return ret;
+
+ phydev_info(phydev, "Firmware Version: 0x%04X (%s)\n", ret,
+ (ret & PHY_FWV_REL_MASK) ? "release" : "test");
+
+ return 0;
+}
+
+static bool gpy_sgmii_need_reaneg(struct phy_device *phydev)
+{
+ int fw_ver, fw_type, fw_minor;
+ size_t i;
+
+ fw_ver = phy_read(phydev, PHY_FWV);
+ if (fw_ver < 0)
+ return true;
+
+ fw_type = FIELD_GET(PHY_FWV_TYPE_MASK, fw_ver);
+ fw_minor = FIELD_GET(PHY_FWV_MINOR_MASK, fw_ver);
+
+ for (i = 0; i < ARRAY_SIZE(ver_need_sgmii_reaneg); i++) {
+ if (fw_type != ver_need_sgmii_reaneg[i].type)
+ continue;
+ if (fw_minor < ver_need_sgmii_reaneg[i].minor)
+ return true;
+ break;
+ }
+
+ return false;
+}
+
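+/* If the link came up at 2.5G, switch the host interface to 2500Base-X
+ * and disable SGMII in-band autoneg, which does not apply in that mode.
+ */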
+static bool gpy_2500basex_chk(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_read(phydev, PHY_MIISTAT);
+ if (ret < 0) {
+ phydev_err(phydev, "Error: MDIO register access failed: %d\n",
+ ret);
+ return false;
+ }
+
+ if (!(ret & PHY_MIISTAT_LS) ||
+ FIELD_GET(PHY_MIISTAT_SPD_MASK, ret) != PHY_MIISTAT_SPD_2500)
+ return false;
+
+ phydev->speed = SPEED_2500;
+ phydev->interface = PHY_INTERFACE_MODE_2500BASEX;
+ phy_modify_mmd(phydev, MDIO_MMD_VEND1, VSPEC1_SGMII_CTRL,
+ VSPEC1_SGMII_CTRL_ANEN, 0);
+ return true;
+}
+
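+/* Report whether SGMII in-band autoneg is enabled; assume it is on a
+ * read failure so the caller still takes the AN retrigger path.
+ */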
+static bool gpy_sgmii_aneg_en(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VSPEC1_SGMII_CTRL);
+ if (ret < 0) {
+ phydev_err(phydev, "Error: MMD register access failed: %d\n",
+ ret);
+ return true;
+ }
+
+ return !!(ret & VSPEC1_SGMII_CTRL_ANEN);
+}
+
+static int gpy_config_aneg(struct phy_device *phydev)
+{
+ bool changed = false;
+ u32 adv;
+ int ret;
+
+ if (phydev->autoneg == AUTONEG_DISABLE) {
+ /* Configure half duplex with genphy_setup_forced,
+ * because genphy_c45_pma_setup_forced does not support half duplex.
+ */
+ return phydev->duplex != DUPLEX_FULL
+ ? genphy_setup_forced(phydev)
+ : genphy_c45_pma_setup_forced(phydev);
+ }
+
+ ret = genphy_c45_an_config_aneg(phydev);
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ changed = true;
+
+ adv = linkmode_adv_to_mii_ctrl1000_t(phydev->advertising);
+ ret = phy_modify_changed(phydev, MII_CTRL1000,
+ ADVERTISE_1000FULL | ADVERTISE_1000HALF,
+ adv);
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ changed = true;
+
+ ret = genphy_c45_check_and_restart_aneg(phydev, changed);
+ if (ret < 0)
+ return ret;
+
+ if (phydev->interface == PHY_INTERFACE_MODE_USXGMII ||
+ phydev->interface == PHY_INTERFACE_MODE_INTERNAL)
+ return 0;
+
+ /* No need to trigger re-ANEG if the firmware doesn't require the
+ * workaround, the link speed is 2.5G, or SGMII ANEG is disabled.
+ */
+ if (!gpy_sgmii_need_reaneg(phydev) || gpy_2500basex_chk(phydev) ||
+ !gpy_sgmii_aneg_en(phydev))
+ return 0;
+
+ /* There is a design constraint in GPY2xx devices where SGMII AN is
+ * only triggered on a change of speed. If the PHY link partner's
+ * speed is unchanged even after the PHY TPI goes down and up again,
+ * SGMII AN is not triggered and hence no new in-band message is sent
+ * from the GPY to the MAC-side SGMII.
+ * This can cause an issue during power up, when the PHY comes up
+ * before the MAC. In that case, once the MAC-side SGMII is up, it
+ * would not receive a new in-band message from the GPY carrying the
+ * correct link status, speed and duplex info.
+ *
+ * 1) If the PHY is already up and the TPI link status is still down
+ * (such as after a hard reboot), poll the TPI link status for up
+ * to 4 seconds before retriggering SGMII AN.
+ * 2) If the PHY is already up and the TPI link status is also up
+ * (such as after a soft reboot), polling of the TPI link status is
+ * not needed and SGMII AN is retriggered immediately.
+ * 3) In other conditions, such as the PHY being down or a speed
+ * change, skip retriggering SGMII AN. Note: on a speed change, the
+ * GPY FW will initiate SGMII AN itself.
+ */
+
+ if (phydev->state != PHY_UP)
+ return 0;
+
+ ret = phy_read_poll_timeout(phydev, MII_BMSR, ret, ret & BMSR_LSTATUS,
+ 20000, 4000000, false);
+ if (ret == -ETIMEDOUT)
+ return 0;
+ else if (ret < 0)
+ return ret;
+
+ /* Trigger SGMII AN. */
+ return phy_modify_mmd(phydev, MDIO_MMD_VEND1, VSPEC1_SGMII_CTRL,
+ VSPEC1_SGMII_CTRL_ANRS, VSPEC1_SGMII_CTRL_ANRS);
+}
+
+static void gpy_update_interface(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Interface mode is fixed for USXGMII and integrated PHY */
+ if (phydev->interface == PHY_INTERFACE_MODE_USXGMII ||
+ phydev->interface == PHY_INTERFACE_MODE_INTERNAL)
+ return;
+
+ /* Automatically switch SERDES interface between SGMII and 2500-BaseX
+ * according to speed. Disable ANEG in 2500-BaseX mode.
+ */
+ switch (phydev->speed) {
+ case SPEED_2500:
+ phydev->interface = PHY_INTERFACE_MODE_2500BASEX;
+ ret = phy_modify_mmd(phydev, MDIO_MMD_VEND1, VSPEC1_SGMII_CTRL,
+ VSPEC1_SGMII_CTRL_ANEN, 0);
+ if (ret < 0)
+ phydev_err(phydev,
+ "Error: Disable of SGMII ANEG failed: %d\n",
+ ret);
+ break;
+ case SPEED_1000:
+ case SPEED_100:
+ case SPEED_10:
+ phydev->interface = PHY_INTERFACE_MODE_SGMII;
+ if (gpy_sgmii_aneg_en(phydev))
+ break;
+ /* Enable and restart SGMII ANEG for 10/100/1000Mbps link speed
+ * if ANEG is disabled (in 2500-BaseX mode).
+ */
+ ret = phy_modify_mmd(phydev, MDIO_MMD_VEND1, VSPEC1_SGMII_CTRL,
+ VSPEC1_SGMII_ANEN_ANRS,
+ VSPEC1_SGMII_ANEN_ANRS);
+ if (ret < 0)
+ phydev_err(phydev,
+ "Error: Enable of SGMII ANEG failed: %d\n",
+ ret);
+ break;
+ }
+}
+
+static int gpy_read_status(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = genphy_update_link(phydev);
+ if (ret)
+ return ret;
+
+ phydev->speed = SPEED_UNKNOWN;
+ phydev->duplex = DUPLEX_UNKNOWN;
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
+
+ if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete) {
+ ret = genphy_c45_read_lpa(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* Read the link partner's 1G advertisement */
+ ret = phy_read(phydev, MII_STAT1000);
+ if (ret < 0)
+ return ret;
+ mii_stat1000_mod_linkmode_lpa_t(phydev->lp_advertising, ret);
+ } else if (phydev->autoneg == AUTONEG_DISABLE) {
+ linkmode_zero(phydev->lp_advertising);
+ }
+
+ ret = phy_read(phydev, PHY_MIISTAT);
+ if (ret < 0)
+ return ret;
+
+ phydev->link = (ret & PHY_MIISTAT_LS) ? 1 : 0;
+ phydev->duplex = (ret & PHY_MIISTAT_DPX) ? DUPLEX_FULL : DUPLEX_HALF;
+ switch (FIELD_GET(PHY_MIISTAT_SPD_MASK, ret)) {
+ case PHY_MIISTAT_SPD_10:
+ phydev->speed = SPEED_10;
+ break;
+ case PHY_MIISTAT_SPD_100:
+ phydev->speed = SPEED_100;
+ break;
+ case PHY_MIISTAT_SPD_1000:
+ phydev->speed = SPEED_1000;
+ break;
+ case PHY_MIISTAT_SPD_2500:
+ phydev->speed = SPEED_2500;
+ break;
+ }
+
+ if (phydev->link)
+ gpy_update_interface(phydev);
+
+ return 0;
+}
+
+static int gpy_config_intr(struct phy_device *phydev)
+{
+ u16 mask = 0;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ mask = PHY_IMASK_MASK;
+
+ return phy_write(phydev, PHY_IMASK, mask);
+}
+
+static irqreturn_t gpy_handle_interrupt(struct phy_device *phydev)
+{
+ int reg;
+
+ reg = phy_read(phydev, PHY_ISTAT);
+ if (reg < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ if (!(reg & PHY_IMASK_MASK))
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
+}
+
+static int gpy_set_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol)
+{
+ struct net_device *attach_dev = phydev->attached_dev;
+ int ret;
+
+ if (wol->wolopts & WAKE_MAGIC) {
+ /* MAC address - Byte0:Byte1:Byte2:Byte3:Byte4:Byte5
+ * VPSPEC2_WOL_AD45 = Byte0:Byte1
+ * VPSPEC2_WOL_AD23 = Byte2:Byte3
+ * VPSPEC2_WOL_AD01 = Byte4:Byte5
+ */
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND2,
+ VPSPEC2_WOL_AD45,
+ ((attach_dev->dev_addr[0] << 8) |
+ attach_dev->dev_addr[1]));
+ if (ret < 0)
+ return ret;
+
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND2,
+ VPSPEC2_WOL_AD23,
+ ((attach_dev->dev_addr[2] << 8) |
+ attach_dev->dev_addr[3]));
+ if (ret < 0)
+ return ret;
+
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND2,
+ VPSPEC2_WOL_AD01,
+ ((attach_dev->dev_addr[4] << 8) |
+ attach_dev->dev_addr[5]));
+ if (ret < 0)
+ return ret;
+
+ /* Enable the WOL interrupt */
+ ret = phy_write(phydev, PHY_IMASK, PHY_IMASK_WOL);
+ if (ret < 0)
+ return ret;
+
+ /* Enable magic packet matching */
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND2,
+ VPSPEC2_WOL_CTL,
+ WOL_EN);
+ if (ret < 0)
+ return ret;
+
+ /* Clear the interrupt status register.
+ * Only the WoL interrupt is enabled, so it is safe to clear all.
+ */
+ ret = phy_read(phydev, PHY_ISTAT);
+ if (ret < 0)
+ return ret;
+ } else {
+ /* Disable magic packet matching */
+ ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2,
+ VPSPEC2_WOL_CTL,
+ WOL_EN);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (wol->wolopts & WAKE_PHY) {
+ /* Enable the link state change interrupt */
+ ret = phy_set_bits(phydev, PHY_IMASK, PHY_IMASK_LSTC);
+ if (ret < 0)
+ return ret;
+
+ /* Clear the interrupt status register */
+ ret = phy_read(phydev, PHY_ISTAT);
+ if (ret < 0)
+ return ret;
+
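+ /* Reading PHY_ISTAT cleared all pending events; if anything
+ * besides the link-state-change interrupt was pending, let
+ * phylib handle it now so the event is not lost.
+ */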
+ if (ret & (PHY_IMASK_MASK & ~PHY_IMASK_LSTC))
+ phy_trigger_machine(phydev);
+
+ return 0;
+ }
+
+ /* Disable the link state change interrupt */
+ return phy_clear_bits(phydev, PHY_IMASK, PHY_IMASK_LSTC);
+}
+
+static void gpy_get_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol)
+{
+ int ret;
+
+ wol->supported = WAKE_MAGIC | WAKE_PHY;
+ wol->wolopts = 0;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, VPSPEC2_WOL_CTL);
+ /* A negative return is an errno; don't treat its sign bits as flags */
+ if (ret >= 0 && (ret & WOL_EN))
+ wol->wolopts |= WAKE_MAGIC;
+
+ ret = phy_read(phydev, PHY_IMASK);
+ if (ret >= 0 && (ret & PHY_IMASK_LSTC))
+ wol->wolopts |= WAKE_PHY;
+}
+
+static int gpy_loopback(struct phy_device *phydev, bool enable)
+{
+ int ret;
+
+ ret = phy_modify(phydev, MII_BMCR, BMCR_LOOPBACK,
+ enable ? BMCR_LOOPBACK : 0);
+ if (!ret) {
+ /* It takes some time for PHY device to switch
+ * into/out-of loopback mode.
+ */
+ msleep(100);
+ }
+
+ return ret;
+}
+
+static struct phy_driver gpy_drivers[] = {
+ {
+ PHY_ID_MATCH_MODEL(PHY_ID_GPY2xx),
+ .name = "Maxlinear Ethernet GPY2xx",
+ .get_features = genphy_c45_pma_read_abilities,
+ .config_init = gpy_config_init,
+ .probe = gpy_probe,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .config_aneg = gpy_config_aneg,
+ .aneg_done = genphy_c45_aneg_done,
+ .read_status = gpy_read_status,
+ .config_intr = gpy_config_intr,
+ .handle_interrupt = gpy_handle_interrupt,
+ .set_wol = gpy_set_wol,
+ .get_wol = gpy_get_wol,
+ .set_loopback = gpy_loopback,
+ },
+ {
+ .phy_id = PHY_ID_GPY115B,
+ .phy_id_mask = PHY_ID_GPYx15B_MASK,
+ .name = "Maxlinear Ethernet GPY115B",
+ .get_features = genphy_c45_pma_read_abilities,
+ .config_init = gpy_config_init,
+ .probe = gpy_probe,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .config_aneg = gpy_config_aneg,
+ .aneg_done = genphy_c45_aneg_done,
+ .read_status = gpy_read_status,
+ .config_intr = gpy_config_intr,
+ .handle_interrupt = gpy_handle_interrupt,
+ .set_wol = gpy_set_wol,
+ .get_wol = gpy_get_wol,
+ .set_loopback = gpy_loopback,
+ },
+ {
+ PHY_ID_MATCH_MODEL(PHY_ID_GPY115C),
+ .name = "Maxlinear Ethernet GPY115C",
+ .get_features = genphy_c45_pma_read_abilities,
+ .config_init = gpy_config_init,
+ .probe = gpy_probe,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .config_aneg = gpy_config_aneg,
+ .aneg_done = genphy_c45_aneg_done,
+ .read_status = gpy_read_status,
+ .config_intr = gpy_config_intr,
+ .handle_interrupt = gpy_handle_interrupt,
+ .set_wol = gpy_set_wol,
+ .get_wol = gpy_get_wol,
+ .set_loopback = gpy_loopback,
+ },
+ {
+ .phy_id = PHY_ID_GPY211B,
+ .phy_id_mask = PHY_ID_GPY21xB_MASK,
+ .name = "Maxlinear Ethernet GPY211B",
+ .get_features = genphy_c45_pma_read_abilities,
+ .config_init = gpy_config_init,
+ .probe = gpy_probe,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .config_aneg = gpy_config_aneg,
+ .aneg_done = genphy_c45_aneg_done,
+ .read_status = gpy_read_status,
+ .config_intr = gpy_config_intr,
+ .handle_interrupt = gpy_handle_interrupt,
+ .set_wol = gpy_set_wol,
+ .get_wol = gpy_get_wol,
+ .set_loopback = gpy_loopback,
+ },
+ {
+ PHY_ID_MATCH_MODEL(PHY_ID_GPY211C),
+ .name = "Maxlinear Ethernet GPY211C",
+ .get_features = genphy_c45_pma_read_abilities,
+ .config_init = gpy_config_init,
+ .probe = gpy_probe,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .config_aneg = gpy_config_aneg,
+ .aneg_done = genphy_c45_aneg_done,
+ .read_status = gpy_read_status,
+ .config_intr = gpy_config_intr,
+ .handle_interrupt = gpy_handle_interrupt,
+ .set_wol = gpy_set_wol,
+ .get_wol = gpy_get_wol,
+ .set_loopback = gpy_loopback,
+ },
+ {
+ .phy_id = PHY_ID_GPY212B,
+ .phy_id_mask = PHY_ID_GPY21xB_MASK,
+ .name = "Maxlinear Ethernet GPY212B",
+ .get_features = genphy_c45_pma_read_abilities,
+ .config_init = gpy_config_init,
+ .probe = gpy_probe,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .config_aneg = gpy_config_aneg,
+ .aneg_done = genphy_c45_aneg_done,
+ .read_status = gpy_read_status,
+ .config_intr = gpy_config_intr,
+ .handle_interrupt = gpy_handle_interrupt,
+ .set_wol = gpy_set_wol,
+ .get_wol = gpy_get_wol,
+ .set_loopback = gpy_loopback,
+ },
+ {
+ PHY_ID_MATCH_MODEL(PHY_ID_GPY212C),
+ .name = "Maxlinear Ethernet GPY212C",
+ .get_features = genphy_c45_pma_read_abilities,
+ .config_init = gpy_config_init,
+ .probe = gpy_probe,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .config_aneg = gpy_config_aneg,
+ .aneg_done = genphy_c45_aneg_done,
+ .read_status = gpy_read_status,
+ .config_intr = gpy_config_intr,
+ .handle_interrupt = gpy_handle_interrupt,
+ .set_wol = gpy_set_wol,
+ .get_wol = gpy_get_wol,
+ .set_loopback = gpy_loopback,
+ },
+ {
+ .phy_id = PHY_ID_GPY215B,
+ .phy_id_mask = PHY_ID_GPYx15B_MASK,
+ .name = "Maxlinear Ethernet GPY215B",
+ .get_features = genphy_c45_pma_read_abilities,
+ .config_init = gpy_config_init,
+ .probe = gpy_probe,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .config_aneg = gpy_config_aneg,
+ .aneg_done = genphy_c45_aneg_done,
+ .read_status = gpy_read_status,
+ .config_intr = gpy_config_intr,
+ .handle_interrupt = gpy_handle_interrupt,
+ .set_wol = gpy_set_wol,
+ .get_wol = gpy_get_wol,
+ .set_loopback = gpy_loopback,
+ },
+ {
+ PHY_ID_MATCH_MODEL(PHY_ID_GPY215C),
+ .name = "Maxlinear Ethernet GPY215C",
+ .get_features = genphy_c45_pma_read_abilities,
+ .config_init = gpy_config_init,
+ .probe = gpy_probe,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .config_aneg = gpy_config_aneg,
+ .aneg_done = genphy_c45_aneg_done,
+ .read_status = gpy_read_status,
+ .config_intr = gpy_config_intr,
+ .handle_interrupt = gpy_handle_interrupt,
+ .set_wol = gpy_set_wol,
+ .get_wol = gpy_get_wol,
+ .set_loopback = gpy_loopback,
+ },
+ {
+ PHY_ID_MATCH_MODEL(PHY_ID_GPY241B),
+ .name = "Maxlinear Ethernet GPY241B",
+ .get_features = genphy_c45_pma_read_abilities,
+ .config_init = gpy_config_init,
+ .probe = gpy_probe,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .config_aneg = gpy_config_aneg,
+ .aneg_done = genphy_c45_aneg_done,
+ .read_status = gpy_read_status,
+ .config_intr = gpy_config_intr,
+ .handle_interrupt = gpy_handle_interrupt,
+ .set_wol = gpy_set_wol,
+ .get_wol = gpy_get_wol,
+ .set_loopback = gpy_loopback,
+ },
+ {
+ PHY_ID_MATCH_MODEL(PHY_ID_GPY241BM),
+ .name = "Maxlinear Ethernet GPY241BM",
+ .get_features = genphy_c45_pma_read_abilities,
+ .config_init = gpy_config_init,
+ .probe = gpy_probe,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .config_aneg = gpy_config_aneg,
+ .aneg_done = genphy_c45_aneg_done,
+ .read_status = gpy_read_status,
+ .config_intr = gpy_config_intr,
+ .handle_interrupt = gpy_handle_interrupt,
+ .set_wol = gpy_set_wol,
+ .get_wol = gpy_get_wol,
+ .set_loopback = gpy_loopback,
+ },
+ {
+ PHY_ID_MATCH_MODEL(PHY_ID_GPY245B),
+ .name = "Maxlinear Ethernet GPY245B",
+ .get_features = genphy_c45_pma_read_abilities,
+ .config_init = gpy_config_init,
+ .probe = gpy_probe,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .config_aneg = gpy_config_aneg,
+ .aneg_done = genphy_c45_aneg_done,
+ .read_status = gpy_read_status,
+ .config_intr = gpy_config_intr,
+ .handle_interrupt = gpy_handle_interrupt,
+ .set_wol = gpy_set_wol,
+ .get_wol = gpy_get_wol,
+ .set_loopback = gpy_loopback,
+ },
+};
+module_phy_driver(gpy_drivers);
+
+static struct mdio_device_id __maybe_unused gpy_tbl[] = {
+ {PHY_ID_MATCH_MODEL(PHY_ID_GPY2xx)},
+ {PHY_ID_GPY115B, PHY_ID_GPYx15B_MASK},
+ {PHY_ID_MATCH_MODEL(PHY_ID_GPY115C)},
+ {PHY_ID_GPY211B, PHY_ID_GPY21xB_MASK},
+ {PHY_ID_MATCH_MODEL(PHY_ID_GPY211C)},
+ {PHY_ID_GPY212B, PHY_ID_GPY21xB_MASK},
+ {PHY_ID_MATCH_MODEL(PHY_ID_GPY212C)},
+ {PHY_ID_GPY215B, PHY_ID_GPYx15B_MASK},
+ {PHY_ID_MATCH_MODEL(PHY_ID_GPY215C)},
+ {PHY_ID_MATCH_MODEL(PHY_ID_GPY241B)},
+ {PHY_ID_MATCH_MODEL(PHY_ID_GPY241BM)},
+ {PHY_ID_MATCH_MODEL(PHY_ID_GPY245B)},
+ { }
+};
+MODULE_DEVICE_TABLE(mdio, gpy_tbl);
+
+MODULE_DESCRIPTION("Maxlinear Ethernet GPY Driver");
+MODULE_AUTHOR("Xu Liang");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/nxp-tja11xx.c b/drivers/net/phy/nxp-tja11xx.c
index afd7afa1f498..9944cc501806 100644
--- a/drivers/net/phy/nxp-tja11xx.c
+++ b/drivers/net/phy/nxp-tja11xx.c
@@ -47,12 +47,14 @@
#define MII_INTSRC_LINK_FAIL BIT(10)
#define MII_INTSRC_LINK_UP BIT(9)
#define MII_INTSRC_MASK (MII_INTSRC_LINK_FAIL | MII_INTSRC_LINK_UP)
-#define MII_INTSRC_TEMP_ERR BIT(1)
#define MII_INTSRC_UV_ERR BIT(3)
+#define MII_INTSRC_TEMP_ERR BIT(1)
#define MII_INTEN 22
#define MII_INTEN_LINK_FAIL BIT(10)
#define MII_INTEN_LINK_UP BIT(9)
+#define MII_INTEN_UV_ERR BIT(3)
+#define MII_INTEN_TEMP_ERR BIT(1)
#define MII_COMMSTAT 23
#define MII_COMMSTAT_LINK_UP BIT(15)
@@ -607,7 +609,8 @@ static int tja11xx_config_intr(struct phy_device *phydev)
if (err)
return err;
- value = MII_INTEN_LINK_FAIL | MII_INTEN_LINK_UP;
+ value = MII_INTEN_LINK_FAIL | MII_INTEN_LINK_UP |
+ MII_INTEN_UV_ERR | MII_INTEN_TEMP_ERR;
err = phy_write(phydev, MII_INTEN, value);
} else {
err = phy_write(phydev, MII_INTEN, value);
@@ -622,6 +625,7 @@ static int tja11xx_config_intr(struct phy_device *phydev)
static irqreturn_t tja11xx_handle_interrupt(struct phy_device *phydev)
{
+ struct device *dev = &phydev->mdio.dev;
int irq_status;
irq_status = phy_read(phydev, MII_INTSRC);
@@ -630,6 +634,11 @@ static irqreturn_t tja11xx_handle_interrupt(struct phy_device *phydev)
return IRQ_NONE;
}
+ if (irq_status & MII_INTSRC_TEMP_ERR)
+ dev_warn(dev, "Overtemperature error detected (temp > 155C°).\n");
+ if (irq_status & MII_INTSRC_UV_ERR)
+ dev_warn(dev, "Undervoltage error detected.\n");
+
if (!(irq_status & MII_INTSRC_MASK))
return IRQ_NONE;
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 8eeb26d8aeb7..f124a8a58bd4 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -426,7 +426,7 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
EXPORT_SYMBOL(phy_mii_ioctl);
/**
- * phy_do_ioctl - generic ndo_do_ioctl implementation
+ * phy_do_ioctl - generic ndo_eth_ioctl implementation
* @dev: the net_device struct
* @ifr: &struct ifreq for socket ioctl's
* @cmd: ioctl cmd to execute
@@ -441,7 +441,7 @@ int phy_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
EXPORT_SYMBOL(phy_do_ioctl);
/**
- * phy_do_ioctl_running - generic ndo_do_ioctl implementation but test first
+ * phy_do_ioctl_running - generic ndo_eth_ioctl implementation but test first
*
* @dev: the net_device struct
* @ifr: &struct ifreq for socket ioctl's
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 5d5f9a9ee768..107aa6d7bc6b 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -969,6 +969,20 @@ void phy_device_remove(struct phy_device *phydev)
EXPORT_SYMBOL(phy_device_remove);
/**
+ * phy_get_c45_ids - Read 802.3-c45 IDs for phy device.
+ * @phydev: phy_device structure to read 802.3-c45 IDs
+ *
+ * Returns zero on success, %-EIO on bus access error, or %-ENODEV if
+ * the "devices in package" is invalid.
+ */
+int phy_get_c45_ids(struct phy_device *phydev)
+{
+ return get_phy_c45_ids(phydev->mdio.bus, phydev->mdio.addr,
+ &phydev->c45_ids);
+}
+EXPORT_SYMBOL(phy_get_c45_ids);
+
+/**
* phy_find_first - finds the first PHY device on the bus
* @bus: the target MII bus
*/
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index eb29ef53d971..2cdf9f989dec 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -942,10 +942,11 @@ static void phylink_phy_change(struct phy_device *phydev, bool up)
phylink_run_resolve(pl);
- phylink_dbg(pl, "phy link %s %s/%s/%s\n", up ? "up" : "down",
+ phylink_dbg(pl, "phy link %s %s/%s/%s/%s\n", up ? "up" : "down",
phy_modes(phydev->interface),
phy_speed_to_str(phydev->speed),
- phy_duplex_to_str(phydev->duplex));
+ phy_duplex_to_str(phydev->duplex),
+ phylink_pause_to_str(pl->phy_state.pause));
}
static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy,
@@ -1457,15 +1458,11 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
return phy_ethtool_ksettings_set(pl->phydev, kset);
}
- linkmode_copy(support, pl->supported);
config = pl->link_config;
- config.an_enabled = kset->base.autoneg == AUTONEG_ENABLE;
- /* Mask out unsupported advertisements, and force the autoneg bit */
+ /* Mask out unsupported advertisements */
linkmode_and(config.advertising, kset->link_modes.advertising,
- support);
- linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, config.advertising,
- config.an_enabled);
+ pl->supported);
/* FIXME: should we reject autoneg if phy/mac does not support it? */
switch (kset->base.autoneg) {
@@ -1474,7 +1471,7 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
* duplex.
*/
s = phy_lookup_setting(kset->base.speed, kset->base.duplex,
- support, false);
+ pl->supported, false);
if (!s)
return -EINVAL;
@@ -1515,6 +1512,12 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
/* We have ruled out the case with a PHY attached, and the
* fixed-link cases. All that is left are in-band links.
*/
+ config.an_enabled = kset->base.autoneg == AUTONEG_ENABLE;
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, config.advertising,
+ config.an_enabled);
+
+ /* Validate without changing the current supported mask. */
+ linkmode_copy(support, pl->supported);
if (phylink_validate(pl, support, &config))
return -EINVAL;
diff --git a/drivers/net/plip/plip.c b/drivers/net/plip/plip.c
index e26cf91bdec2..82d609401711 100644
--- a/drivers/net/plip/plip.c
+++ b/drivers/net/plip/plip.c
@@ -84,6 +84,7 @@ static const char version[] = "NET3 PLIP version 2.4-parport gniibe@mri.co.jp\n"
extra grounds are 18,19,20,21,22,23,24
*/
+#include <linux/compat.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
@@ -150,7 +151,8 @@ static int plip_hard_header_cache(const struct neighbour *neigh,
struct hh_cache *hh, __be16 type);
static int plip_open(struct net_device *dev);
static int plip_close(struct net_device *dev);
-static int plip_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
+static int plip_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
+ void __user *data, int cmd);
static int plip_preempt(void *handle);
static void plip_wakeup(void *handle);
@@ -265,7 +267,7 @@ static const struct net_device_ops plip_netdev_ops = {
.ndo_open = plip_open,
.ndo_stop = plip_close,
.ndo_start_xmit = plip_tx_packet,
- .ndo_do_ioctl = plip_ioctl,
+ .ndo_siocdevprivate = plip_siocdevprivate,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
@@ -1207,7 +1209,8 @@ plip_wakeup(void *handle)
}
static int
-plip_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+plip_siocdevprivate(struct net_device *dev, struct ifreq *rq,
+ void __user *data, int cmd)
{
struct net_local *nl = netdev_priv(dev);
struct plipconf *pc = (struct plipconf *) &rq->ifr_ifru;
@@ -1215,6 +1218,9 @@ plip_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
if (cmd != SIOCDEVPLIP)
return -EOPNOTSUPP;
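+	/* The plipconf data lives inside ifr_ifru, whose layout differs for
+	 * 32-bit compat tasks, so reject those rather than misparse it.
+	 */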
+ if (in_compat_syscall())
+ return -EOPNOTSUPP;
+
switch(pc->pcmd) {
case PLIP_GET_TIMEOUT:
pc->trigger = nl->trigger;
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 7a099c37527f..e9e81573f21e 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -1463,11 +1463,11 @@ ppp_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
static int
-ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+ppp_net_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
+ void __user *addr, int cmd)
{
struct ppp *ppp = netdev_priv(dev);
int err = -EFAULT;
- void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
struct ppp_stats stats;
struct ppp_comp_stats cstats;
char *vers;
@@ -1596,7 +1596,7 @@ static const struct net_device_ops ppp_netdev_ops = {
.ndo_init = ppp_dev_init,
.ndo_uninit = ppp_dev_uninit,
.ndo_start_xmit = ppp_start_xmit,
- .ndo_do_ioctl = ppp_net_ioctl,
+ .ndo_siocdevprivate = ppp_net_siocdevprivate,
.ndo_get_stats64 = ppp_get_stats64,
.ndo_fill_forward_path = ppp_fill_forward_path,
};
diff --git a/drivers/net/sb1000.c b/drivers/net/sb1000.c
index e88af978f63c..f01c9db01b16 100644
--- a/drivers/net/sb1000.c
+++ b/drivers/net/sb1000.c
@@ -78,7 +78,8 @@ struct sb1000_private {
/* prototypes for Linux interface */
extern int sb1000_probe(struct net_device *dev);
static int sb1000_open(struct net_device *dev);
-static int sb1000_dev_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd);
+static int sb1000_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
+ void __user *data, int cmd);
static netdev_tx_t sb1000_start_xmit(struct sk_buff *skb,
struct net_device *dev);
static irqreturn_t sb1000_interrupt(int irq, void *dev_id);
@@ -135,7 +136,7 @@ MODULE_DEVICE_TABLE(pnp, sb1000_pnp_ids);
static const struct net_device_ops sb1000_netdev_ops = {
.ndo_open = sb1000_open,
.ndo_start_xmit = sb1000_start_xmit,
- .ndo_do_ioctl = sb1000_dev_ioctl,
+ .ndo_siocdevprivate = sb1000_siocdevprivate,
.ndo_stop = sb1000_close,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
@@ -987,7 +988,8 @@ sb1000_open(struct net_device *dev)
return 0; /* Always succeed */
}
-static int sb1000_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+static int sb1000_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
+ void __user *data, int cmd)
{
char* name;
unsigned char version[2];
@@ -1011,7 +1013,7 @@ static int sb1000_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
stats[2] = dev->stats.rx_packets;
stats[3] = dev->stats.rx_errors;
stats[4] = dev->stats.rx_dropped;
- if(copy_to_user(ifr->ifr_data, stats, sizeof(stats)))
+ if (copy_to_user(data, stats, sizeof(stats)))
return -EFAULT;
status = 0;
break;
@@ -1019,21 +1021,21 @@ static int sb1000_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
case SIOCGCMFIRMWARE: /* get firmware version */
if ((status = sb1000_get_firmware_version(ioaddr, name, version, 1)))
return status;
- if(copy_to_user(ifr->ifr_data, version, sizeof(version)))
+ if (copy_to_user(data, version, sizeof(version)))
return -EFAULT;
break;
case SIOCGCMFREQUENCY: /* get frequency */
if ((status = sb1000_get_frequency(ioaddr, name, &frequency)))
return status;
- if(put_user(frequency, (int __user *) ifr->ifr_data))
+ if (put_user(frequency, (int __user *)data))
return -EFAULT;
break;
case SIOCSCMFREQUENCY: /* set frequency */
if (!capable(CAP_NET_ADMIN))
return -EPERM;
- if(get_user(frequency, (int __user *) ifr->ifr_data))
+ if (get_user(frequency, (int __user *)data))
return -EFAULT;
if ((status = sb1000_set_frequency(ioaddr, name, frequency)))
return status;
@@ -1042,14 +1044,14 @@ static int sb1000_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
case SIOCGCMPIDS: /* get PIDs */
if ((status = sb1000_get_PIDs(ioaddr, name, PID)))
return status;
- if(copy_to_user(ifr->ifr_data, PID, sizeof(PID)))
+ if (copy_to_user(data, PID, sizeof(PID)))
return -EFAULT;
break;
case SIOCSCMPIDS: /* set PIDs */
if (!capable(CAP_NET_ADMIN))
return -EPERM;
- if(copy_from_user(PID, ifr->ifr_data, sizeof(PID)))
+ if (copy_from_user(PID, data, sizeof(PID)))
return -EFAULT;
if ((status = sb1000_set_PIDs(ioaddr, name, PID)))
return status;
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
index dc84cb844319..5435b5689ce6 100644
--- a/drivers/net/slip/slip.c
+++ b/drivers/net/slip/slip.c
@@ -62,6 +62,7 @@
*/
#define SL_CHECK_TRANSMIT
+#include <linux/compat.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -108,7 +109,7 @@ static void slip_unesc6(struct slip *sl, unsigned char c);
#ifdef CONFIG_SLIP_SMART
static void sl_keepalive(struct timer_list *t);
static void sl_outfill(struct timer_list *t);
-static int sl_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int sl_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user *data, int cmd);
#endif
/********************************
@@ -647,7 +648,7 @@ static const struct net_device_ops sl_netdev_ops = {
.ndo_change_mtu = sl_change_mtu,
.ndo_tx_timeout = sl_tx_timeout,
#ifdef CONFIG_SLIP_SMART
- .ndo_do_ioctl = sl_ioctl,
+ .ndo_siocdevprivate = sl_siocdevprivate,
#endif
};
@@ -1179,11 +1180,12 @@ static int slip_ioctl(struct tty_struct *tty, struct file *file,
/* VSV changes start here */
#ifdef CONFIG_SLIP_SMART
-/* function do_ioctl called from net/core/dev.c
+/* function sl_siocdevprivate called from net/core/dev.c
to allow get/set outfill/keepalive parameter
by ifconfig */
-static int sl_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+static int sl_siocdevprivate(struct net_device *dev, struct ifreq *rq,
+ void __user *data, int cmd)
{
struct slip *sl = netdev_priv(dev);
unsigned long *p = (unsigned long *)&rq->ifr_ifru;
@@ -1191,6 +1193,9 @@ static int sl_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
if (sl == NULL) /* Allocation failed ?? */
return -ENODEV;
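+	/* The parameters are passed as unsigned longs inside ifr_ifru, whose
+	 * layout differs for 32-bit compat tasks, so reject those here.
+	 */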
+ if (in_compat_syscall())
+ return -EOPNOTSUPP;
+
spin_lock_bh(&sl->lock);
if (!sl->tty) {
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 2c115216420a..cb01897c7a5d 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -197,7 +197,7 @@ static const struct net_device_ops ax88172_netdev_ops = {
.ndo_get_stats64 = dev_get_tstats64,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = asix_ioctl,
+ .ndo_eth_ioctl = asix_ioctl,
.ndo_set_rx_mode = ax88172_set_multicast,
};
@@ -589,7 +589,7 @@ static const struct net_device_ops ax88772_netdev_ops = {
.ndo_get_stats64 = dev_get_tstats64,
.ndo_set_mac_address = asix_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = phy_do_ioctl_running,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_set_rx_mode = asix_set_multicast,
};
@@ -714,7 +714,6 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
u8 buf[ETH_ALEN] = {0}, chipcode = 0;
struct asix_common_private *priv;
int ret, i;
- u32 phyid;
usbnet_get_endpoints(dev, intf);
@@ -762,10 +761,6 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
return ret;
}
- /* Read PHYID register *AFTER* the PHY was reset properly */
- phyid = asix_get_phyid(dev);
- netdev_dbg(dev->net, "PHYID=0x%08x\n", phyid);
-
/* Asix framing packs multiple eth frames into a 2K usb bulk transfer */
if (dev->driver_info->flags & FLAG_FRAMING_AX) {
/* hard_mtu is still the default - the device does not support
@@ -1100,7 +1095,7 @@ static const struct net_device_ops ax88178_netdev_ops = {
.ndo_set_mac_address = asix_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = asix_set_multicast,
- .ndo_do_ioctl = asix_ioctl,
+ .ndo_eth_ioctl = asix_ioctl,
.ndo_change_mtu = ax88178_change_mtu,
};
@@ -1215,6 +1210,7 @@ static const struct driver_info ax88772b_info = {
.unbind = ax88772_unbind,
.status = asix_status,
.reset = ax88772_reset,
+ .stop = ax88772_stop,
.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR |
FLAG_MULTI_PACKET,
.rx_fixup = asix_rx_fixup_common,
diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c
index 530947d7477b..d9777d9a7c5d 100644
--- a/drivers/net/usb/ax88172a.c
+++ b/drivers/net/usb/ax88172a.c
@@ -109,7 +109,7 @@ static const struct net_device_ops ax88172a_netdev_ops = {
.ndo_get_stats64 = dev_get_tstats64,
.ndo_set_mac_address = asix_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = phy_do_ioctl_running,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_set_rx_mode = asix_set_multicast,
};
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index c1316718304d..f25448a08870 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -1035,7 +1035,7 @@ static const struct net_device_ops ax88179_netdev_ops = {
.ndo_change_mtu = ax88179_change_mtu,
.ndo_set_mac_address = ax88179_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = ax88179_ioctl,
+ .ndo_eth_ioctl = ax88179_ioctl,
.ndo_set_rx_mode = ax88179_set_multicast,
.ndo_set_features = ax88179_set_features,
};
diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c
index 8d1f69dad603..e1da9102a540 100644
--- a/drivers/net/usb/cdc-phonet.c
+++ b/drivers/net/usb/cdc-phonet.c
@@ -253,7 +253,8 @@ static int usbpn_close(struct net_device *dev)
return usb_set_interface(pnd->usb, num, !pnd->active_setting);
}
-static int usbpn_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+static int usbpn_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
+ void __user *data, int cmd)
{
struct if_phonet_req *req = (struct if_phonet_req *)ifr;
@@ -269,7 +270,7 @@ static const struct net_device_ops usbpn_ops = {
.ndo_open = usbpn_open,
.ndo_stop = usbpn_close,
.ndo_start_xmit = usbpn_xmit,
- .ndo_do_ioctl = usbpn_ioctl,
+ .ndo_siocdevprivate = usbpn_siocdevprivate,
};
static void usbpn_setup(struct net_device *dev)
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 89cc61d7a675..907f98b1eefe 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -345,7 +345,7 @@ static const struct net_device_ops dm9601_netdev_ops = {
.ndo_change_mtu = usbnet_change_mtu,
.ndo_get_stats64 = dev_get_tstats64,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = dm9601_ioctl,
+ .ndo_eth_ioctl = dm9601_ioctl,
.ndo_set_rx_mode = dm9601_set_multicast,
.ndo_set_mac_address = dm9601_set_mac_address,
};
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index dec96e8ab567..24bc1e678b7b 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -1079,8 +1079,7 @@ static void hso_init_termios(struct ktermios *termios)
tty_termios_encode_baud_rate(termios, 115200, 115200);
}
-static void _hso_serial_set_termios(struct tty_struct *tty,
- struct ktermios *old)
+static void _hso_serial_set_termios(struct tty_struct *tty)
{
struct hso_serial *serial = tty->driver_data;
@@ -1262,7 +1261,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
if (serial->port.count == 1) {
serial->rx_state = RX_IDLE;
/* Force default termio settings */
- _hso_serial_set_termios(tty, NULL);
+ _hso_serial_set_termios(tty);
tasklet_setup(&serial->unthrottle_tasklet,
hso_unthrottle_tasklet);
result = hso_start_serial_device(serial->parent, GFP_KERNEL);
@@ -1394,7 +1393,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
/* the actual setup */
spin_lock_irqsave(&serial->serial_lock, flags);
if (serial->port.count)
- _hso_serial_set_termios(tty, old);
+ _hso_serial_set_termios(tty);
else
tty->termios = *old;
spin_unlock_irqrestore(&serial->serial_lock, flags);
@@ -2353,7 +2352,7 @@ static int remove_net_device(struct hso_device *hso_dev)
}
/* Frees our network device */
-static void hso_free_net_device(struct hso_device *hso_dev, bool bailout)
+static void hso_free_net_device(struct hso_device *hso_dev)
{
int i;
struct hso_net *hso_net = dev2net(hso_dev);
@@ -2376,7 +2375,7 @@ static void hso_free_net_device(struct hso_device *hso_dev, bool bailout)
kfree(hso_net->mux_bulk_tx_buf);
hso_net->mux_bulk_tx_buf = NULL;
- if (hso_net->net && !bailout)
+ if (hso_net->net)
free_netdev(hso_net->net);
kfree(hso_dev);
@@ -3133,7 +3132,7 @@ static void hso_free_interface(struct usb_interface *interface)
rfkill_unregister(rfk);
rfkill_destroy(rfk);
}
- hso_free_net_device(network_table[i], false);
+ hso_free_net_device(network_table[i]);
}
}
}
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index 207e59e74935..06e2181e5810 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -443,7 +443,7 @@ static int ipheth_probe(struct usb_interface *intf,
netdev->netdev_ops = &ipheth_netdev_ops;
netdev->watchdog_timeo = IPHETH_TX_TIMEOUT;
- strcpy(netdev->name, "eth%d");
+ strscpy(netdev->name, "eth%d", sizeof(netdev->name));
dev = netdev_priv(netdev);
dev->udev = udev;
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 6d092d78e0cb..4e8d3c28f73e 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -3609,7 +3609,7 @@ static const struct net_device_ops lan78xx_netdev_ops = {
.ndo_change_mtu = lan78xx_change_mtu,
.ndo_set_mac_address = lan78xx_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = phy_do_ioctl_running,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_set_rx_mode = lan78xx_set_multicast,
.ndo_set_features = lan78xx_set_features,
.ndo_vlan_rx_add_vid = lan78xx_vlan_rx_add_vid,
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index 2469bdcb1a04..66866bef25df 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -464,7 +464,7 @@ static const struct net_device_ops mcs7830_netdev_ops = {
.ndo_change_mtu = usbnet_change_mtu,
.ndo_get_stats64 = dev_get_tstats64,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = mcs7830_ioctl,
+ .ndo_eth_ioctl = mcs7830_ioctl,
.ndo_set_rx_mode = mcs7830_set_multicast,
.ndo_set_mac_address = mcs7830_set_mac_address,
};
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 652e9fcf0b77..36dafcb3d04a 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -1001,7 +1001,8 @@ static const struct ethtool_ops ops = {
.set_link_ksettings = pegasus_set_link_ksettings,
};
-static int pegasus_ioctl(struct net_device *net, struct ifreq *rq, int cmd)
+static int pegasus_siocdevprivate(struct net_device *net, struct ifreq *rq,
+ void __user *udata, int cmd)
{
__u16 *data = (__u16 *) &rq->ifr_ifru;
pegasus_t *pegasus = netdev_priv(net);
@@ -1269,7 +1270,7 @@ static int pegasus_resume(struct usb_interface *intf)
static const struct net_device_ops pegasus_netdev_ops = {
.ndo_open = pegasus_open,
.ndo_stop = pegasus_close,
- .ndo_do_ioctl = pegasus_ioctl,
+ .ndo_siocdevprivate = pegasus_siocdevprivate,
.ndo_start_xmit = pegasus_start_xmit,
.ndo_set_rx_mode = pegasus_set_multicast,
.ndo_tx_timeout = pegasus_tx_timeout,
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 79832374f78d..aa66671c484d 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -9190,7 +9190,7 @@ static int rtl8152_change_mtu(struct net_device *dev, int new_mtu)
static const struct net_device_ops rtl8152_netdev_ops = {
.ndo_open = rtl8152_open,
.ndo_stop = rtl8152_close,
- .ndo_do_ioctl = rtl8152_ioctl,
+ .ndo_eth_ioctl = rtl8152_ioctl,
.ndo_start_xmit = rtl8152_start_xmit,
.ndo_tx_timeout = rtl8152_tx_timeout,
.ndo_set_features = rtl8152_set_features,
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index 7656f2a3afd9..4a1b0e0fc3a3 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -822,7 +822,8 @@ static const struct ethtool_ops ops = {
.get_link_ksettings = rtl8150_get_link_ksettings,
};
-static int rtl8150_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
+static int rtl8150_siocdevprivate(struct net_device *netdev, struct ifreq *rq,
+ void __user *udata, int cmd)
{
rtl8150_t *dev = netdev_priv(netdev);
u16 *data = (u16 *) & rq->ifr_ifru;
@@ -850,7 +851,7 @@ static int rtl8150_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
static const struct net_device_ops rtl8150_netdev_ops = {
.ndo_open = rtl8150_open,
.ndo_stop = rtl8150_close,
- .ndo_do_ioctl = rtl8150_ioctl,
+ .ndo_siocdevprivate = rtl8150_siocdevprivate,
.ndo_start_xmit = rtl8150_start_xmit,
.ndo_tx_timeout = rtl8150_tx_timeout,
.ndo_set_rx_mode = rtl8150_set_multicast,
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 13141dbfa3a8..76f7af161313 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -1439,7 +1439,7 @@ static const struct net_device_ops smsc75xx_netdev_ops = {
.ndo_change_mtu = smsc75xx_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = smsc75xx_ioctl,
+ .ndo_eth_ioctl = smsc75xx_ioctl,
.ndo_set_rx_mode = smsc75xx_set_multicast,
.ndo_set_features = smsc75xx_set_features,
};
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 4c8ee1cff4d4..7d953974eb9b 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -1044,7 +1044,7 @@ static const struct net_device_ops smsc95xx_netdev_ops = {
.ndo_get_stats64 = dev_get_tstats64,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = smsc95xx_ioctl,
+ .ndo_eth_ioctl = smsc95xx_ioctl,
.ndo_set_rx_mode = smsc95xx_set_multicast,
.ndo_set_features = smsc95xx_set_features,
};
diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c
index ce29261263cd..6516a37893e2 100644
--- a/drivers/net/usb/sr9700.c
+++ b/drivers/net/usb/sr9700.c
@@ -310,7 +310,7 @@ static const struct net_device_ops sr9700_netdev_ops = {
.ndo_change_mtu = usbnet_change_mtu,
.ndo_get_stats64 = dev_get_tstats64,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = sr9700_ioctl,
+ .ndo_eth_ioctl = sr9700_ioctl,
.ndo_set_rx_mode = sr9700_set_multicast,
.ndo_set_mac_address = sr9700_set_mac_address,
};
diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c
index a822d81310d5..576401c8b1be 100644
--- a/drivers/net/usb/sr9800.c
+++ b/drivers/net/usb/sr9800.c
@@ -684,7 +684,7 @@ static const struct net_device_ops sr9800_netdev_ops = {
.ndo_get_stats64 = dev_get_tstats64,
.ndo_set_mac_address = sr_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = sr_ioctl,
+ .ndo_eth_ioctl = sr_ioctl,
.ndo_set_rx_mode = sr_set_multicast,
};
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 470e1c1e6353..840c1c2ab16a 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1725,7 +1725,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
dev->interrupt_count = 0;
dev->net = net;
- strcpy (net->name, "usb%d");
+ strscpy(net->name, "usb%d", sizeof(net->name));
memcpy (net->dev_addr, node_id, sizeof node_id);
/* rx and tx sides can use different message sizes;
@@ -1752,13 +1752,13 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
if ((dev->driver_info->flags & FLAG_ETHER) != 0 &&
((dev->driver_info->flags & FLAG_POINTTOPOINT) == 0 ||
(net->dev_addr [0] & 0x02) == 0))
- strcpy (net->name, "eth%d");
+ strscpy(net->name, "eth%d", sizeof(net->name));
/* WLAN devices should always be named "wlan%d" */
if ((dev->driver_info->flags & FLAG_WLAN) != 0)
- strcpy(net->name, "wlan%d");
+ strscpy(net->name, "wlan%d", sizeof(net->name));
/* WWAN devices should always be named "wwan%d" */
if ((dev->driver_info->flags & FLAG_WWAN) != 0)
- strcpy(net->name, "wwan%d");
+ strscpy(net->name, "wwan%d", sizeof(net->name));
/* devices that cannot do ARP */
if ((dev->driver_info->flags & FLAG_NOARP) != 0)
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index bdb7ce3cb054..50eb43e5bf45 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -224,12 +224,13 @@ static void veth_get_channels(struct net_device *dev,
{
channels->tx_count = dev->real_num_tx_queues;
channels->rx_count = dev->real_num_rx_queues;
- channels->max_tx = dev->real_num_tx_queues;
- channels->max_rx = dev->real_num_rx_queues;
- channels->combined_count = min(dev->real_num_rx_queues, dev->real_num_tx_queues);
- channels->max_combined = min(dev->real_num_rx_queues, dev->real_num_tx_queues);
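+	/* Report the queues allocated at device creation as the maximum so
+	 * userspace can grow the channel count up to that limit.
+	 */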
+ channels->max_tx = dev->num_tx_queues;
+ channels->max_rx = dev->num_rx_queues;
}
+static int veth_set_channels(struct net_device *dev,
+ struct ethtool_channels *ch);
+
static const struct ethtool_ops veth_ethtool_ops = {
.get_drvinfo = veth_get_drvinfo,
.get_link = ethtool_op_get_link,
@@ -239,6 +240,7 @@ static const struct ethtool_ops veth_ethtool_ops = {
.get_link_ksettings = veth_get_link_ksettings,
.get_ts_info = ethtool_op_get_ts_info,
.get_channels = veth_get_channels,
+ .set_channels = veth_set_channels,
};
/* general routines */
@@ -711,7 +713,7 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
int mac_len, delta, off;
struct xdp_buff xdp;
- skb_orphan_partial(skb);
+ skb_prepare_for_gro(skb);
rcu_read_lock();
xdp_prog = rcu_dereference(rq->xdp_prog);
@@ -928,12 +930,12 @@ static int veth_poll(struct napi_struct *napi, int budget)
return done;
}
-static int __veth_napi_enable(struct net_device *dev)
+static int __veth_napi_enable_range(struct net_device *dev, int start, int end)
{
struct veth_priv *priv = netdev_priv(dev);
int err, i;
- for (i = 0; i < dev->real_num_rx_queues; i++) {
+ for (i = start; i < end; i++) {
struct veth_rq *rq = &priv->rq[i];
err = ptr_ring_init(&rq->xdp_ring, VETH_RING_SIZE, GFP_KERNEL);
@@ -941,7 +943,7 @@ static int __veth_napi_enable(struct net_device *dev)
goto err_xdp_ring;
}
- for (i = 0; i < dev->real_num_rx_queues; i++) {
+ for (i = start; i < end; i++) {
struct veth_rq *rq = &priv->rq[i];
napi_enable(&rq->xdp_napi);
@@ -949,19 +951,25 @@ static int __veth_napi_enable(struct net_device *dev)
}
return 0;
+
err_xdp_ring:
- for (i--; i >= 0; i--)
+ for (i--; i >= start; i--)
ptr_ring_cleanup(&priv->rq[i].xdp_ring, veth_ptr_free);
return err;
}
-static void veth_napi_del(struct net_device *dev)
+static int __veth_napi_enable(struct net_device *dev)
+{
+ return __veth_napi_enable_range(dev, 0, dev->real_num_rx_queues);
+}
+
+static void veth_napi_del_range(struct net_device *dev, int start, int end)
{
struct veth_priv *priv = netdev_priv(dev);
int i;
- for (i = 0; i < dev->real_num_rx_queues; i++) {
+ for (i = start; i < end; i++) {
struct veth_rq *rq = &priv->rq[i];
rcu_assign_pointer(priv->rq[i].napi, NULL);
@@ -970,7 +978,7 @@ static void veth_napi_del(struct net_device *dev)
}
synchronize_net();
- for (i = 0; i < dev->real_num_rx_queues; i++) {
+ for (i = start; i < end; i++) {
struct veth_rq *rq = &priv->rq[i];
rq->rx_notify_masked = false;
@@ -978,41 +986,90 @@ static void veth_napi_del(struct net_device *dev)
}
}
+static void veth_napi_del(struct net_device *dev)
+{
+ veth_napi_del_range(dev, 0, dev->real_num_rx_queues);
+}
+
static bool veth_gro_requested(const struct net_device *dev)
{
return !!(dev->wanted_features & NETIF_F_GRO);
}
-static int veth_enable_xdp(struct net_device *dev)
+static int veth_enable_xdp_range(struct net_device *dev, int start, int end,
+ bool napi_already_on)
{
- bool napi_already_on = veth_gro_requested(dev) && (dev->flags & IFF_UP);
struct veth_priv *priv = netdev_priv(dev);
int err, i;
- if (!xdp_rxq_info_is_reg(&priv->rq[0].xdp_rxq)) {
- for (i = 0; i < dev->real_num_rx_queues; i++) {
- struct veth_rq *rq = &priv->rq[i];
+ for (i = start; i < end; i++) {
+ struct veth_rq *rq = &priv->rq[i];
- if (!napi_already_on)
- netif_napi_add(dev, &rq->xdp_napi, veth_poll, NAPI_POLL_WEIGHT);
- err = xdp_rxq_info_reg(&rq->xdp_rxq, dev, i, rq->xdp_napi.napi_id);
- if (err < 0)
- goto err_rxq_reg;
+ if (!napi_already_on)
+ netif_napi_add(dev, &rq->xdp_napi, veth_poll, NAPI_POLL_WEIGHT);
+ err = xdp_rxq_info_reg(&rq->xdp_rxq, dev, i, rq->xdp_napi.napi_id);
+ if (err < 0)
+ goto err_rxq_reg;
- err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
- MEM_TYPE_PAGE_SHARED,
- NULL);
- if (err < 0)
- goto err_reg_mem;
+ err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
+ MEM_TYPE_PAGE_SHARED,
+ NULL);
+ if (err < 0)
+ goto err_reg_mem;
- /* Save original mem info as it can be overwritten */
- rq->xdp_mem = rq->xdp_rxq.mem;
- }
+ /* Save original mem info as it can be overwritten */
+ rq->xdp_mem = rq->xdp_rxq.mem;
+ }
+ return 0;
+
+err_reg_mem:
+ xdp_rxq_info_unreg(&priv->rq[i].xdp_rxq);
+err_rxq_reg:
+ for (i--; i >= start; i--) {
+ struct veth_rq *rq = &priv->rq[i];
+
+ xdp_rxq_info_unreg(&rq->xdp_rxq);
+ if (!napi_already_on)
+ netif_napi_del(&rq->xdp_napi);
+ }
+
+ return err;
+}
+
+static void veth_disable_xdp_range(struct net_device *dev, int start, int end,
+ bool delete_napi)
+{
+ struct veth_priv *priv = netdev_priv(dev);
+ int i;
+
+ for (i = start; i < end; i++) {
+ struct veth_rq *rq = &priv->rq[i];
+
+ rq->xdp_rxq.mem = rq->xdp_mem;
+ xdp_rxq_info_unreg(&rq->xdp_rxq);
+
+ if (delete_napi)
+ netif_napi_del(&rq->xdp_napi);
+ }
+}
+
+static int veth_enable_xdp(struct net_device *dev)
+{
+ bool napi_already_on = veth_gro_requested(dev) && (dev->flags & IFF_UP);
+ struct veth_priv *priv = netdev_priv(dev);
+ int err, i;
+
+ if (!xdp_rxq_info_is_reg(&priv->rq[0].xdp_rxq)) {
+ err = veth_enable_xdp_range(dev, 0, dev->real_num_rx_queues, napi_already_on);
+ if (err)
+ return err;
if (!napi_already_on) {
err = __veth_napi_enable(dev);
- if (err)
- goto err_rxq_reg;
+ if (err) {
+ veth_disable_xdp_range(dev, 0, dev->real_num_rx_queues, true);
+ return err;
+ }
if (!veth_gro_requested(dev)) {
/* user-space did not require GRO, but adding XDP
@@ -1030,18 +1087,6 @@ static int veth_enable_xdp(struct net_device *dev)
}
return 0;
-err_reg_mem:
- xdp_rxq_info_unreg(&priv->rq[i].xdp_rxq);
-err_rxq_reg:
- for (i--; i >= 0; i--) {
- struct veth_rq *rq = &priv->rq[i];
-
- xdp_rxq_info_unreg(&rq->xdp_rxq);
- if (!napi_already_on)
- netif_napi_del(&rq->xdp_napi);
- }
-
- return err;
}
static void veth_disable_xdp(struct net_device *dev)
@@ -1064,28 +1109,23 @@ static void veth_disable_xdp(struct net_device *dev)
}
}
- for (i = 0; i < dev->real_num_rx_queues; i++) {
- struct veth_rq *rq = &priv->rq[i];
-
- rq->xdp_rxq.mem = rq->xdp_mem;
- xdp_rxq_info_unreg(&rq->xdp_rxq);
- }
+ veth_disable_xdp_range(dev, 0, dev->real_num_rx_queues, false);
}
-static int veth_napi_enable(struct net_device *dev)
+static int veth_napi_enable_range(struct net_device *dev, int start, int end)
{
struct veth_priv *priv = netdev_priv(dev);
int err, i;
- for (i = 0; i < dev->real_num_rx_queues; i++) {
+ for (i = start; i < end; i++) {
struct veth_rq *rq = &priv->rq[i];
netif_napi_add(dev, &rq->xdp_napi, veth_poll, NAPI_POLL_WEIGHT);
}
- err = __veth_napi_enable(dev);
+ err = __veth_napi_enable_range(dev, start, end);
if (err) {
- for (i = 0; i < dev->real_num_rx_queues; i++) {
+ for (i = start; i < end; i++) {
struct veth_rq *rq = &priv->rq[i];
netif_napi_del(&rq->xdp_napi);
@@ -1095,6 +1135,128 @@ static int veth_napi_enable(struct net_device *dev)
return err;
}
+static int veth_napi_enable(struct net_device *dev)
+{
+ return veth_napi_enable_range(dev, 0, dev->real_num_rx_queues);
+}
+
+static void veth_disable_range_safe(struct net_device *dev, int start, int end)
+{
+ struct veth_priv *priv = netdev_priv(dev);
+
+ if (start >= end)
+ return;
+
+ if (priv->_xdp_prog) {
+ veth_napi_del_range(dev, start, end);
+ veth_disable_xdp_range(dev, start, end, false);
+ } else if (veth_gro_requested(dev)) {
+ veth_napi_del_range(dev, start, end);
+ }
+}
+
+static int veth_enable_range_safe(struct net_device *dev, int start, int end)
+{
+ struct veth_priv *priv = netdev_priv(dev);
+ int err;
+
+ if (start >= end)
+ return 0;
+
+ if (priv->_xdp_prog) {
+ /* these channels are freshly initialized, napi is not enabled on
+ * them even when GRO is requested
+ */
+ err = veth_enable_xdp_range(dev, start, end, false);
+ if (err)
+ return err;
+
+ err = __veth_napi_enable_range(dev, start, end);
+ if (err) {
+ /* on error always delete the newly added napis */
+ veth_disable_xdp_range(dev, start, end, true);
+ return err;
+ }
+ } else if (veth_gro_requested(dev)) {
+ return veth_napi_enable_range(dev, start, end);
+ }
+ return 0;
+}
+
+static int veth_set_channels(struct net_device *dev,
+ struct ethtool_channels *ch)
+{
+ struct veth_priv *priv = netdev_priv(dev);
+ unsigned int old_rx_count, new_rx_count;
+ struct veth_priv *peer_priv;
+ struct net_device *peer;
+ int err;
+
+ /* sanity check. Upper bounds are already enforced by the caller */
+ if (!ch->rx_count || !ch->tx_count)
+ return -EINVAL;
+
+ /* avoid breaking XDP, if that is enabled */
+ peer = rtnl_dereference(priv->peer);
+ peer_priv = peer ? netdev_priv(peer) : NULL;
+ if (priv->_xdp_prog && peer && ch->rx_count < peer->real_num_tx_queues)
+ return -EINVAL;
+
+ if (peer && peer_priv && peer_priv->_xdp_prog && ch->tx_count > peer->real_num_rx_queues)
+ return -EINVAL;
+
+ old_rx_count = dev->real_num_rx_queues;
+ new_rx_count = ch->rx_count;
+ if (netif_running(dev)) {
+ /* turn device off */
+ netif_carrier_off(dev);
+ if (peer)
+ netif_carrier_off(peer);
+
+ /* try to allocate new resources, as needed */
+ err = veth_enable_range_safe(dev, old_rx_count, new_rx_count);
+ if (err)
+ goto out;
+ }
+
+ err = netif_set_real_num_rx_queues(dev, ch->rx_count);
+ if (err)
+ goto revert;
+
+ err = netif_set_real_num_tx_queues(dev, ch->tx_count);
+ if (err) {
+ int err2 = netif_set_real_num_rx_queues(dev, old_rx_count);
+
+ /* this error condition could happen only if rx and tx change
+ * in opposite directions (e.g. tx nr raises, rx nr decreases)
+ * and we can't do anything to fully restore the original
+ * status
+ */
+ if (err2)
+ pr_warn("Can't restore rx queues config %d -> %d %d",
+ new_rx_count, old_rx_count, err2);
+ else
+ goto revert;
+ }
+
+out:
+ if (netif_running(dev)) {
+ /* note that we need to swap the arguments WRT the enable part
+ * to identify the range we have to disable
+ */
+ veth_disable_range_safe(dev, new_rx_count, old_rx_count);
+ netif_carrier_on(dev);
+ if (peer)
+ netif_carrier_on(peer);
+ }
+ return err;
+
+revert:
+ new_rx_count = old_rx_count;
+ old_rx_count = ch->rx_count;
+ goto out;
+}
+
static int veth_open(struct net_device *dev)
{
struct veth_priv *priv = netdev_priv(dev);
@@ -1447,6 +1609,23 @@ static void veth_disable_gro(struct net_device *dev)
netdev_update_features(dev);
}
+static int veth_init_queues(struct net_device *dev, struct nlattr *tb[])
+{
+ int err;
+
+ if (!tb[IFLA_NUM_TX_QUEUES] && dev->num_tx_queues > 1) {
+ err = netif_set_real_num_tx_queues(dev, 1);
+ if (err)
+ return err;
+ }
+ if (!tb[IFLA_NUM_RX_QUEUES] && dev->num_rx_queues > 1) {
+ err = netif_set_real_num_rx_queues(dev, 1);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
static int veth_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
@@ -1556,13 +1735,21 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
priv = netdev_priv(dev);
rcu_assign_pointer(priv->peer, peer);
+ err = veth_init_queues(dev, tb);
+ if (err)
+ goto err_queues;
priv = netdev_priv(peer);
rcu_assign_pointer(priv->peer, dev);
+ err = veth_init_queues(peer, tb);
+ if (err)
+ goto err_queues;
veth_disable_gro(dev);
return 0;
+err_queues:
+ unregister_netdevice(dev);
err_register_dev:
/* nothing to do */
err_configure_peer:
@@ -1608,6 +1795,16 @@ static struct net *veth_get_link_net(const struct net_device *dev)
return peer ? dev_net(peer) : dev_net(dev);
}
+static unsigned int veth_get_num_queues(void)
+{
+ /* enforce the same queue limit as rtnl_create_link */
+ int queues = num_possible_cpus();
+
+ if (queues > 4096)
+ queues = 4096;
+ return queues;
+}
+
static struct rtnl_link_ops veth_link_ops = {
.kind = DRV_NAME,
.priv_size = sizeof(struct veth_priv),
@@ -1618,6 +1815,8 @@ static struct rtnl_link_ops veth_link_ops = {
.policy = veth_policy,
.maxtype = VETH_INFO_MAX,
.get_link_net = veth_get_link_net,
+ .get_num_tx_queues = veth_get_num_queues,
+ .get_num_rx_queues = veth_get_num_queues,
};
/*
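veth_set_channels() above follows a grow-commit-shrink pattern: resources for any newly requested queue range are enabled first, real_num_{rx,tx}_queues is flipped, and the epilogue then disables whichever range is no longer needed; the revert label reuses the same epilogue by swapping the two bounds. A condensed, driver-agnostic sketch of that control flow (example_enable_range()/example_disable_range() are hypothetical stand-ins for the *_range_safe helpers):

	static int example_set_rx_channels(struct net_device *dev,
					   unsigned int new_rx)
	{
		unsigned int old_rx = dev->real_num_rx_queues;
		int err;

		/* grow: a no-op when new_rx <= old_rx */
		err = example_enable_range(dev, old_rx, new_rx);
		if (err)
			return err;

		err = netif_set_real_num_rx_queues(dev, new_rx);
		if (err)
			swap(old_rx, new_rx);	/* roll back what was enabled */

		/* shrink (or rollback): a no-op when new_rx >= old_rx */
		example_disable_range(dev, new_rx, old_rx);
		return err;
	}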
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index eee493685aad..c8c9ad7ca2b5 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -380,7 +380,7 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
struct page *page, unsigned int offset,
unsigned int len, unsigned int truesize,
bool hdr_valid, unsigned int metasize,
- bool whole_page)
+ unsigned int headroom)
{
struct sk_buff *skb;
struct virtio_net_hdr_mrg_rxbuf *hdr;
@@ -398,28 +398,16 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
else
hdr_padded_len = sizeof(struct padded_vnet_hdr);
- /* If whole_page, there is an offset between the beginning of the
+ /* If headroom is not 0, there is an offset between the beginning of the
* data and the allocated space, otherwise the data and the allocated
* space are aligned.
*
* Buffers with headroom use PAGE_SIZE as alloc size, see
* add_recvbuf_mergeable() + get_mergeable_buf_len()
*/
- if (whole_page) {
- /* Buffers with whole_page use PAGE_SIZE as alloc size,
- * see add_recvbuf_mergeable() + get_mergeable_buf_len()
- */
- truesize = PAGE_SIZE;
-
- /* page maybe head page, so we should get the buf by p, not the
- * page
- */
- tailroom = truesize - len - offset_in_page(p);
- buf = (char *)((unsigned long)p & PAGE_MASK);
- } else {
- tailroom = truesize - len;
- buf = p;
- }
+ truesize = headroom ? PAGE_SIZE : truesize;
+ tailroom = truesize - len - headroom;
+ buf = p - headroom;
len -= hdr_len;
offset += hdr_padded_len;
@@ -978,7 +966,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
put_page(page);
head_skb = page_to_skb(vi, rq, xdp_page, offset,
len, PAGE_SIZE, false,
- metasize, true);
+ metasize,
+ VIRTIO_XDP_HEADROOM);
return head_skb;
}
break;
@@ -1029,7 +1018,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
rcu_read_unlock();
head_skb = page_to_skb(vi, rq, page, offset, len, truesize, !xdp_prog,
- metasize, !!headroom);
+ metasize, headroom);
curr_skb = head_skb;
if (unlikely(!curr_skb))
@@ -2208,14 +2197,14 @@ static int virtnet_set_channels(struct net_device *dev,
if (vi->rq[0].xdp_prog)
return -EINVAL;
- get_online_cpus();
+ cpus_read_lock();
err = _virtnet_set_queues(vi, queue_pairs);
if (err) {
- put_online_cpus();
+ cpus_read_unlock();
goto err;
}
virtnet_set_affinity(vi);
- put_online_cpus();
+ cpus_read_unlock();
netif_set_real_num_tx_queues(dev, queue_pairs);
netif_set_real_num_rx_queues(dev, queue_pairs);
@@ -2970,9 +2959,9 @@ static int init_vqs(struct virtnet_info *vi)
if (ret)
goto err_free;
- get_online_cpus();
+ cpus_read_lock();
virtnet_set_affinity(vi);
- put_online_cpus();
+ cpus_read_unlock();
return 0;
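The virtio_net hunks also track the CPU-hotplug API rename: get_online_cpus()/put_online_cpus() became cpus_read_lock()/cpus_read_unlock() with identical semantics, a read-side lock that keeps the set of online CPUs stable. A sketch of the pattern, assuming a helper that binds queues to CPUs as virtio_net's virtnet_set_affinity() does:

	#include <linux/cpu.h>

	static void example_rebind_queues(struct virtnet_info *vi)
	{
		cpus_read_lock();	/* CPUs cannot come or go here */
		virtnet_set_affinity(vi);
		cpus_read_unlock();
	}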
diff --git a/drivers/net/vmxnet3/Makefile b/drivers/net/vmxnet3/Makefile
index c5a167a1c85c..7a38925f4165 100644
--- a/drivers/net/vmxnet3/Makefile
+++ b/drivers/net/vmxnet3/Makefile
@@ -2,7 +2,7 @@
#
# Linux driver for VMware's vmxnet3 ethernet NIC.
#
-# Copyright (C) 2007-2020, VMware, Inc. All Rights Reserved.
+# Copyright (C) 2007-2021, VMware, Inc. All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
diff --git a/drivers/net/vmxnet3/upt1_defs.h b/drivers/net/vmxnet3/upt1_defs.h
index 8c014c98471c..f9f3a23d1698 100644
--- a/drivers/net/vmxnet3/upt1_defs.h
+++ b/drivers/net/vmxnet3/upt1_defs.h
@@ -1,7 +1,7 @@
/*
* Linux driver for VMware's vmxnet3 ethernet NIC.
*
- * Copyright (C) 2008-2020, VMware, Inc. All Rights Reserved.
+ * Copyright (C) 2008-2021, VMware, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
diff --git a/drivers/net/vmxnet3/vmxnet3_defs.h b/drivers/net/vmxnet3/vmxnet3_defs.h
index a8d5ebd47c71..74d4e8bc4abc 100644
--- a/drivers/net/vmxnet3/vmxnet3_defs.h
+++ b/drivers/net/vmxnet3/vmxnet3_defs.h
@@ -1,7 +1,7 @@
/*
* Linux driver for VMware's vmxnet3 ethernet NIC.
*
- * Copyright (C) 2008-2020, VMware, Inc. All Rights Reserved.
+ * Copyright (C) 2008-2021, VMware, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -98,6 +98,9 @@ enum {
VMXNET3_CMD_GET_TXDATA_DESC_SIZE,
VMXNET3_CMD_GET_COALESCE,
VMXNET3_CMD_GET_RSS_FIELDS,
+ VMXNET3_CMD_GET_RESERVED2,
+ VMXNET3_CMD_GET_RESERVED3,
+ VMXNET3_CMD_GET_MAX_QUEUES_CONF,
};
/*
@@ -341,13 +344,15 @@ struct Vmxnet3_RxCompDescExt {
#define VMXNET3_TXD_EOP_SIZE 1
/* value of RxCompDesc.rssType */
-enum {
- VMXNET3_RCD_RSS_TYPE_NONE = 0,
- VMXNET3_RCD_RSS_TYPE_IPV4 = 1,
- VMXNET3_RCD_RSS_TYPE_TCPIPV4 = 2,
- VMXNET3_RCD_RSS_TYPE_IPV6 = 3,
- VMXNET3_RCD_RSS_TYPE_TCPIPV6 = 4,
-};
+#define VMXNET3_RCD_RSS_TYPE_NONE 0
+#define VMXNET3_RCD_RSS_TYPE_IPV4 1
+#define VMXNET3_RCD_RSS_TYPE_TCPIPV4 2
+#define VMXNET3_RCD_RSS_TYPE_IPV6 3
+#define VMXNET3_RCD_RSS_TYPE_TCPIPV6 4
+#define VMXNET3_RCD_RSS_TYPE_UDPIPV4 5
+#define VMXNET3_RCD_RSS_TYPE_UDPIPV6 6
+#define VMXNET3_RCD_RSS_TYPE_ESPIPV4 7
+#define VMXNET3_RCD_RSS_TYPE_ESPIPV6 8
/* a union for accessing all cmd/completion descriptors */
@@ -533,6 +538,13 @@ enum vmxnet3_intr_type {
/* additional 1 for events */
#define VMXNET3_MAX_INTRS 25
+/* Version 6 and later will use below macros */
+#define VMXNET3_EXT_MAX_TX_QUEUES 32
+#define VMXNET3_EXT_MAX_RX_QUEUES 32
+/* additional 1 for events */
+#define VMXNET3_EXT_MAX_INTRS 65
+#define VMXNET3_FIRST_SET_INTRS 64
+
/* value of intrCtrl */
#define VMXNET3_IC_DISABLE_ALL 0x1 /* bit 0 */
@@ -547,6 +559,19 @@ struct Vmxnet3_IntrConf {
__le32 reserved[2];
};
+struct Vmxnet3_IntrConfExt {
+ u8 autoMask;
+ u8 numIntrs; /* # of interrupts */
+ u8 eventIntrIdx;
+ u8 reserved;
+ __le32 intrCtrl;
+ __le32 reserved1;
+ u8 modLevels[VMXNET3_EXT_MAX_INTRS]; /* moderation level for
+ * each intr
+ */
+ u8 reserved2[3];
+};
+
/* one bit per VLAN ID, the size is in the units of u32 */
#define VMXNET3_VFT_SIZE (4096 / (sizeof(u32) * 8))
@@ -719,11 +744,16 @@ struct Vmxnet3_DSDevRead {
struct Vmxnet3_VariableLenConfDesc pluginConfDesc;
};
+struct Vmxnet3_DSDevReadExt {
+ /* read-only region for device, read by dev in response to a SET cmd */
+ struct Vmxnet3_IntrConfExt intrConfExt;
+};
+
/* All structures in DriverShared are padded to multiples of 8 bytes */
struct Vmxnet3_DriverShared {
__le32 magic;
/* make devRead start at 64bit boundaries */
- __le32 pad;
+ __le32 size; /* size of DriverShared */
struct Vmxnet3_DSDevRead devRead;
__le32 ecr;
__le32 reserved;
@@ -734,6 +764,7 @@ struct Vmxnet3_DriverShared {
* command
*/
} cu;
+ struct Vmxnet3_DSDevReadExt devReadExt;
};
@@ -764,6 +795,7 @@ struct Vmxnet3_DriverShared {
((vfTable[vid >> 5] & (1 << (vid & 31))) != 0)
#define VMXNET3_MAX_MTU 9000
+#define VMXNET3_V6_MAX_MTU 9190
#define VMXNET3_MIN_MTU 60
#define VMXNET3_LINK_UP (10000 << 16 | 1) /* 10 Gbps, up */
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 6e87f1fc4874..e3c6b7e3bfdd 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1,7 +1,7 @@
/*
* Linux driver for VMware's vmxnet3 ethernet NIC.
*
- * Copyright (C) 2008-2020, VMware, Inc. All Rights Reserved.
+ * Copyright (C) 2008-2021, VMware, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -1478,10 +1478,28 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
#ifdef VMXNET3_RSS
if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE &&
- (adapter->netdev->features & NETIF_F_RXHASH))
+ (adapter->netdev->features & NETIF_F_RXHASH)) {
+ enum pkt_hash_types hash_type;
+
+ switch (rcd->rssType) {
+ case VMXNET3_RCD_RSS_TYPE_IPV4:
+ case VMXNET3_RCD_RSS_TYPE_IPV6:
+ hash_type = PKT_HASH_TYPE_L3;
+ break;
+ case VMXNET3_RCD_RSS_TYPE_TCPIPV4:
+ case VMXNET3_RCD_RSS_TYPE_TCPIPV6:
+ case VMXNET3_RCD_RSS_TYPE_UDPIPV4:
+ case VMXNET3_RCD_RSS_TYPE_UDPIPV6:
+ hash_type = PKT_HASH_TYPE_L4;
+ break;
+ default:
+ hash_type = PKT_HASH_TYPE_L3;
+ break;
+ }
skb_set_hash(ctx->skb,
le32_to_cpu(rcd->rssHash),
- PKT_HASH_TYPE_L3);
+ hash_type);
+ }
#endif
skb_put(ctx->skb, rcd->len);
@@ -2460,6 +2478,7 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
{
struct Vmxnet3_DriverShared *shared = adapter->shared;
struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
+ struct Vmxnet3_DSDevReadExt *devReadExt = &shared->devReadExt;
struct Vmxnet3_TxQueueConf *tqc;
struct Vmxnet3_RxQueueConf *rqc;
int i;
@@ -2572,14 +2591,26 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
#endif /* VMXNET3_RSS */
/* intr settings */
- devRead->intrConf.autoMask = adapter->intr.mask_mode ==
- VMXNET3_IMM_AUTO;
- devRead->intrConf.numIntrs = adapter->intr.num_intrs;
- for (i = 0; i < adapter->intr.num_intrs; i++)
- devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i];
+ if (!VMXNET3_VERSION_GE_6(adapter) ||
+ !adapter->queuesExtEnabled) {
+ devRead->intrConf.autoMask = adapter->intr.mask_mode ==
+ VMXNET3_IMM_AUTO;
+ devRead->intrConf.numIntrs = adapter->intr.num_intrs;
+ for (i = 0; i < adapter->intr.num_intrs; i++)
+ devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i];
+
+ devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx;
+ devRead->intrConf.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
+ } else {
+ devReadExt->intrConfExt.autoMask = adapter->intr.mask_mode ==
+ VMXNET3_IMM_AUTO;
+ devReadExt->intrConfExt.numIntrs = adapter->intr.num_intrs;
+ for (i = 0; i < adapter->intr.num_intrs; i++)
+ devReadExt->intrConfExt.modLevels[i] = adapter->intr.mod_levels[i];
- devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx;
- devRead->intrConf.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
+ devReadExt->intrConfExt.eventIntrIdx = adapter->intr.event_intr_idx;
+ devReadExt->intrConfExt.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
+ }
/* rx filter settings */
devRead->rxFilterConf.rxMode = 0;
@@ -2717,6 +2748,7 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
* tx queue if the link is up.
*/
vmxnet3_check_link(adapter, true);
+ netif_tx_wake_all_queues(adapter->netdev);
for (i = 0; i < adapter->num_rx_queues; i++)
napi_enable(&adapter->rx_queue[i].napi);
vmxnet3_enable_all_intrs(adapter);
@@ -3372,6 +3404,8 @@ vmxnet3_probe_device(struct pci_dev *pdev,
int size;
int num_tx_queues;
int num_rx_queues;
+ int queues;
+ unsigned long flags;
if (!pci_msi_enabled())
enable_mq = 0;
@@ -3383,7 +3417,6 @@ vmxnet3_probe_device(struct pci_dev *pdev,
else
#endif
num_rx_queues = 1;
- num_rx_queues = rounddown_pow_of_two(num_rx_queues);
if (enable_mq)
num_tx_queues = min(VMXNET3_DEVICE_MAX_TX_QUEUES,
@@ -3391,13 +3424,8 @@ vmxnet3_probe_device(struct pci_dev *pdev,
else
num_tx_queues = 1;
- num_tx_queues = rounddown_pow_of_two(num_tx_queues);
netdev = alloc_etherdev_mq(sizeof(struct vmxnet3_adapter),
max(num_tx_queues, num_rx_queues));
- dev_info(&pdev->dev,
- "# of Tx queues : %d, # of Rx queues : %d\n",
- num_tx_queues, num_rx_queues);
-
if (!netdev)
return -ENOMEM;
@@ -3447,51 +3475,22 @@ vmxnet3_probe_device(struct pci_dev *pdev,
goto err_alloc_shared;
}
- adapter->num_rx_queues = num_rx_queues;
- adapter->num_tx_queues = num_tx_queues;
- adapter->rx_buf_per_pkt = 1;
-
- size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
- size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues;
- adapter->tqd_start = dma_alloc_coherent(&adapter->pdev->dev, size,
- &adapter->queue_desc_pa,
- GFP_KERNEL);
-
- if (!adapter->tqd_start) {
- dev_err(&pdev->dev, "Failed to allocate memory\n");
- err = -ENOMEM;
- goto err_alloc_queue_desc;
- }
- adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start +
- adapter->num_tx_queues);
-
- adapter->pm_conf = dma_alloc_coherent(&adapter->pdev->dev,
- sizeof(struct Vmxnet3_PMConf),
- &adapter->pm_conf_pa,
- GFP_KERNEL);
- if (adapter->pm_conf == NULL) {
- err = -ENOMEM;
- goto err_alloc_pm;
- }
-
-#ifdef VMXNET3_RSS
-
- adapter->rss_conf = dma_alloc_coherent(&adapter->pdev->dev,
- sizeof(struct UPT1_RSSConf),
- &adapter->rss_conf_pa,
- GFP_KERNEL);
- if (adapter->rss_conf == NULL) {
- err = -ENOMEM;
- goto err_alloc_rss;
- }
-#endif /* VMXNET3_RSS */
-
err = vmxnet3_alloc_pci_resources(adapter);
if (err < 0)
goto err_alloc_pci;
ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
- if (ver & (1 << VMXNET3_REV_4)) {
+ if (ver & (1 << VMXNET3_REV_6)) {
+ VMXNET3_WRITE_BAR1_REG(adapter,
+ VMXNET3_REG_VRRS,
+ 1 << VMXNET3_REV_6);
+ adapter->version = VMXNET3_REV_6 + 1;
+ } else if (ver & (1 << VMXNET3_REV_5)) {
+ VMXNET3_WRITE_BAR1_REG(adapter,
+ VMXNET3_REG_VRRS,
+ 1 << VMXNET3_REV_5);
+ adapter->version = VMXNET3_REV_5 + 1;
+ } else if (ver & (1 << VMXNET3_REV_4)) {
VMXNET3_WRITE_BAR1_REG(adapter,
VMXNET3_REG_VRRS,
1 << VMXNET3_REV_4);
@@ -3529,6 +3528,77 @@ vmxnet3_probe_device(struct pci_dev *pdev,
goto err_ver;
}
+ if (VMXNET3_VERSION_GE_6(adapter)) {
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_GET_MAX_QUEUES_CONF);
+ queues = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+ if (queues > 0) {
+ adapter->num_rx_queues = min(num_rx_queues, ((queues >> 8) & 0xff));
+ adapter->num_tx_queues = min(num_tx_queues, (queues & 0xff));
+ } else {
+ adapter->num_rx_queues = min(num_rx_queues,
+ VMXNET3_DEVICE_DEFAULT_RX_QUEUES);
+ adapter->num_tx_queues = min(num_tx_queues,
+ VMXNET3_DEVICE_DEFAULT_TX_QUEUES);
+ }
+ if (adapter->num_rx_queues > VMXNET3_MAX_RX_QUEUES ||
+ adapter->num_tx_queues > VMXNET3_MAX_TX_QUEUES) {
+ adapter->queuesExtEnabled = true;
+ } else {
+ adapter->queuesExtEnabled = false;
+ }
+ } else {
+ adapter->queuesExtEnabled = false;
+ num_rx_queues = rounddown_pow_of_two(num_rx_queues);
+ num_tx_queues = rounddown_pow_of_two(num_tx_queues);
+ adapter->num_rx_queues = min(num_rx_queues,
+ VMXNET3_DEVICE_DEFAULT_RX_QUEUES);
+ adapter->num_tx_queues = min(num_tx_queues,
+ VMXNET3_DEVICE_DEFAULT_TX_QUEUES);
+ }
+ dev_info(&pdev->dev,
+ "# of Tx queues : %d, # of Rx queues : %d\n",
+ adapter->num_tx_queues, adapter->num_rx_queues);
+
+ adapter->rx_buf_per_pkt = 1;
+
+ size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
+ size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues;
+ adapter->tqd_start = dma_alloc_coherent(&adapter->pdev->dev, size,
+ &adapter->queue_desc_pa,
+ GFP_KERNEL);
+
+ if (!adapter->tqd_start) {
+ dev_err(&pdev->dev, "Failed to allocate memory\n");
+ err = -ENOMEM;
+ goto err_ver;
+ }
+ adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start +
+ adapter->num_tx_queues);
+
+ adapter->pm_conf = dma_alloc_coherent(&adapter->pdev->dev,
+ sizeof(struct Vmxnet3_PMConf),
+ &adapter->pm_conf_pa,
+ GFP_KERNEL);
+ if (adapter->pm_conf == NULL) {
+ err = -ENOMEM;
+ goto err_alloc_pm;
+ }
+
+#ifdef VMXNET3_RSS
+
+ adapter->rss_conf = dma_alloc_coherent(&adapter->pdev->dev,
+ sizeof(struct UPT1_RSSConf),
+ &adapter->rss_conf_pa,
+ GFP_KERNEL);
+ if (adapter->rss_conf == NULL) {
+ err = -ENOMEM;
+ goto err_alloc_rss;
+ }
+#endif /* VMXNET3_RSS */
+
if (VMXNET3_VERSION_GE_3(adapter)) {
adapter->coal_conf =
dma_alloc_coherent(&adapter->pdev->dev,
@@ -3538,7 +3608,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
GFP_KERNEL);
if (!adapter->coal_conf) {
err = -ENOMEM;
- goto err_ver;
+ goto err_coal_conf;
}
adapter->coal_conf->coalMode = VMXNET3_COALESCE_DISABLED;
adapter->default_coal_mode = true;
@@ -3581,9 +3651,12 @@ vmxnet3_probe_device(struct pci_dev *pdev,
vmxnet3_set_ethtool_ops(netdev);
netdev->watchdog_timeo = 5 * HZ;
- /* MTU range: 60 - 9000 */
+ /* MTU range: 60 - 9190 */
netdev->min_mtu = VMXNET3_MIN_MTU;
- netdev->max_mtu = VMXNET3_MAX_MTU;
+ if (VMXNET3_VERSION_GE_6(adapter))
+ netdev->max_mtu = VMXNET3_V6_MAX_MTU;
+ else
+ netdev->max_mtu = VMXNET3_MAX_MTU;
INIT_WORK(&adapter->work, vmxnet3_reset_work);
set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
@@ -3621,9 +3694,7 @@ err_register:
adapter->coal_conf, adapter->coal_conf_pa);
}
vmxnet3_free_intr_resources(adapter);
-err_ver:
- vmxnet3_free_pci_resources(adapter);
-err_alloc_pci:
+err_coal_conf:
#ifdef VMXNET3_RSS
dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
adapter->rss_conf, adapter->rss_conf_pa);
@@ -3634,7 +3705,9 @@ err_alloc_rss:
err_alloc_pm:
dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start,
adapter->queue_desc_pa);
-err_alloc_queue_desc:
+err_ver:
+ vmxnet3_free_pci_resources(adapter);
+err_alloc_pci:
dma_free_coherent(&adapter->pdev->dev,
sizeof(struct Vmxnet3_DriverShared),
adapter->shared, adapter->shared_pa);
@@ -3653,7 +3726,8 @@ vmxnet3_remove_device(struct pci_dev *pdev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
int size = 0;
- int num_rx_queues;
+ int num_rx_queues, rx_queues;
+ unsigned long flags;
#ifdef VMXNET3_RSS
if (enable_mq)
@@ -3662,7 +3736,24 @@ vmxnet3_remove_device(struct pci_dev *pdev)
else
#endif
num_rx_queues = 1;
- num_rx_queues = rounddown_pow_of_two(num_rx_queues);
+ if (VMXNET3_VERSION_GE_6(adapter)) {
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_GET_MAX_QUEUES_CONF);
+ rx_queues = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+ if (rx_queues > 0)
+ rx_queues = (rx_queues >> 8) & 0xff;
+ else
+ rx_queues = min(num_rx_queues, VMXNET3_DEVICE_DEFAULT_RX_QUEUES);
+ num_rx_queues = min(num_rx_queues, rx_queues);
+ } else {
+ num_rx_queues = rounddown_pow_of_two(num_rx_queues);
+ num_rx_queues = min(num_rx_queues,
+ VMXNET3_DEVICE_DEFAULT_RX_QUEUES);
+ }
cancel_work_sync(&adapter->work);
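Both the probe and remove paths above decode the same packed reply from VMXNET3_CMD_GET_MAX_QUEUES_CONF: after issuing the command through VMXNET3_REG_CMD, the low byte of the readback carries the maximum Tx queue count and the next byte the maximum Rx queue count, while a non-positive value means the device gave no answer and the defaults apply. A sketch of just the decode step (the field layout is inferred from the shifts in this patch):

	static void example_decode_max_queues(int queues, int *tx, int *rx)
	{
		if (queues > 0) {
			*tx = queues & 0xff;
			*rx = (queues >> 8) & 0xff;
		} else {
			*tx = VMXNET3_DEVICE_DEFAULT_TX_QUEUES;
			*rx = VMXNET3_DEVICE_DEFAULT_RX_QUEUES;
		}
	}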
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 1b483cf2b1ca..a3e2f2ba68b5 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -787,6 +787,10 @@ vmxnet3_get_rss_hash_opts(struct vmxnet3_adapter *adapter,
case AH_ESP_V6_FLOW:
case AH_V6_FLOW:
case ESP_V6_FLOW:
+ if (VMXNET3_VERSION_GE_6(adapter) &&
+ (rss_fields & VMXNET3_RSS_FIELDS_ESPIP6))
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ fallthrough;
case SCTP_V6_FLOW:
case IPV6_FLOW:
info->data |= RXH_IP_SRC | RXH_IP_DST;
@@ -871,6 +875,22 @@ vmxnet3_set_rss_hash_opt(struct net_device *netdev,
case ESP_V6_FLOW:
case AH_V6_FLOW:
case AH_ESP_V6_FLOW:
+ if (!VMXNET3_VERSION_GE_6(adapter))
+ return -EOPNOTSUPP;
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST))
+ return -EINVAL;
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ rss_fields &= ~VMXNET3_RSS_FIELDS_ESPIP6;
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ rss_fields |= VMXNET3_RSS_FIELDS_ESPIP6;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
case SCTP_V4_FLOW:
case SCTP_V6_FLOW:
if (!(nfc->data & RXH_IP_SRC) ||
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index e910596b79cf..7027ff483fa5 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -1,7 +1,7 @@
/*
* Linux driver for VMware's vmxnet3 ethernet NIC.
*
- * Copyright (C) 2008-2020, VMware, Inc. All Rights Reserved.
+ * Copyright (C) 2008-2021, VMware, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -69,18 +69,20 @@
/*
* Version numbers
*/
-#define VMXNET3_DRIVER_VERSION_STRING "1.5.0.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING "1.6.0.0-k"
/* Each byte of this 32-bit integer encodes a version number in
* VMXNET3_DRIVER_VERSION_STRING.
*/
-#define VMXNET3_DRIVER_VERSION_NUM 0x01050000
+#define VMXNET3_DRIVER_VERSION_NUM 0x01060000
#if defined(CONFIG_PCI_MSI)
/* RSS only makes sense if MSI-X is supported. */
#define VMXNET3_RSS
#endif
+#define VMXNET3_REV_6 5 /* Vmxnet3 Rev. 6 */
+#define VMXNET3_REV_5 4 /* Vmxnet3 Rev. 5 */
#define VMXNET3_REV_4 3 /* Vmxnet3 Rev. 4 */
#define VMXNET3_REV_3 2 /* Vmxnet3 Rev. 3 */
#define VMXNET3_REV_2 1 /* Vmxnet3 Rev. 2 */
@@ -301,15 +303,18 @@ struct vmxnet3_rx_queue {
struct vmxnet3_rq_driver_stats stats;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));
-#define VMXNET3_DEVICE_MAX_TX_QUEUES 8
-#define VMXNET3_DEVICE_MAX_RX_QUEUES 8 /* Keep this value as a power of 2 */
+#define VMXNET3_DEVICE_MAX_TX_QUEUES 32
+#define VMXNET3_DEVICE_MAX_RX_QUEUES 32 /* Keep this value as a power of 2 */
+
+#define VMXNET3_DEVICE_DEFAULT_TX_QUEUES 8
+#define VMXNET3_DEVICE_DEFAULT_RX_QUEUES 8 /* Keep this value as a power of 2 */
/* Should be less than UPT1_RSS_MAX_IND_TABLE_SIZE */
#define VMXNET3_RSS_IND_TABLE_SIZE (VMXNET3_DEVICE_MAX_RX_QUEUES * 4)
#define VMXNET3_LINUX_MAX_MSIX_VECT (VMXNET3_DEVICE_MAX_TX_QUEUES + \
VMXNET3_DEVICE_MAX_RX_QUEUES + 1)
-#define VMXNET3_LINUX_MIN_MSIX_VECT 2 /* 1 for tx-rx pair and 1 for event */
+#define VMXNET3_LINUX_MIN_MSIX_VECT 3 /* 1 for tx, 1 for rx and 1 for event */
struct vmxnet3_intr {
@@ -396,6 +401,7 @@ struct vmxnet3_adapter {
dma_addr_t adapter_pa;
dma_addr_t pm_conf_pa;
dma_addr_t rss_conf_pa;
+ bool queuesExtEnabled;
};
#define VMXNET3_WRITE_BAR0_REG(adapter, reg, val) \
@@ -421,6 +427,10 @@ struct vmxnet3_adapter {
(adapter->version >= VMXNET3_REV_3 + 1)
#define VMXNET3_VERSION_GE_4(adapter) \
(adapter->version >= VMXNET3_REV_4 + 1)
+#define VMXNET3_VERSION_GE_5(adapter) \
+ (adapter->version >= VMXNET3_REV_5 + 1)
+#define VMXNET3_VERSION_GE_6(adapter) \
+ (adapter->version >= VMXNET3_REV_6 + 1)
/* must be a multiple of VMXNET3_RING_SIZE_ALIGN */
#define VMXNET3_DEF_TX_RING_SIZE 512
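A note on the new version macros: adapter->version stores the negotiated revision plus one (see the VMXNET3_REG_VRRS handshake in vmxnet3_drv.c above), so zero can mean "not negotiated yet"; that is why VMXNET3_VERSION_GE_6() compares against VMXNET3_REV_6 + 1. A sketch of the equivalent open-coded check:

	static bool example_supports_rev6(const struct vmxnet3_adapter *adapter)
	{
		/* version == negotiated revision + 1, so REV_6 (5) + 1 == 6 */
		return adapter->version >= VMXNET3_REV_6 + 1;
	}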
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 8bbe2a7bb141..bf2fac913942 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -857,30 +857,24 @@ static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *s
unsigned int hh_len = LL_RESERVED_SPACE(dev);
struct neighbour *neigh;
bool is_v6gw = false;
- int ret = -EINVAL;
nf_reset_ct(skb);
/* Be paranoid, rather than too clever. */
if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
- struct sk_buff *skb2;
-
- skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
- if (!skb2) {
- ret = -ENOMEM;
- goto err;
+ skb = skb_expand_head(skb, hh_len);
+ if (!skb) {
+ dev->stats.tx_errors++;
+ return -ENOMEM;
}
- if (skb->sk)
- skb_set_owner_w(skb2, skb->sk);
-
- consume_skb(skb);
- skb = skb2;
}
rcu_read_lock_bh();
neigh = ip_neigh_for_gw(rt, skb, &is_v6gw);
if (!IS_ERR(neigh)) {
+ int ret;
+
sock_confirm_neigh(skb, neigh);
/* if crossing protocols, can not use the cached header */
ret = neigh_output(neigh, skb, is_v6gw);
@@ -889,9 +883,8 @@ static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *s
}
rcu_read_unlock_bh();
-err:
vrf_tx_error(skb->dev, skb);
- return ret;
+ return -EINVAL;
}
static int vrf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
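The vrf hunk above replaces the open-coded skb_realloc_headroom()/skb_set_owner_w()/consume_skb() sequence with skb_expand_head(), which performs all three steps and frees the original skb on allocation failure, so the caller must not touch the old pointer after a NULL return. A sketch of the resulting idiom:

	static int example_ensure_headroom(struct sk_buff **pskb,
					   struct net_device *dev)
	{
		unsigned int hh_len = LL_RESERVED_SPACE(dev);

		if (unlikely(skb_headroom(*pskb) < hh_len && dev->header_ops)) {
			*pskb = skb_expand_head(*pskb, hh_len);
			if (!*pskb)
				return -ENOMEM;	/* old skb already freed */
		}
		return 0;
	}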
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index 473df2505c8e..592a8389fc5a 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -290,30 +290,6 @@ config SLIC_DS26522
To compile this driver as a module, choose M here: the
module will be called slic_ds26522.
-config DSCC4_PCISYNC
- bool "Etinc PCISYNC features"
- depends on DSCC4
- help
- Due to Etinc's design choice for its PCISYNC cards, some operations
- are only allowed on specific ports of the DSCC4. This option is the
- only way for the driver to know that it shouldn't return a success
- code for these operations.
-
- Please say Y if your card is an Etinc's PCISYNC.
-
-config DSCC4_PCI_RST
- bool "Hard reset support"
- depends on DSCC4
- help
- Various DSCC4 bugs forbid any reliable software reset of the ASIC.
- As a replacement, some vendors provide a way to assert the PCI #RST
- pin of DSCC4 through the GPIO port of the card. If you choose Y,
- the driver will make use of this feature before module removal
- (i.e. rmmod). The feature is known to be available on Commtech's
- cards. Contact your manufacturer for details.
-
- Say Y if your card supports this feature.
-
config IXP4XX_HSS
tristate "Intel IXP4xx HSS (synchronous serial port) support"
depends on HDLC && IXP4XX_NPE && IXP4XX_QMGR
@@ -337,33 +313,6 @@ config LAPBETHER
To compile this driver as a module, choose M here: the
module will be called lapbether.
- If unsure, say N.
-
-config SBNI
- tristate "Granch SBNI12 Leased Line adapter support"
- depends on X86
- help
- Driver for ISA SBNI12-xx cards which are low cost alternatives to
- leased line modems.
-
- You can find more information and last versions of drivers and
- utilities at <http://www.granch.ru/>. If you have any question you
- can send email to <sbni@granch.ru>.
-
- To compile this driver as a module, choose M here: the
- module will be called sbni.
-
- If unsure, say N.
-
-config SBNI_MULTILINE
- bool "Multiple line feature support"
- depends on SBNI
- help
- Schedule traffic for some parallel lines, via SBNI12 adapters.
-
- If you have two computers connected with two parallel lines it's
- possible to increase transfer rate nearly twice. You should have
- a program named 'sbniconfig' to configure adapters.
If unsure, say N.
diff --git a/drivers/net/wan/Makefile b/drivers/net/wan/Makefile
index 081666c36ca2..f6b92efffc94 100644
--- a/drivers/net/wan/Makefile
+++ b/drivers/net/wan/Makefile
@@ -22,7 +22,6 @@ obj-$(CONFIG_FARSYNC) += farsync.o
obj-$(CONFIG_LANMEDIA) += lmc/
obj-$(CONFIG_LAPBETHER) += lapbether.o
-obj-$(CONFIG_SBNI) += sbni.o
obj-$(CONFIG_N2) += n2.o
obj-$(CONFIG_C101) += c101.o
obj-$(CONFIG_WANXL) += wanxl.o
diff --git a/drivers/net/wan/c101.c b/drivers/net/wan/c101.c
index 059c2f7133be..8dd14d916c3a 100644
--- a/drivers/net/wan/c101.c
+++ b/drivers/net/wan/c101.c
@@ -208,14 +208,12 @@ static int c101_close(struct net_device *dev)
return 0;
}
-static int c101_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+static int c101_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
+ void __user *data, int cmd)
{
- const size_t size = sizeof(sync_serial_settings);
- sync_serial_settings new_line;
- sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
+#ifdef DEBUG_RINGS
port_t *port = dev_to_port(dev);
-#ifdef DEBUG_RINGS
if (cmd == SIOCDEVPRIVATE) {
sca_dump_rings(dev);
printk(KERN_DEBUG "MSCI1: ST: %02x %02x %02x %02x\n",
@@ -226,14 +224,22 @@ static int c101_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return 0;
}
#endif
- if (cmd != SIOCWANDEV)
- return hdlc_ioctl(dev, ifr, cmd);
- switch (ifr->ifr_settings.type) {
+ return -EOPNOTSUPP;
+}
+
+static int c101_ioctl(struct net_device *dev, struct if_settings *ifs)
+{
+ const size_t size = sizeof(sync_serial_settings);
+ sync_serial_settings new_line;
+ sync_serial_settings __user *line = ifs->ifs_ifsu.sync;
+ port_t *port = dev_to_port(dev);
+
+ switch (ifs->type) {
case IF_GET_IFACE:
- ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
- if (ifr->ifr_settings.size < size) {
- ifr->ifr_settings.size = size; /* data size wanted */
+ ifs->type = IF_IFACE_SYNC_SERIAL;
+ if (ifs->size < size) {
+ ifs->size = size; /* data size wanted */
return -ENOBUFS;
}
if (copy_to_user(line, &port->settings, size))
@@ -261,7 +267,7 @@ static int c101_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return 0;
default:
- return hdlc_ioctl(dev, ifr, cmd);
+ return hdlc_ioctl(dev, ifs);
}
}
@@ -286,7 +292,8 @@ static const struct net_device_ops c101_ops = {
.ndo_open = c101_open,
.ndo_stop = c101_close,
.ndo_start_xmit = hdlc_start_xmit,
- .ndo_do_ioctl = c101_ioctl,
+ .ndo_siocwandev = c101_ioctl,
+ .ndo_siocdevprivate = c101_siocdevprivate,
};
static int __init c101_run(unsigned long irq, unsigned long winbase)
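The c101 conversion shows the general shape of the WAN ioctl split: SIOCWANDEV now reaches the driver through .ndo_siocwandev with the if_settings already extracted from the ifreq, while SIOCDEVPRIVATE commands arrive via .ndo_siocdevprivate, so the old "if (cmd != SIOCWANDEV)" guards disappear. A schematic ops table under that split (handler names hypothetical; a driver with no device-specific settings can point .ndo_siocwandev straight at hdlc_ioctl, as cosa and hostess_sv11 do below):

	static int example_siocwandev(struct net_device *dev,
				      struct if_settings *ifs)
	{
		/* device-specific IF_IFACE_* handling would go here */
		return hdlc_ioctl(dev, ifs);
	}

	static const struct net_device_ops example_ops = {
		.ndo_start_xmit     = hdlc_start_xmit,
		.ndo_siocwandev     = example_siocwandev,
		/* .ndo_siocdevprivate only if private ioctls are supported */
	};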
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index 43caab0b7dee..23d2954d9747 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -267,7 +267,6 @@ static netdev_tx_t cosa_net_tx(struct sk_buff *skb, struct net_device *d);
static char *cosa_net_setup_rx(struct channel_data *channel, int size);
static int cosa_net_rx_done(struct channel_data *channel);
static int cosa_net_tx_done(struct channel_data *channel, int size);
-static int cosa_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
/* Character device */
static char *chrdev_setup_rx(struct channel_data *channel, int size);
@@ -415,7 +414,7 @@ static const struct net_device_ops cosa_ops = {
.ndo_open = cosa_net_open,
.ndo_stop = cosa_net_close,
.ndo_start_xmit = hdlc_start_xmit,
- .ndo_do_ioctl = cosa_net_ioctl,
+ .ndo_siocwandev = hdlc_ioctl,
.ndo_tx_timeout = cosa_net_timeout,
};
@@ -1169,18 +1168,6 @@ static int cosa_ioctl_common(struct cosa_data *cosa,
return -ENOIOCTLCMD;
}
-static int cosa_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
- int rv;
- struct channel_data *chan = dev_to_chan(dev);
-
- rv = cosa_ioctl_common(chan->cosa, chan, cmd,
- (unsigned long)ifr->ifr_data);
- if (rv != -ENOIOCTLCMD)
- return rv;
- return hdlc_ioctl(dev, ifr, cmd);
-}
-
static long cosa_chardev_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index b3466e084e84..6a212c085435 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -1784,16 +1784,15 @@ gather_conf_info(struct fst_card_info *card, struct fst_port_info *port,
static int
fst_set_iface(struct fst_card_info *card, struct fst_port_info *port,
- struct ifreq *ifr)
+ struct if_settings *ifs)
{
sync_serial_settings sync;
int i;
- if (ifr->ifr_settings.size != sizeof(sync))
+ if (ifs->size != sizeof(sync))
return -ENOMEM;
- if (copy_from_user
- (&sync, ifr->ifr_settings.ifs_ifsu.sync, sizeof(sync)))
+ if (copy_from_user(&sync, ifs->ifs_ifsu.sync, sizeof(sync)))
return -EFAULT;
if (sync.loopback)
@@ -1801,7 +1800,7 @@ fst_set_iface(struct fst_card_info *card, struct fst_port_info *port,
i = port->index;
- switch (ifr->ifr_settings.type) {
+ switch (ifs->type) {
case IF_IFACE_V35:
FST_WRW(card, portConfig[i].lineInterface, V35);
port->hwif = V35;
@@ -1857,7 +1856,7 @@ fst_set_iface(struct fst_card_info *card, struct fst_port_info *port,
static int
fst_get_iface(struct fst_card_info *card, struct fst_port_info *port,
- struct ifreq *ifr)
+ struct if_settings *ifs)
{
sync_serial_settings sync;
int i;
@@ -1868,29 +1867,29 @@ fst_get_iface(struct fst_card_info *card, struct fst_port_info *port,
*/
switch (port->hwif) {
case E1:
- ifr->ifr_settings.type = IF_IFACE_E1;
+ ifs->type = IF_IFACE_E1;
break;
case T1:
- ifr->ifr_settings.type = IF_IFACE_T1;
+ ifs->type = IF_IFACE_T1;
break;
case V35:
- ifr->ifr_settings.type = IF_IFACE_V35;
+ ifs->type = IF_IFACE_V35;
break;
case V24:
- ifr->ifr_settings.type = IF_IFACE_V24;
+ ifs->type = IF_IFACE_V24;
break;
case X21D:
- ifr->ifr_settings.type = IF_IFACE_X21D;
+ ifs->type = IF_IFACE_X21D;
break;
case X21:
default:
- ifr->ifr_settings.type = IF_IFACE_X21;
+ ifs->type = IF_IFACE_X21;
break;
}
- if (ifr->ifr_settings.size == 0)
+ if (!ifs->size)
return 0; /* only type requested */
- if (ifr->ifr_settings.size < sizeof(sync))
+ if (ifs->size < sizeof(sync))
return -ENOMEM;
i = port->index;
@@ -1901,15 +1900,15 @@ fst_get_iface(struct fst_card_info *card, struct fst_port_info *port,
INTCLK ? CLOCK_INT : CLOCK_EXT;
sync.loopback = 0;
- if (copy_to_user(ifr->ifr_settings.ifs_ifsu.sync, &sync, sizeof(sync)))
+ if (copy_to_user(ifs->ifs_ifsu.sync, &sync, sizeof(sync)))
return -EFAULT;
- ifr->ifr_settings.size = sizeof(sync);
+ ifs->size = sizeof(sync);
return 0;
}
static int
-fst_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+fst_siocdevprivate(struct net_device *dev, struct ifreq *ifr, void __user *data, int cmd)
{
struct fst_card_info *card;
struct fst_port_info *port;
@@ -1918,7 +1917,7 @@ fst_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
unsigned long flags;
void *buf;
- dbg(DBG_IOCTL, "ioctl: %x, %p\n", cmd, ifr->ifr_data);
+ dbg(DBG_IOCTL, "ioctl: %x, %p\n", cmd, data);
port = dev_to_port(dev);
card = port->card;
@@ -1942,11 +1941,10 @@ fst_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
/* First copy in the header with the length and offset of data
* to write
*/
- if (!ifr->ifr_data)
+ if (!data)
return -EINVAL;
- if (copy_from_user(&wrthdr, ifr->ifr_data,
- sizeof(struct fstioc_write)))
+ if (copy_from_user(&wrthdr, data, sizeof(struct fstioc_write)))
return -EFAULT;
/* Sanity check the parameters. We don't support partial writes
@@ -1958,7 +1956,7 @@ fst_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
/* Now copy the data to the card. */
- buf = memdup_user(ifr->ifr_data + sizeof(struct fstioc_write),
+ buf = memdup_user(data + sizeof(struct fstioc_write),
wrthdr.size);
if (IS_ERR(buf))
return PTR_ERR(buf);
@@ -1991,12 +1989,12 @@ fst_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
}
}
- if (!ifr->ifr_data)
+ if (!data)
return -EINVAL;
gather_conf_info(card, port, &info);
- if (copy_to_user(ifr->ifr_data, &info, sizeof(info)))
+ if (copy_to_user(data, &info, sizeof(info)))
return -EFAULT;
return 0;
@@ -2011,46 +2009,58 @@ fst_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
card->card_no, card->state);
return -EIO;
}
- if (copy_from_user(&info, ifr->ifr_data, sizeof(info)))
+ if (copy_from_user(&info, data, sizeof(info)))
return -EFAULT;
return set_conf_from_info(card, port, &info);
+ default:
+ return -EINVAL;
+ }
+}
- case SIOCWANDEV:
- switch (ifr->ifr_settings.type) {
- case IF_GET_IFACE:
- return fst_get_iface(card, port, ifr);
-
- case IF_IFACE_SYNC_SERIAL:
- case IF_IFACE_V35:
- case IF_IFACE_V24:
- case IF_IFACE_X21:
- case IF_IFACE_X21D:
- case IF_IFACE_T1:
- case IF_IFACE_E1:
- return fst_set_iface(card, port, ifr);
-
- case IF_PROTO_RAW:
- port->mode = FST_RAW;
- return 0;
+static int
+fst_ioctl(struct net_device *dev, struct if_settings *ifs)
+{
+ struct fst_card_info *card;
+ struct fst_port_info *port;
- case IF_GET_PROTO:
- if (port->mode == FST_RAW) {
- ifr->ifr_settings.type = IF_PROTO_RAW;
- return 0;
- }
- return hdlc_ioctl(dev, ifr, cmd);
+ dbg(DBG_IOCTL, "SIOCDEVPRIVATE, %x\n", ifs->type);
- default:
- port->mode = FST_GEN_HDLC;
- dbg(DBG_IOCTL, "Passing this type to hdlc %x\n",
- ifr->ifr_settings.type);
- return hdlc_ioctl(dev, ifr, cmd);
+ port = dev_to_port(dev);
+ card = port->card;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ switch (ifs->type) {
+ case IF_GET_IFACE:
+ return fst_get_iface(card, port, ifs);
+
+ case IF_IFACE_SYNC_SERIAL:
+ case IF_IFACE_V35:
+ case IF_IFACE_V24:
+ case IF_IFACE_X21:
+ case IF_IFACE_X21D:
+ case IF_IFACE_T1:
+ case IF_IFACE_E1:
+ return fst_set_iface(card, port, ifs);
+
+ case IF_PROTO_RAW:
+ port->mode = FST_RAW;
+ return 0;
+
+ case IF_GET_PROTO:
+ if (port->mode == FST_RAW) {
+ ifs->type = IF_PROTO_RAW;
+ return 0;
}
+ return hdlc_ioctl(dev, ifs);
default:
- /* Not one of ours. Pass through to HDLC package */
- return hdlc_ioctl(dev, ifr, cmd);
+ port->mode = FST_GEN_HDLC;
+ dbg(DBG_IOCTL, "Passing this type to hdlc %x\n",
+ ifs->type);
+ return hdlc_ioctl(dev, ifs);
}
}
@@ -2310,7 +2320,8 @@ static const struct net_device_ops fst_ops = {
.ndo_open = fst_open,
.ndo_stop = fst_close,
.ndo_start_xmit = hdlc_start_xmit,
- .ndo_do_ioctl = fst_ioctl,
+ .ndo_siocwandev = fst_ioctl,
+ .ndo_siocdevprivate = fst_siocdevprivate,
.ndo_tx_timeout = fst_tx_timeout,
};
diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
index 39f05fabbfa4..cda1b4ce6b21 100644
--- a/drivers/net/wan/fsl_ucc_hdlc.c
+++ b/drivers/net/wan/fsl_ucc_hdlc.c
@@ -674,31 +674,28 @@ static irqreturn_t ucc_hdlc_irq_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int uhdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+static int uhdlc_ioctl(struct net_device *dev, struct if_settings *ifs)
{
const size_t size = sizeof(te1_settings);
te1_settings line;
struct ucc_hdlc_private *priv = netdev_priv(dev);
- if (cmd != SIOCWANDEV)
- return hdlc_ioctl(dev, ifr, cmd);
-
- switch (ifr->ifr_settings.type) {
+ switch (ifs->type) {
case IF_GET_IFACE:
- ifr->ifr_settings.type = IF_IFACE_E1;
- if (ifr->ifr_settings.size < size) {
- ifr->ifr_settings.size = size; /* data size wanted */
+ ifs->type = IF_IFACE_E1;
+ if (ifs->size < size) {
+ ifs->size = size; /* data size wanted */
return -ENOBUFS;
}
memset(&line, 0, sizeof(line));
line.clock_type = priv->clocking;
- if (copy_to_user(ifr->ifr_settings.ifs_ifsu.sync, &line, size))
+ if (copy_to_user(ifs->ifs_ifsu.sync, &line, size))
return -EFAULT;
return 0;
default:
- return hdlc_ioctl(dev, ifr, cmd);
+ return hdlc_ioctl(dev, ifs);
}
}
@@ -1053,7 +1050,7 @@ static const struct net_device_ops uhdlc_ops = {
.ndo_open = uhdlc_open,
.ndo_stop = uhdlc_close,
.ndo_start_xmit = hdlc_start_xmit,
- .ndo_do_ioctl = uhdlc_ioctl,
+ .ndo_siocwandev = uhdlc_ioctl,
.ndo_tx_timeout = uhdlc_tx_timeout,
};
diff --git a/drivers/net/wan/hdlc.c b/drivers/net/wan/hdlc.c
index dd6312b69861..cbed10b1d862 100644
--- a/drivers/net/wan/hdlc.c
+++ b/drivers/net/wan/hdlc.c
@@ -196,16 +196,13 @@ void hdlc_close(struct net_device *dev)
}
EXPORT_SYMBOL(hdlc_close);
-int hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+int hdlc_ioctl(struct net_device *dev, struct if_settings *ifs)
{
struct hdlc_proto *proto = first_proto;
int result;
- if (cmd != SIOCWANDEV)
- return -EINVAL;
-
if (dev_to_hdlc(dev)->proto) {
- result = dev_to_hdlc(dev)->proto->ioctl(dev, ifr);
+ result = dev_to_hdlc(dev)->proto->ioctl(dev, ifs);
if (result != -EINVAL)
return result;
}
@@ -213,7 +210,7 @@ int hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
/* Not handled by currently attached protocol (if any) */
while (proto) {
- result = proto->ioctl(dev, ifr);
+ result = proto->ioctl(dev, ifs);
if (result != -EINVAL)
return result;
proto = proto->next;
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c
index c54fdae950fb..cdebe65a7e2d 100644
--- a/drivers/net/wan/hdlc_cisco.c
+++ b/drivers/net/wan/hdlc_cisco.c
@@ -56,7 +56,7 @@ struct cisco_state {
u32 rxseq; /* RX sequence number */
};
-static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr);
+static int cisco_ioctl(struct net_device *dev, struct if_settings *ifs);
static inline struct cisco_state *state(hdlc_device *hdlc)
{
@@ -306,21 +306,21 @@ static const struct header_ops cisco_header_ops = {
.create = cisco_hard_header,
};
-static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr)
+static int cisco_ioctl(struct net_device *dev, struct if_settings *ifs)
{
- cisco_proto __user *cisco_s = ifr->ifr_settings.ifs_ifsu.cisco;
+ cisco_proto __user *cisco_s = ifs->ifs_ifsu.cisco;
const size_t size = sizeof(cisco_proto);
cisco_proto new_settings;
hdlc_device *hdlc = dev_to_hdlc(dev);
int result;
- switch (ifr->ifr_settings.type) {
+ switch (ifs->type) {
case IF_GET_PROTO:
if (dev_to_hdlc(dev)->proto != &proto)
return -EINVAL;
- ifr->ifr_settings.type = IF_PROTO_CISCO;
- if (ifr->ifr_settings.size < size) {
- ifr->ifr_settings.size = size; /* data size wanted */
+ ifs->type = IF_PROTO_CISCO;
+ if (ifs->size < size) {
+ ifs->size = size; /* data size wanted */
return -ENOBUFS;
}
if (copy_to_user(cisco_s, &state(hdlc)->settings, size))
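All of the hdlc_* protocol handlers in this series follow the same IF_GET_PROTO buffer-size negotiation, visible in the cisco hunk above and repeated below for fr, raw, raw_eth and x25: report the protocol type, and if the caller's buffer is too small, write the wanted size back into ifs->size and return -ENOBUFS so userspace can retry. A schematic of that contract (names hypothetical):

	static int example_get_proto(struct if_settings *ifs, int type,
				     void __user *to, const void *settings,
				     size_t size)
	{
		ifs->type = type;
		if (ifs->size < size) {
			ifs->size = size;	/* data size wanted */
			return -ENOBUFS;
		}
		if (copy_to_user(to, settings, size))
			return -EFAULT;
		return 0;
	}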
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
index 25e3564ce118..7637edce443e 100644
--- a/drivers/net/wan/hdlc_fr.c
+++ b/drivers/net/wan/hdlc_fr.c
@@ -146,7 +146,7 @@ struct frad_state {
u8 rxseq; /* RX sequence number */
};
-static int fr_ioctl(struct net_device *dev, struct ifreq *ifr);
+static int fr_ioctl(struct net_device *dev, struct if_settings *ifs);
static inline u16 q922_to_dlci(u8 *hdr)
{
@@ -357,26 +357,26 @@ static int pvc_close(struct net_device *dev)
return 0;
}
-static int pvc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+static int pvc_ioctl(struct net_device *dev, struct if_settings *ifs)
{
struct pvc_device *pvc = dev->ml_priv;
fr_proto_pvc_info info;
- if (ifr->ifr_settings.type == IF_GET_PROTO) {
+ if (ifs->type == IF_GET_PROTO) {
if (dev->type == ARPHRD_ETHER)
- ifr->ifr_settings.type = IF_PROTO_FR_ETH_PVC;
+ ifs->type = IF_PROTO_FR_ETH_PVC;
else
- ifr->ifr_settings.type = IF_PROTO_FR_PVC;
+ ifs->type = IF_PROTO_FR_PVC;
- if (ifr->ifr_settings.size < sizeof(info)) {
+ if (ifs->size < sizeof(info)) {
/* data size wanted */
- ifr->ifr_settings.size = sizeof(info);
+ ifs->size = sizeof(info);
return -ENOBUFS;
}
info.dlci = pvc->dlci;
memcpy(info.master, pvc->frad->name, IFNAMSIZ);
- if (copy_to_user(ifr->ifr_settings.ifs_ifsu.fr_pvc_info,
+ if (copy_to_user(ifs->ifs_ifsu.fr_pvc_info,
&info, sizeof(info)))
return -EFAULT;
return 0;
@@ -1056,7 +1056,7 @@ static const struct net_device_ops pvc_ops = {
.ndo_open = pvc_open,
.ndo_stop = pvc_close,
.ndo_start_xmit = pvc_xmit,
- .ndo_do_ioctl = pvc_ioctl,
+ .ndo_siocwandev = pvc_ioctl,
};
static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
@@ -1179,22 +1179,22 @@ static struct hdlc_proto proto = {
.module = THIS_MODULE,
};
-static int fr_ioctl(struct net_device *dev, struct ifreq *ifr)
+static int fr_ioctl(struct net_device *dev, struct if_settings *ifs)
{
- fr_proto __user *fr_s = ifr->ifr_settings.ifs_ifsu.fr;
+ fr_proto __user *fr_s = ifs->ifs_ifsu.fr;
const size_t size = sizeof(fr_proto);
fr_proto new_settings;
hdlc_device *hdlc = dev_to_hdlc(dev);
fr_proto_pvc pvc;
int result;
- switch (ifr->ifr_settings.type) {
+ switch (ifs->type) {
case IF_GET_PROTO:
if (dev_to_hdlc(dev)->proto != &proto) /* Different proto */
return -EINVAL;
- ifr->ifr_settings.type = IF_PROTO_FR;
- if (ifr->ifr_settings.size < size) {
- ifr->ifr_settings.size = size; /* data size wanted */
+ ifs->type = IF_PROTO_FR;
+ if (ifs->size < size) {
+ ifs->size = size; /* data size wanted */
return -ENOBUFS;
}
if (copy_to_user(fr_s, &state(hdlc)->settings, size))
@@ -1256,21 +1256,21 @@ static int fr_ioctl(struct net_device *dev, struct ifreq *ifr)
if (!capable(CAP_NET_ADMIN))
return -EPERM;
- if (copy_from_user(&pvc, ifr->ifr_settings.ifs_ifsu.fr_pvc,
+ if (copy_from_user(&pvc, ifs->ifs_ifsu.fr_pvc,
sizeof(fr_proto_pvc)))
return -EFAULT;
if (pvc.dlci <= 0 || pvc.dlci >= 1024)
return -EINVAL; /* Only 10 bits, DLCI 0 reserved */
- if (ifr->ifr_settings.type == IF_PROTO_FR_ADD_ETH_PVC ||
- ifr->ifr_settings.type == IF_PROTO_FR_DEL_ETH_PVC)
+ if (ifs->type == IF_PROTO_FR_ADD_ETH_PVC ||
+ ifs->type == IF_PROTO_FR_DEL_ETH_PVC)
result = ARPHRD_ETHER; /* bridged Ethernet device */
else
result = ARPHRD_DLCI;
- if (ifr->ifr_settings.type == IF_PROTO_FR_ADD_PVC ||
- ifr->ifr_settings.type == IF_PROTO_FR_ADD_ETH_PVC)
+ if (ifs->type == IF_PROTO_FR_ADD_PVC ||
+ ifs->type == IF_PROTO_FR_ADD_ETH_PVC)
return fr_add_pvc(dev, pvc.dlci, result);
else
return fr_del_pvc(hdlc, pvc.dlci, result);
diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
index b81ecf432a0c..37a3c989cba1 100644
--- a/drivers/net/wan/hdlc_ppp.c
+++ b/drivers/net/wan/hdlc_ppp.c
@@ -100,7 +100,7 @@ static const char *const event_names[EVENTS] = {
static struct sk_buff_head tx_queue; /* used when holding the spin lock */
-static int ppp_ioctl(struct net_device *dev, struct ifreq *ifr);
+static int ppp_ioctl(struct net_device *dev, struct if_settings *ifs);
static inline struct ppp *get_ppp(struct net_device *dev)
{
@@ -655,17 +655,17 @@ static const struct header_ops ppp_header_ops = {
.create = ppp_hard_header,
};
-static int ppp_ioctl(struct net_device *dev, struct ifreq *ifr)
+static int ppp_ioctl(struct net_device *dev, struct if_settings *ifs)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
struct ppp *ppp;
int result;
- switch (ifr->ifr_settings.type) {
+ switch (ifs->type) {
case IF_GET_PROTO:
if (dev_to_hdlc(dev)->proto != &proto)
return -EINVAL;
- ifr->ifr_settings.type = IF_PROTO_PPP;
+ ifs->type = IF_PROTO_PPP;
return 0; /* return protocol only, no settable parameters */
case IF_PROTO_PPP:
diff --git a/drivers/net/wan/hdlc_raw.c b/drivers/net/wan/hdlc_raw.c
index 54d28496fefd..4a2f068721bc 100644
--- a/drivers/net/wan/hdlc_raw.c
+++ b/drivers/net/wan/hdlc_raw.c
@@ -19,7 +19,7 @@
#include <linux/skbuff.h>
-static int raw_ioctl(struct net_device *dev, struct ifreq *ifr);
+static int raw_ioctl(struct net_device *dev, struct if_settings *ifs);
static __be16 raw_type_trans(struct sk_buff *skb, struct net_device *dev)
{
@@ -33,21 +33,21 @@ static struct hdlc_proto proto = {
};
-static int raw_ioctl(struct net_device *dev, struct ifreq *ifr)
+static int raw_ioctl(struct net_device *dev, struct if_settings *ifs)
{
- raw_hdlc_proto __user *raw_s = ifr->ifr_settings.ifs_ifsu.raw_hdlc;
+ raw_hdlc_proto __user *raw_s = ifs->ifs_ifsu.raw_hdlc;
const size_t size = sizeof(raw_hdlc_proto);
raw_hdlc_proto new_settings;
hdlc_device *hdlc = dev_to_hdlc(dev);
int result;
- switch (ifr->ifr_settings.type) {
+ switch (ifs->type) {
case IF_GET_PROTO:
if (dev_to_hdlc(dev)->proto != &proto)
return -EINVAL;
- ifr->ifr_settings.type = IF_PROTO_HDLC;
- if (ifr->ifr_settings.size < size) {
- ifr->ifr_settings.size = size; /* data size wanted */
+ ifs->type = IF_PROTO_HDLC;
+ if (ifs->size < size) {
+ ifs->size = size; /* data size wanted */
return -ENOBUFS;
}
if (copy_to_user(raw_s, hdlc->state, size))
diff --git a/drivers/net/wan/hdlc_raw_eth.c b/drivers/net/wan/hdlc_raw_eth.c
index 927596276a07..0a66b7356405 100644
--- a/drivers/net/wan/hdlc_raw_eth.c
+++ b/drivers/net/wan/hdlc_raw_eth.c
@@ -20,7 +20,7 @@
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
-static int raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr);
+static int raw_eth_ioctl(struct net_device *dev, struct if_settings *ifs);
static netdev_tx_t eth_tx(struct sk_buff *skb, struct net_device *dev)
{
@@ -48,22 +48,22 @@ static struct hdlc_proto proto = {
};
-static int raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr)
+static int raw_eth_ioctl(struct net_device *dev, struct if_settings *ifs)
{
- raw_hdlc_proto __user *raw_s = ifr->ifr_settings.ifs_ifsu.raw_hdlc;
+ raw_hdlc_proto __user *raw_s = ifs->ifs_ifsu.raw_hdlc;
const size_t size = sizeof(raw_hdlc_proto);
raw_hdlc_proto new_settings;
hdlc_device *hdlc = dev_to_hdlc(dev);
unsigned int old_qlen;
int result;
- switch (ifr->ifr_settings.type) {
+ switch (ifs->type) {
case IF_GET_PROTO:
if (dev_to_hdlc(dev)->proto != &proto)
return -EINVAL;
- ifr->ifr_settings.type = IF_PROTO_HDLC_ETH;
- if (ifr->ifr_settings.size < size) {
- ifr->ifr_settings.size = size; /* data size wanted */
+ ifs->type = IF_PROTO_HDLC_ETH;
+ if (ifs->size < size) {
+ ifs->size = size; /* data size wanted */
return -ENOBUFS;
}
if (copy_to_user(raw_s, hdlc->state, size))
diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
index 9b7ebf8bd85c..f72c92c24003 100644
--- a/drivers/net/wan/hdlc_x25.c
+++ b/drivers/net/wan/hdlc_x25.c
@@ -29,7 +29,7 @@ struct x25_state {
struct tasklet_struct rx_tasklet;
};
-static int x25_ioctl(struct net_device *dev, struct ifreq *ifr);
+static int x25_ioctl(struct net_device *dev, struct if_settings *ifs);
static struct x25_state *state(hdlc_device *hdlc)
{
@@ -274,21 +274,21 @@ static struct hdlc_proto proto = {
.module = THIS_MODULE,
};
-static int x25_ioctl(struct net_device *dev, struct ifreq *ifr)
+static int x25_ioctl(struct net_device *dev, struct if_settings *ifs)
{
- x25_hdlc_proto __user *x25_s = ifr->ifr_settings.ifs_ifsu.x25;
+ x25_hdlc_proto __user *x25_s = ifs->ifs_ifsu.x25;
const size_t size = sizeof(x25_hdlc_proto);
hdlc_device *hdlc = dev_to_hdlc(dev);
x25_hdlc_proto new_settings;
int result;
- switch (ifr->ifr_settings.type) {
+ switch (ifs->type) {
case IF_GET_PROTO:
if (dev_to_hdlc(dev)->proto != &proto)
return -EINVAL;
- ifr->ifr_settings.type = IF_PROTO_X25;
- if (ifr->ifr_settings.size < size) {
- ifr->ifr_settings.size = size; /* data size wanted */
+ ifs->type = IF_PROTO_X25;
+ if (ifs->size < size) {
+ ifs->size = size; /* data size wanted */
return -ENOBUFS;
}
if (copy_to_user(x25_s, &state(hdlc)->settings, size))
@@ -303,7 +303,7 @@ static int x25_ioctl(struct net_device *dev, struct ifreq *ifr)
return -EBUSY;
/* backward compatibility */
- if (ifr->ifr_settings.size == 0) {
+ if (ifs->size == 0) {
new_settings.dce = 0;
new_settings.modulo = 8;
new_settings.window = 7;
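For orientation, the backward-compatibility branch above means a SIOCWANDEV caller that passes ifs->size == 0 still succeeds and gets the historical LAPB defaults instead of an error; annotated with the values shown in the hunk:

	/* ifs->size == 0: legacy caller that never supplied x25_hdlc_proto */
	new_settings.dce = 0;		/* behave as DTE */
	new_settings.modulo = 8;	/* basic (non-extended) sequence numbering */
	new_settings.window = 7;	/* maximum window size for modulo 8 */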
diff --git a/drivers/net/wan/hostess_sv11.c b/drivers/net/wan/hostess_sv11.c
index fd61a7cc4fdf..e985e54ba75d 100644
--- a/drivers/net/wan/hostess_sv11.c
+++ b/drivers/net/wan/hostess_sv11.c
@@ -142,11 +142,6 @@ static int hostess_close(struct net_device *d)
return 0;
}
-static int hostess_ioctl(struct net_device *d, struct ifreq *ifr, int cmd)
-{
- return hdlc_ioctl(d, ifr, cmd);
-}
-
/* Passed network frames, fire them downwind.
*/
@@ -171,7 +166,7 @@ static const struct net_device_ops hostess_ops = {
.ndo_open = hostess_open,
.ndo_stop = hostess_close,
.ndo_start_xmit = hdlc_start_xmit,
- .ndo_do_ioctl = hostess_ioctl,
+ .ndo_siocwandev = hdlc_ioctl,
};
static struct z8530_dev *sv11_init(int iobase, int irq)
@@ -324,16 +319,18 @@ MODULE_DESCRIPTION("Modular driver for the Comtrol Hostess SV11");
static struct z8530_dev *sv11_unit;
-int init_module(void)
+static int sv11_module_init(void)
{
sv11_unit = sv11_init(io, irq);
if (!sv11_unit)
return -ENODEV;
return 0;
}
+module_init(sv11_module_init);
-void cleanup_module(void)
+static void sv11_module_cleanup(void)
{
if (sv11_unit)
sv11_shutdown(sv11_unit);
}
+module_exit(sv11_module_cleanup);
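Two independent cleanups land in hostess_sv11.c above: the trivial hostess_ioctl() wrapper goes away because .ndo_siocwandev can point straight at hdlc_ioctl(), and the legacy implicit entry points init_module()/cleanup_module() are converted to static functions registered through the standard macros. A minimal sketch of that idiom (example_* names are illustrative):

#include <linux/module.h>

static int __init example_module_init(void)
{
	return 0;	/* probe and register the device here */
}
module_init(example_module_init);

static void __exit example_module_exit(void)
{
	/* unregister and release resources here */
}
module_exit(example_module_exit);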
diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c
index 3c51ab239fb2..88a36a069311 100644
--- a/drivers/net/wan/ixp4xx_hss.c
+++ b/drivers/net/wan/ixp4xx_hss.c
@@ -975,11 +975,10 @@ static int init_hdlc_queues(struct port *port)
return -ENOMEM;
}
- port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL,
+ port->desc_tab = dma_pool_zalloc(dma_pool, GFP_KERNEL,
&port->desc_tab_phys);
if (!port->desc_tab)
return -ENOMEM;
- memset(port->desc_tab, 0, POOL_ALLOC_SIZE);
memset(port->rx_buff_tab, 0, sizeof(port->rx_buff_tab)); /* tables */
memset(port->tx_buff_tab, 0, sizeof(port->tx_buff_tab));
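The hunk above leans on dma_pool_zalloc() being defined as dma_pool_alloc() with __GFP_ZERO, which is what makes the removed memset() of the descriptor table redundant; as a sketch of the equivalence (desc, pool, phys and POOL_ALLOC_SIZE as used by this driver):

	/* before: allocate, then clear the whole block by hand */
	desc = dma_pool_alloc(pool, GFP_KERNEL, &phys);
	if (desc)
		memset(desc, 0, POOL_ALLOC_SIZE);

	/* after: same semantics in one call */
	desc = dma_pool_zalloc(pool, GFP_KERNEL, &phys);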
@@ -1255,23 +1254,20 @@ static void find_best_clock(u32 timer_freq, u32 rate, u32 *best, u32 *reg)
}
}
-static int hss_hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+static int hss_hdlc_ioctl(struct net_device *dev, struct if_settings *ifs)
{
const size_t size = sizeof(sync_serial_settings);
sync_serial_settings new_line;
- sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
+ sync_serial_settings __user *line = ifs->ifs_ifsu.sync;
struct port *port = dev_to_port(dev);
unsigned long flags;
int clk;
- if (cmd != SIOCWANDEV)
- return hdlc_ioctl(dev, ifr, cmd);
-
- switch (ifr->ifr_settings.type) {
+ switch (ifs->type) {
case IF_GET_IFACE:
- ifr->ifr_settings.type = IF_IFACE_V35;
- if (ifr->ifr_settings.size < size) {
- ifr->ifr_settings.size = size; /* data size wanted */
+ ifs->type = IF_IFACE_V35;
+ if (ifs->size < size) {
+ ifs->size = size; /* data size wanted */
return -ENOBUFS;
}
memset(&new_line, 0, sizeof(new_line));
@@ -1324,7 +1320,7 @@ static int hss_hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return 0;
default:
- return hdlc_ioctl(dev, ifr, cmd);
+ return hdlc_ioctl(dev, ifs);
}
}
@@ -1336,7 +1332,7 @@ static const struct net_device_ops hss_hdlc_ops = {
.ndo_open = hss_hdlc_open,
.ndo_stop = hss_hdlc_close,
.ndo_start_xmit = hdlc_start_xmit,
- .ndo_do_ioctl = hss_hdlc_ioctl,
+ .ndo_siocwandev = hss_hdlc_ioctl,
};
static int hss_init_one(struct platform_device *pdev)
diff --git a/drivers/net/wan/lmc/lmc.h b/drivers/net/wan/lmc/lmc.h
index 3bd541c868d5..d7d59b4595f9 100644
--- a/drivers/net/wan/lmc/lmc.h
+++ b/drivers/net/wan/lmc/lmc.h
@@ -19,7 +19,7 @@ void lmc_mii_writereg(lmc_softc_t * const, unsigned, unsigned, unsigned);
void lmc_gpio_mkinput(lmc_softc_t * const sc, u32 bits);
void lmc_gpio_mkoutput(lmc_softc_t * const sc, u32 bits);
-int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
+int lmc_ioctl(struct net_device *dev, struct if_settings *ifs);
extern lmc_media_t lmc_ds3_media;
extern lmc_media_t lmc_ssi_media;
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index 6c163db52835..ed687bf6ec47 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -105,7 +105,8 @@ static void lmc_driver_timeout(struct net_device *dev, unsigned int txqueue);
* Linux reserves 16 device-specific IOCTLs. We call them
* LMCIOC* to control various bits of our world.
*/
-int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
+static int lmc_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
+ void __user *data, int cmd) /*fold00*/
{
lmc_softc_t *sc = dev_to_sc(dev);
lmc_ctl_t ctl;
@@ -124,7 +125,7 @@ int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
* To date internally, just copy this out to the user.
*/
case LMCIOCGINFO: /*fold01*/
- if (copy_to_user(ifr->ifr_data, &sc->ictl, sizeof(lmc_ctl_t)))
+ if (copy_to_user(data, &sc->ictl, sizeof(lmc_ctl_t)))
ret = -EFAULT;
else
ret = 0;
@@ -141,7 +142,7 @@ int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
break;
}
- if (copy_from_user(&ctl, ifr->ifr_data, sizeof(lmc_ctl_t))) {
+ if (copy_from_user(&ctl, data, sizeof(lmc_ctl_t))) {
ret = -EFAULT;
break;
}
@@ -171,7 +172,7 @@ int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
break;
}
- if (copy_from_user(&new_type, ifr->ifr_data, sizeof(u16))) {
+ if (copy_from_user(&new_type, data, sizeof(u16))) {
ret = -EFAULT;
break;
}
@@ -211,8 +212,7 @@ int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
sc->lmc_xinfo.Magic1 = 0xDEADBEEF;
- if (copy_to_user(ifr->ifr_data, &sc->lmc_xinfo,
- sizeof(struct lmc_xinfo)))
+ if (copy_to_user(data, &sc->lmc_xinfo, sizeof(struct lmc_xinfo)))
ret = -EFAULT;
else
ret = 0;
@@ -245,9 +245,9 @@ int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
regVal & T1FRAMER_SEF_MASK;
}
spin_unlock_irqrestore(&sc->lmc_lock, flags);
- if (copy_to_user(ifr->ifr_data, &sc->lmc_device->stats,
+ if (copy_to_user(data, &sc->lmc_device->stats,
sizeof(sc->lmc_device->stats)) ||
- copy_to_user(ifr->ifr_data + sizeof(sc->lmc_device->stats),
+ copy_to_user(data + sizeof(sc->lmc_device->stats),
&sc->extra_stats, sizeof(sc->extra_stats)))
ret = -EFAULT;
else
@@ -282,7 +282,7 @@ int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
break;
}
- if (copy_from_user(&ctl, ifr->ifr_data, sizeof(lmc_ctl_t))) {
+ if (copy_from_user(&ctl, data, sizeof(lmc_ctl_t))) {
ret = -EFAULT;
break;
}
@@ -314,11 +314,11 @@ int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
#ifdef DEBUG
case LMCIOCDUMPEVENTLOG:
- if (copy_to_user(ifr->ifr_data, &lmcEventLogIndex, sizeof(u32))) {
+ if (copy_to_user(data, &lmcEventLogIndex, sizeof(u32))) {
ret = -EFAULT;
break;
}
- if (copy_to_user(ifr->ifr_data + sizeof(u32), lmcEventLogBuf,
+ if (copy_to_user(data + sizeof(u32), lmcEventLogBuf,
sizeof(lmcEventLogBuf)))
ret = -EFAULT;
else
@@ -346,7 +346,7 @@ int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
*/
netif_stop_queue(dev);
- if (copy_from_user(&xc, ifr->ifr_data, sizeof(struct lmc_xilinx_control))) {
+ if (copy_from_user(&xc, data, sizeof(struct lmc_xilinx_control))) {
ret = -EFAULT;
break;
}
@@ -609,10 +609,8 @@ int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
}
break;
- default: /*fold01*/
- /* If we don't know what to do, give the protocol a shot. */
- ret = lmc_proto_ioctl (sc, ifr, cmd);
- break;
+ default:
+ break;
}
return ret;
@@ -788,7 +786,8 @@ static const struct net_device_ops lmc_ops = {
.ndo_open = lmc_open,
.ndo_stop = lmc_close,
.ndo_start_xmit = hdlc_start_xmit,
- .ndo_do_ioctl = lmc_ioctl,
+ .ndo_siocwandev = hdlc_ioctl,
+ .ndo_siocdevprivate = lmc_siocdevprivate,
.ndo_tx_timeout = lmc_driver_timeout,
.ndo_get_stats = lmc_get_stats,
};
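The lmc conversion above is the first of four (n2, pc300too and pci200syn follow) that split one ndo_do_ioctl handler in two: SIOCWANDEV is routed to the generic hdlc_ioctl() via .ndo_siocwandev, while the driver-private SIOCDEVPRIVATE commands arrive through .ndo_siocdevprivate with the user pointer already extracted, replacing every ifr->ifr_data access. That split is also what lets the lmc_proto_ioctl() helper below be deleted as dead code. A minimal kernel-side sketch of the resulting shape (example_* names are illustrative):

static int example_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
				  void __user *data, int cmd)
{
	u32 value;

	if (cmd != SIOCDEVPRIVATE)
		return -EOPNOTSUPP;
	if (copy_from_user(&value, data, sizeof(value)))
		return -EFAULT;
	/* ...act on the private command... */
	return 0;
}

static const struct net_device_ops example_ops = {
	.ndo_start_xmit     = hdlc_start_xmit,
	.ndo_siocwandev     = hdlc_ioctl,
	.ndo_siocdevprivate = example_siocdevprivate,
};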
diff --git a/drivers/net/wan/lmc/lmc_proto.c b/drivers/net/wan/lmc/lmc_proto.c
index 4e9cc83b615a..e5487616a816 100644
--- a/drivers/net/wan/lmc/lmc_proto.c
+++ b/drivers/net/wan/lmc/lmc_proto.c
@@ -58,13 +58,6 @@ void lmc_proto_attach(lmc_softc_t *sc) /*FOLD00*/
}
}
-int lmc_proto_ioctl(lmc_softc_t *sc, struct ifreq *ifr, int cmd)
-{
- if (sc->if_type == LMC_PPP)
- return hdlc_ioctl(sc->lmc_device, ifr, cmd);
- return -EOPNOTSUPP;
-}
-
int lmc_proto_open(lmc_softc_t *sc)
{
int ret = 0;
diff --git a/drivers/net/wan/lmc/lmc_proto.h b/drivers/net/wan/lmc/lmc_proto.h
index bb098e443776..e56e7072de44 100644
--- a/drivers/net/wan/lmc/lmc_proto.h
+++ b/drivers/net/wan/lmc/lmc_proto.h
@@ -5,7 +5,6 @@
#include <linux/hdlc.h>
void lmc_proto_attach(lmc_softc_t *sc);
-int lmc_proto_ioctl(lmc_softc_t *sc, struct ifreq *ifr, int cmd);
int lmc_proto_open(lmc_softc_t *sc);
void lmc_proto_close(lmc_softc_t *sc);
__be16 lmc_proto_type(lmc_softc_t *sc, struct sk_buff *skb);
diff --git a/drivers/net/wan/n2.c b/drivers/net/wan/n2.c
index bdb6dc2409bc..f3e80722ba1d 100644
--- a/drivers/net/wan/n2.c
+++ b/drivers/net/wan/n2.c
@@ -227,27 +227,30 @@ static int n2_close(struct net_device *dev)
return 0;
}
-static int n2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+static int n2_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
+ void __user *data, int cmd)
{
- const size_t size = sizeof(sync_serial_settings);
- sync_serial_settings new_line;
- sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
- port_t *port = dev_to_port(dev);
-
#ifdef DEBUG_RINGS
if (cmd == SIOCDEVPRIVATE) {
sca_dump_rings(dev);
return 0;
}
#endif
- if (cmd != SIOCWANDEV)
- return hdlc_ioctl(dev, ifr, cmd);
+ return -EOPNOTSUPP;
+}
+
+static int n2_ioctl(struct net_device *dev, struct if_settings *ifs)
+{
+ const size_t size = sizeof(sync_serial_settings);
+ sync_serial_settings new_line;
+ sync_serial_settings __user *line = ifs->ifs_ifsu.sync;
+ port_t *port = dev_to_port(dev);
- switch (ifr->ifr_settings.type) {
+ switch (ifs->type) {
case IF_GET_IFACE:
- ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
- if (ifr->ifr_settings.size < size) {
- ifr->ifr_settings.size = size; /* data size wanted */
+ ifs->type = IF_IFACE_SYNC_SERIAL;
+ if (ifs->size < size) {
+ ifs->size = size; /* data size wanted */
return -ENOBUFS;
}
if (copy_to_user(line, &port->settings, size))
@@ -275,7 +278,7 @@ static int n2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return 0;
default:
- return hdlc_ioctl(dev, ifr, cmd);
+ return hdlc_ioctl(dev, ifs);
}
}
@@ -311,7 +314,8 @@ static const struct net_device_ops n2_ops = {
.ndo_open = n2_open,
.ndo_stop = n2_close,
.ndo_start_xmit = hdlc_start_xmit,
- .ndo_do_ioctl = n2_ioctl,
+ .ndo_siocwandev = n2_ioctl,
+ .ndo_siocdevprivate = n2_siocdevprivate,
};
static int __init n2_run(unsigned long io, unsigned long irq,
diff --git a/drivers/net/wan/pc300too.c b/drivers/net/wan/pc300too.c
index 7b123a771aa6..4766446f0fa0 100644
--- a/drivers/net/wan/pc300too.c
+++ b/drivers/net/wan/pc300too.c
@@ -174,27 +174,30 @@ static int pc300_close(struct net_device *dev)
return 0;
}
-static int pc300_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+static int pc300_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
+ void __user *data, int cmd)
{
- const size_t size = sizeof(sync_serial_settings);
- sync_serial_settings new_line;
- sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
- int new_type;
- port_t *port = dev_to_port(dev);
-
#ifdef DEBUG_RINGS
if (cmd == SIOCDEVPRIVATE) {
sca_dump_rings(dev);
return 0;
}
#endif
- if (cmd != SIOCWANDEV)
- return hdlc_ioctl(dev, ifr, cmd);
+ return -EOPNOTSUPP;
+}
+
+static int pc300_ioctl(struct net_device *dev, struct if_settings *ifs)
+{
+ const size_t size = sizeof(sync_serial_settings);
+ sync_serial_settings new_line;
+ sync_serial_settings __user *line = ifs->ifs_ifsu.sync;
+ int new_type;
+ port_t *port = dev_to_port(dev);
- if (ifr->ifr_settings.type == IF_GET_IFACE) {
- ifr->ifr_settings.type = port->iface;
- if (ifr->ifr_settings.size < size) {
- ifr->ifr_settings.size = size; /* data size wanted */
+ if (ifs->type == IF_GET_IFACE) {
+ ifs->type = port->iface;
+ if (ifs->size < size) {
+ ifs->size = size; /* data size wanted */
return -ENOBUFS;
}
if (copy_to_user(line, &port->settings, size))
@@ -203,21 +206,21 @@ static int pc300_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
}
if (port->card->type == PC300_X21 &&
- (ifr->ifr_settings.type == IF_IFACE_SYNC_SERIAL ||
- ifr->ifr_settings.type == IF_IFACE_X21))
+ (ifs->type == IF_IFACE_SYNC_SERIAL ||
+ ifs->type == IF_IFACE_X21))
new_type = IF_IFACE_X21;
else if (port->card->type == PC300_RSV &&
- (ifr->ifr_settings.type == IF_IFACE_SYNC_SERIAL ||
- ifr->ifr_settings.type == IF_IFACE_V35))
+ (ifs->type == IF_IFACE_SYNC_SERIAL ||
+ ifs->type == IF_IFACE_V35))
new_type = IF_IFACE_V35;
else if (port->card->type == PC300_RSV &&
- ifr->ifr_settings.type == IF_IFACE_V24)
+ ifs->type == IF_IFACE_V24)
new_type = IF_IFACE_V24;
else
- return hdlc_ioctl(dev, ifr, cmd);
+ return hdlc_ioctl(dev, ifs);
if (!capable(CAP_NET_ADMIN))
return -EPERM;
@@ -272,7 +275,8 @@ static const struct net_device_ops pc300_ops = {
.ndo_open = pc300_open,
.ndo_stop = pc300_close,
.ndo_start_xmit = hdlc_start_xmit,
- .ndo_do_ioctl = pc300_ioctl,
+ .ndo_siocwandev = pc300_ioctl,
+ .ndo_siocdevprivate = pc300_siocdevprivate,
};
static int pc300_pci_init_one(struct pci_dev *pdev,
diff --git a/drivers/net/wan/pci200syn.c b/drivers/net/wan/pci200syn.c
index dee9c4e15eca..ea86c7035653 100644
--- a/drivers/net/wan/pci200syn.c
+++ b/drivers/net/wan/pci200syn.c
@@ -167,27 +167,30 @@ static int pci200_close(struct net_device *dev)
return 0;
}
-static int pci200_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+static int pci200_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
+ void __user *data, int cmd)
{
- const size_t size = sizeof(sync_serial_settings);
- sync_serial_settings new_line;
- sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
- port_t *port = dev_to_port(dev);
-
#ifdef DEBUG_RINGS
if (cmd == SIOCDEVPRIVATE) {
sca_dump_rings(dev);
return 0;
}
#endif
- if (cmd != SIOCWANDEV)
- return hdlc_ioctl(dev, ifr, cmd);
+ return -EOPNOTSUPP;
+}
+
+static int pci200_ioctl(struct net_device *dev, struct if_settings *ifs)
+{
+ const size_t size = sizeof(sync_serial_settings);
+ sync_serial_settings new_line;
+ sync_serial_settings __user *line = ifs->ifs_ifsu.sync;
+ port_t *port = dev_to_port(dev);
- switch (ifr->ifr_settings.type) {
+ switch (ifs->type) {
case IF_GET_IFACE:
- ifr->ifr_settings.type = IF_IFACE_V35;
- if (ifr->ifr_settings.size < size) {
- ifr->ifr_settings.size = size; /* data size wanted */
+ ifs->type = IF_IFACE_V35;
+ if (ifs->size < size) {
+ ifs->size = size; /* data size wanted */
return -ENOBUFS;
}
if (copy_to_user(line, &port->settings, size))
@@ -217,7 +220,7 @@ static int pci200_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return 0;
default:
- return hdlc_ioctl(dev, ifr, cmd);
+ return hdlc_ioctl(dev, ifs);
}
}
@@ -253,7 +256,8 @@ static const struct net_device_ops pci200_ops = {
.ndo_open = pci200_open,
.ndo_stop = pci200_close,
.ndo_start_xmit = hdlc_start_xmit,
- .ndo_do_ioctl = pci200_ioctl,
+ .ndo_siocwandev = pci200_ioctl,
+ .ndo_siocdevprivate = pci200_siocdevprivate,
};
static int pci200_pci_init_one(struct pci_dev *pdev,
diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c
deleted file mode 100644
index 3092a09d3eaa..000000000000
--- a/drivers/net/wan/sbni.c
+++ /dev/null
@@ -1,1638 +0,0 @@
-/* sbni.c: Granch SBNI12 leased line adapters driver for linux
- *
- * Written 2001 by Denis I.Timofeev (timofeev@granch.ru)
- *
- * Previous versions were written by Yaroslav Polyakov,
- * Alexey Zverev and Max Khon.
- *
- * Driver supports SBNI12-02,-04,-05,-10,-11 cards, single and
- * double-channel, PCI and ISA modifications.
- * More info, and useful utilities for working with SBNI12 cards, can be
- * found at http://www.granch.com (English) or http://www.granch.ru (Russian)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License.
- *
- *
- * 5.0.1 Jun 22 2001
- * - Fixed bug in probe
- * 5.0.0 Jun 06 2001
- * - Driver was completely redesigned by Denis I.Timofeev,
- * - now PCI/Dual, ISA/Dual (with single interrupt line) models are
- * - supported
- * 3.3.0 Thu Feb 24 21:30:28 NOVT 2000
- * - PCI cards support
- * 3.2.0 Mon Dec 13 22:26:53 NOVT 1999
- * - Completely rebuilt all the packet storage system
- * - to work in Ethernet-like style.
- * 3.1.1 just fixed some bugs (5 aug 1999)
- * 3.1.0 added balancing feature (26 apr 1999)
- * 3.0.1 just fixed some bugs (14 apr 1999).
- * 3.0.0 Initial Revision, Yaroslav Polyakov (24 Feb 1999)
- * - added pre-calculation for CRC, fixed bug with "len-2" frames,
- * - removed outbound fragmentation (MTU=1000), written CRC-calculation
- * - on asm, added work with hard_headers and now we have our own cache
- * - for them, optionally supported word-interchange on some chipsets,
- *
- * Known problem: this driver was never tested on a multiprocessor machine.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/ptrace.h>
-#include <linux/fcntl.h>
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/pci.h>
-#include <linux/skbuff.h>
-#include <linux/timer.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-
-#include <net/net_namespace.h>
-#include <net/arp.h>
-#include <net/Space.h>
-
-#include <asm/io.h>
-#include <asm/types.h>
-#include <asm/byteorder.h>
-#include <asm/irq.h>
-#include <linux/uaccess.h>
-
-#include "sbni.h"
-
-/* device private data */
-
-struct net_local {
- struct timer_list watchdog;
- struct net_device *watchdog_dev;
-
- spinlock_t lock;
- struct sk_buff *rx_buf_p; /* receive buffer ptr */
- struct sk_buff *tx_buf_p; /* transmit buffer ptr */
-
- unsigned int framelen; /* current frame length */
- unsigned int maxframe; /* maximum valid frame length */
- unsigned int state;
- unsigned int inppos, outpos; /* positions in rx/tx buffers */
-
- /* transmitting frame number - from frames qty to 1 */
- unsigned int tx_frameno;
-
- /* expected number of next receiving frame */
- unsigned int wait_frameno;
-
- /* count of failed attempts to send a frame - up to 32 attempts are
- made before an error, while the far-end receiver tunes in */
- unsigned int trans_errors;
-
- /* idle time; send pong when limit exceeded */
- unsigned int timer_ticks;
-
- /* fields used for receive level autoselection */
- int delta_rxl;
- unsigned int cur_rxl_index, timeout_rxl;
- unsigned long cur_rxl_rcvd, prev_rxl_rcvd;
-
- struct sbni_csr1 csr1; /* current value of CSR1 */
- struct sbni_in_stats in_stats; /* internal statistics */
-
- struct net_device *second; /* for ISA/dual cards */
-
-#ifdef CONFIG_SBNI_MULTILINE
- struct net_device *master;
- struct net_device *link;
-#endif
-};
-
-
-static int sbni_card_probe( unsigned long );
-static int sbni_pci_probe( struct net_device * );
-static struct net_device *sbni_probe1(struct net_device *, unsigned long, int);
-static int sbni_open( struct net_device * );
-static int sbni_close( struct net_device * );
-static netdev_tx_t sbni_start_xmit(struct sk_buff *,
- struct net_device * );
-static int sbni_ioctl( struct net_device *, struct ifreq *, int );
-static void set_multicast_list( struct net_device * );
-
-static irqreturn_t sbni_interrupt( int, void * );
-static void handle_channel( struct net_device * );
-static int recv_frame( struct net_device * );
-static void send_frame( struct net_device * );
-static int upload_data( struct net_device *,
- unsigned, unsigned, unsigned, u32 );
-static void download_data( struct net_device *, u32 * );
-static void sbni_watchdog(struct timer_list *);
-static void interpret_ack( struct net_device *, unsigned );
-static int append_frame_to_pkt( struct net_device *, unsigned, u32 );
-static void indicate_pkt( struct net_device * );
-static void card_start( struct net_device * );
-static void prepare_to_send( struct sk_buff *, struct net_device * );
-static void drop_xmit_queue( struct net_device * );
-static void send_frame_header( struct net_device *, u32 * );
-static int skip_tail( unsigned int, unsigned int, u32 );
-static int check_fhdr( u32, u32 *, u32 *, u32 *, u32 *, u32 * );
-static void change_level( struct net_device * );
-static void timeout_change_level( struct net_device * );
-static u32 calc_crc32( u32, u8 *, u32 );
-static struct sk_buff * get_rx_buf( struct net_device * );
-static int sbni_init( struct net_device * );
-
-#ifdef CONFIG_SBNI_MULTILINE
-static int enslave( struct net_device *, struct net_device * );
-static int emancipate( struct net_device * );
-#endif
-
-static const char version[] =
- "Granch SBNI12 driver ver 5.0.1 Jun 22 2001 Denis I.Timofeev.\n";
-
-static bool skip_pci_probe __initdata = false;
-static int scandone __initdata = 0;
-static int num __initdata = 0;
-
-static unsigned char rxl_tab[];
-static u32 crc32tab[];
-
-/* A list of all installed devices, for removing the driver module. */
-static struct net_device *sbni_cards[ SBNI_MAX_NUM_CARDS ];
-
-/* Lists of device's parameters */
-static u32 io[ SBNI_MAX_NUM_CARDS ] __initdata =
- { [0 ... SBNI_MAX_NUM_CARDS-1] = -1 };
-static u32 irq[ SBNI_MAX_NUM_CARDS ] __initdata;
-static u32 baud[ SBNI_MAX_NUM_CARDS ] __initdata;
-static u32 rxl[ SBNI_MAX_NUM_CARDS ] __initdata =
- { [0 ... SBNI_MAX_NUM_CARDS-1] = -1 };
-static u32 mac[ SBNI_MAX_NUM_CARDS ] __initdata;
-
-#ifndef MODULE
-typedef u32 iarr[];
-static iarr *dest[5] __initdata = { &io, &irq, &baud, &rxl, &mac };
-#endif
-
-/* A zero-terminated list of I/O addresses to be probed on ISA bus */
-static unsigned int netcard_portlist[ ] __initdata = {
- 0x210, 0x214, 0x220, 0x224, 0x230, 0x234, 0x240, 0x244, 0x250, 0x254,
- 0x260, 0x264, 0x270, 0x274, 0x280, 0x284, 0x290, 0x294, 0x2a0, 0x2a4,
- 0x2b0, 0x2b4, 0x2c0, 0x2c4, 0x2d0, 0x2d4, 0x2e0, 0x2e4, 0x2f0, 0x2f4,
- 0 };
-
-#define NET_LOCAL_LOCK(dev) (((struct net_local *)netdev_priv(dev))->lock)
-
-/*
- * Look for an SBNI card at the address stored in dev->base_addr, if nonzero.
- * Otherwise look through the PCI bus; if no PCI card is found, scan ISA.
- */
-
-static inline int __init
-sbni_isa_probe( struct net_device *dev )
-{
- if( dev->base_addr > 0x1ff &&
- request_region( dev->base_addr, SBNI_IO_EXTENT, dev->name ) &&
- sbni_probe1( dev, dev->base_addr, dev->irq ) )
-
- return 0;
- else {
- pr_err("base address 0x%lx is busy, or adapter is malfunctioning!\n",
- dev->base_addr);
- return -ENODEV;
- }
-}
-
-static const struct net_device_ops sbni_netdev_ops = {
- .ndo_open = sbni_open,
- .ndo_stop = sbni_close,
- .ndo_start_xmit = sbni_start_xmit,
- .ndo_set_rx_mode = set_multicast_list,
- .ndo_do_ioctl = sbni_ioctl,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-static void __init sbni_devsetup(struct net_device *dev)
-{
- ether_setup( dev );
- dev->netdev_ops = &sbni_netdev_ops;
-}
-
-int __init sbni_probe(int unit)
-{
- struct net_device *dev;
- int err;
-
- dev = alloc_netdev(sizeof(struct net_local), "sbni",
- NET_NAME_UNKNOWN, sbni_devsetup);
- if (!dev)
- return -ENOMEM;
-
- dev->netdev_ops = &sbni_netdev_ops;
-
- sprintf(dev->name, "sbni%d", unit);
- netdev_boot_setup_check(dev);
-
- err = sbni_init(dev);
- if (err) {
- free_netdev(dev);
- return err;
- }
-
- err = register_netdev(dev);
- if (err) {
- release_region( dev->base_addr, SBNI_IO_EXTENT );
- free_netdev(dev);
- return err;
- }
- pr_info_once("%s", version);
- return 0;
-}
-
-static int __init sbni_init(struct net_device *dev)
-{
- int i;
- if( dev->base_addr )
- return sbni_isa_probe( dev );
- /* otherwise we have to search for our adapter */
-
- if( io[ num ] != -1 ) {
- dev->base_addr = io[ num ];
- dev->irq = irq[ num ];
- } else if( scandone || io[ 0 ] != -1 ) {
- return -ENODEV;
- }
-
- /* if io[ num ] contains non-zero address, then that is on ISA bus */
- if( dev->base_addr )
- return sbni_isa_probe( dev );
-
- /* ...otherwise - scan PCI first */
- if( !skip_pci_probe && !sbni_pci_probe( dev ) )
- return 0;
-
- if( io[ num ] == -1 ) {
- /* Auto-scan stops when the first ISA card is found */
- scandone = 1;
- if( num > 0 )
- return -ENODEV;
- }
-
- for( i = 0; netcard_portlist[ i ]; ++i ) {
- int ioaddr = netcard_portlist[ i ];
- if( request_region( ioaddr, SBNI_IO_EXTENT, dev->name ) &&
- sbni_probe1( dev, ioaddr, 0 ))
- return 0;
- }
-
- return -ENODEV;
-}
-
-
-static int __init
-sbni_pci_probe( struct net_device *dev )
-{
- struct pci_dev *pdev = NULL;
-
- while( (pdev = pci_get_class( PCI_CLASS_NETWORK_OTHER << 8, pdev ))
- != NULL ) {
- int pci_irq_line;
- unsigned long pci_ioaddr;
-
- if( pdev->vendor != SBNI_PCI_VENDOR &&
- pdev->device != SBNI_PCI_DEVICE )
- continue;
-
- pci_ioaddr = pci_resource_start( pdev, 0 );
- pci_irq_line = pdev->irq;
-
- /* Skip cards already found on previous calls */
- if( !request_region( pci_ioaddr, SBNI_IO_EXTENT, dev->name ) ) {
- if (pdev->subsystem_device != 2)
- continue;
-
- /* Dual adapter is present */
- if (!request_region(pci_ioaddr += 4, SBNI_IO_EXTENT,
- dev->name ) )
- continue;
- }
-
- if (pci_irq_line <= 0 || pci_irq_line >= nr_irqs)
- pr_warn(
-"WARNING: The PCI BIOS assigned this PCI card to IRQ %d, which is unlikely to work!.\n"
-"You should use the PCI BIOS setup to assign a valid IRQ line.\n",
- pci_irq_line );
-
- /* avoiding re-enable dual adapters */
- if( (pci_ioaddr & 7) == 0 && pci_enable_device( pdev ) ) {
- release_region( pci_ioaddr, SBNI_IO_EXTENT );
- pci_dev_put( pdev );
- return -EIO;
- }
- if( sbni_probe1( dev, pci_ioaddr, pci_irq_line ) ) {
- SET_NETDEV_DEV(dev, &pdev->dev);
- /* not the best thing to do, but this is all messed up
- for hotplug systems anyway... */
- pci_dev_put( pdev );
- return 0;
- }
- }
- return -ENODEV;
-}
-
-
-static struct net_device * __init
-sbni_probe1( struct net_device *dev, unsigned long ioaddr, int irq )
-{
- struct net_local *nl;
-
- if( sbni_card_probe( ioaddr ) ) {
- release_region( ioaddr, SBNI_IO_EXTENT );
- return NULL;
- }
-
- outb( 0, ioaddr + CSR0 );
-
- if( irq < 2 ) {
- unsigned long irq_mask;
-
- irq_mask = probe_irq_on();
- outb( EN_INT | TR_REQ, ioaddr + CSR0 );
- outb( PR_RES, ioaddr + CSR1 );
- mdelay(50);
- irq = probe_irq_off(irq_mask);
- outb( 0, ioaddr + CSR0 );
-
- if( !irq ) {
- pr_err("%s: can't detect device irq!\n", dev->name);
- release_region( ioaddr, SBNI_IO_EXTENT );
- return NULL;
- }
- } else if( irq == 2 )
- irq = 9;
-
- dev->irq = irq;
- dev->base_addr = ioaddr;
-
- /* Fill in sbni-specific dev fields. */
- nl = netdev_priv(dev);
- if( !nl ) {
- pr_err("%s: unable to get memory!\n", dev->name);
- release_region( ioaddr, SBNI_IO_EXTENT );
- return NULL;
- }
-
- memset( nl, 0, sizeof(struct net_local) );
- spin_lock_init( &nl->lock );
-
- /* store MAC address (generate if that isn't known) */
- *(__be16 *)dev->dev_addr = htons( 0x00ff );
- *(__be32 *)(dev->dev_addr + 2) = htonl( 0x01000000 |
- ((mac[num] ?
- mac[num] :
- (u32)((long)netdev_priv(dev))) & 0x00ffffff));
-
- /* store link settings (speed, receive level ) */
- nl->maxframe = DEFAULT_FRAME_LEN;
- nl->csr1.rate = baud[ num ];
-
- if( (nl->cur_rxl_index = rxl[ num ]) == -1 ) {
- /* autotune rxl */
- nl->cur_rxl_index = DEF_RXL;
- nl->delta_rxl = DEF_RXL_DELTA;
- } else {
- nl->delta_rxl = 0;
- }
- nl->csr1.rxl = rxl_tab[ nl->cur_rxl_index ];
- if( inb( ioaddr + CSR0 ) & 0x01 )
- nl->state |= FL_SLOW_MODE;
-
- pr_notice("%s: ioaddr %#lx, irq %d, MAC: 00:ff:01:%02x:%02x:%02x\n",
- dev->name, dev->base_addr, dev->irq,
- ((u8 *)dev->dev_addr)[3],
- ((u8 *)dev->dev_addr)[4],
- ((u8 *)dev->dev_addr)[5]);
-
- pr_notice("%s: speed %d",
- dev->name,
- ((nl->state & FL_SLOW_MODE) ? 500000 : 2000000)
- / (1 << nl->csr1.rate));
-
- if( nl->delta_rxl == 0 )
- pr_cont(", receive level 0x%x (fixed)\n", nl->cur_rxl_index);
- else
- pr_cont(", receive level (auto)\n");
-
-#ifdef CONFIG_SBNI_MULTILINE
- nl->master = dev;
- nl->link = NULL;
-#endif
-
- sbni_cards[ num++ ] = dev;
- return dev;
-}
-
-/* -------------------------------------------------------------------------- */
-
-#ifdef CONFIG_SBNI_MULTILINE
-
-static netdev_tx_t
-sbni_start_xmit( struct sk_buff *skb, struct net_device *dev )
-{
- struct net_device *p;
-
- netif_stop_queue( dev );
-
- /* Looking for idle device in the list */
- for( p = dev; p; ) {
- struct net_local *nl = netdev_priv(p);
- spin_lock( &nl->lock );
- if( nl->tx_buf_p || (nl->state & FL_LINE_DOWN) ) {
- p = nl->link;
- spin_unlock( &nl->lock );
- } else {
- /* Idle dev is found */
- prepare_to_send( skb, p );
- spin_unlock( &nl->lock );
- netif_start_queue( dev );
- return NETDEV_TX_OK;
- }
- }
-
- return NETDEV_TX_BUSY;
-}
-
-#else /* CONFIG_SBNI_MULTILINE */
-
-static netdev_tx_t
-sbni_start_xmit( struct sk_buff *skb, struct net_device *dev )
-{
- struct net_local *nl = netdev_priv(dev);
-
- netif_stop_queue( dev );
- spin_lock( &nl->lock );
-
- prepare_to_send( skb, dev );
-
- spin_unlock( &nl->lock );
- return NETDEV_TX_OK;
-}
-
-#endif /* CONFIG_SBNI_MULTILINE */
-
-/* -------------------------------------------------------------------------- */
-
-/* interrupt handler */
-
-/*
- * SBNI12D-10 and -11/ISA boards in "common interrupt" mode cannot be
- * treated as two independent single-channel devices. Each channel looks
- * like an Ethernet interface, but the interrupt handler must be shared.
- * In practice only the first ("master") channel's driver registers the
- * handler; its struct net_local holds a pointer to the "slave" channel's
- * struct net_local and services that channel's interrupts too.
- * The dev of each successfully attached ISA SBNI board is linked into a
- * list. When the next board's driver initializes, it scans this list; if
- * it finds a dev with the same irq and an ioaddr differing by 4, it
- * assumes that board is the "master".
- */
-
-static irqreturn_t
-sbni_interrupt( int irq, void *dev_id )
-{
- struct net_device *dev = dev_id;
- struct net_local *nl = netdev_priv(dev);
- int repeat;
-
- spin_lock( &nl->lock );
- if( nl->second )
- spin_lock(&NET_LOCAL_LOCK(nl->second));
-
- do {
- repeat = 0;
- if( inb( dev->base_addr + CSR0 ) & (RC_RDY | TR_RDY) ) {
- handle_channel( dev );
- repeat = 1;
- }
- if( nl->second && /* second channel present */
- (inb( nl->second->base_addr+CSR0 ) & (RC_RDY | TR_RDY)) ) {
- handle_channel( nl->second );
- repeat = 1;
- }
- } while( repeat );
-
- if( nl->second )
- spin_unlock(&NET_LOCAL_LOCK(nl->second));
- spin_unlock( &nl->lock );
- return IRQ_HANDLED;
-}
-
-
-static void
-handle_channel( struct net_device *dev )
-{
- struct net_local *nl = netdev_priv(dev);
- unsigned long ioaddr = dev->base_addr;
-
- int req_ans;
- unsigned char csr0;
-
-#ifdef CONFIG_SBNI_MULTILINE
- /* Lock the master device because we going to change its local data */
- if( nl->state & FL_SLAVE )
- spin_lock(&NET_LOCAL_LOCK(nl->master));
-#endif
-
- outb( (inb( ioaddr + CSR0 ) & ~EN_INT) | TR_REQ, ioaddr + CSR0 );
-
- nl->timer_ticks = CHANGE_LEVEL_START_TICKS;
- for(;;) {
- csr0 = inb( ioaddr + CSR0 );
- if( ( csr0 & (RC_RDY | TR_RDY) ) == 0 )
- break;
-
- req_ans = !(nl->state & FL_PREV_OK);
-
- if( csr0 & RC_RDY )
- req_ans = recv_frame( dev );
-
- /*
- * TR_RDY always equals 1 here because we own the marker,
- * and we set TR_REQ while interrupts were disabled
- */
- csr0 = inb( ioaddr + CSR0 );
- if( !(csr0 & TR_RDY) || (csr0 & RC_RDY) )
- netdev_err(dev, "internal error!\n");
-
- /* if state & FL_NEED_RESEND != 0 then tx_frameno != 0 */
- if( req_ans || nl->tx_frameno != 0 )
- send_frame( dev );
- else
- /* send marker without any data */
- outb( inb( ioaddr + CSR0 ) & ~TR_REQ, ioaddr + CSR0 );
- }
-
- outb( inb( ioaddr + CSR0 ) | EN_INT, ioaddr + CSR0 );
-
-#ifdef CONFIG_SBNI_MULTILINE
- if( nl->state & FL_SLAVE )
- spin_unlock(&NET_LOCAL_LOCK(nl->master));
-#endif
-}
-
-
-/*
- * Returns 1 if the received frame needs to be acknowledged.
- * An empty frame received without errors won't be acknowledged.
- */
-
-static int
-recv_frame( struct net_device *dev )
-{
- struct net_local *nl = netdev_priv(dev);
- unsigned long ioaddr = dev->base_addr;
-
- u32 crc = CRC32_INITIAL;
-
- unsigned framelen = 0, frameno, ack;
- unsigned is_first, frame_ok = 0;
-
- if( check_fhdr( ioaddr, &framelen, &frameno, &ack, &is_first, &crc ) ) {
- frame_ok = framelen > 4
- ? upload_data( dev, framelen, frameno, is_first, crc )
- : skip_tail( ioaddr, framelen, crc );
- if( frame_ok )
- interpret_ack( dev, ack );
- }
-
- outb( inb( ioaddr + CSR0 ) ^ CT_ZER, ioaddr + CSR0 );
- if( frame_ok ) {
- nl->state |= FL_PREV_OK;
- if( framelen > 4 )
- nl->in_stats.all_rx_number++;
- } else {
- nl->state &= ~FL_PREV_OK;
- change_level( dev );
- nl->in_stats.all_rx_number++;
- nl->in_stats.bad_rx_number++;
- }
-
- return !frame_ok || framelen > 4;
-}
-
-
-static void
-send_frame( struct net_device *dev )
-{
- struct net_local *nl = netdev_priv(dev);
-
- u32 crc = CRC32_INITIAL;
-
- if( nl->state & FL_NEED_RESEND ) {
-
- /* if the frame was sent but not ACK'ed - resend it */
- if( nl->trans_errors ) {
- --nl->trans_errors;
- if( nl->framelen != 0 )
- nl->in_stats.resend_tx_number++;
- } else {
- /* cannot xmit with many attempts */
-#ifdef CONFIG_SBNI_MULTILINE
- if( (nl->state & FL_SLAVE) || nl->link )
-#endif
- nl->state |= FL_LINE_DOWN;
- drop_xmit_queue( dev );
- goto do_send;
- }
- } else
- nl->trans_errors = TR_ERROR_COUNT;
-
- send_frame_header( dev, &crc );
- nl->state |= FL_NEED_RESEND;
- /*
- * FL_NEED_RESEND is cleared after the ACK; but if an empty frame
- * was sent, it is cleared in prepare_to_send() for the next frame
- */
-
-
- if( nl->framelen ) {
- download_data( dev, &crc );
- nl->in_stats.all_tx_number++;
- nl->state |= FL_WAIT_ACK;
- }
-
- outsb( dev->base_addr + DAT, (u8 *)&crc, sizeof crc );
-
-do_send:
- outb( inb( dev->base_addr + CSR0 ) & ~TR_REQ, dev->base_addr + CSR0 );
-
- if( nl->tx_frameno )
- /* next frame exists - we request card to send it */
- outb( inb( dev->base_addr + CSR0 ) | TR_REQ,
- dev->base_addr + CSR0 );
-}
-
-
-/*
- * Write the frame data into adapter's buffer memory, and calculate CRC.
- * Do padding if necessary.
- */
-
-static void
-download_data( struct net_device *dev, u32 *crc_p )
-{
- struct net_local *nl = netdev_priv(dev);
- struct sk_buff *skb = nl->tx_buf_p;
-
- unsigned len = min_t(unsigned int, skb->len - nl->outpos, nl->framelen);
-
- outsb( dev->base_addr + DAT, skb->data + nl->outpos, len );
- *crc_p = calc_crc32( *crc_p, skb->data + nl->outpos, len );
-
- /* if the packet is too short, write some more bytes to pad it */
- for( len = nl->framelen - len; len--; ) {
- outb( 0, dev->base_addr + DAT );
- *crc_p = CRC32( 0, *crc_p );
- }
-}
-
-
-static int
-upload_data( struct net_device *dev, unsigned framelen, unsigned frameno,
- unsigned is_first, u32 crc )
-{
- struct net_local *nl = netdev_priv(dev);
-
- int frame_ok;
-
- if( is_first ) {
- nl->wait_frameno = frameno;
- nl->inppos = 0;
- }
-
- if( nl->wait_frameno == frameno ) {
-
- if( nl->inppos + framelen <= ETHER_MAX_LEN )
- frame_ok = append_frame_to_pkt( dev, framelen, crc );
-
- /*
- * if the CRC is right but framelen is incorrect, then a
- * transmitter error occurred... drop the entire packet
- */
- else if( (frame_ok = skip_tail( dev->base_addr, framelen, crc ))
- != 0 ) {
- nl->wait_frameno = 0;
- nl->inppos = 0;
-#ifdef CONFIG_SBNI_MULTILINE
- nl->master->stats.rx_errors++;
- nl->master->stats.rx_missed_errors++;
-#else
- dev->stats.rx_errors++;
- dev->stats.rx_missed_errors++;
-#endif
- }
- /* now skip all frames until is_first != 0 */
- } else
- frame_ok = skip_tail( dev->base_addr, framelen, crc );
-
- if( is_first && !frame_ok ) {
- /*
- * Frame has been broken, but we had already stored
- * is_first... Drop entire packet.
- */
- nl->wait_frameno = 0;
-#ifdef CONFIG_SBNI_MULTILINE
- nl->master->stats.rx_errors++;
- nl->master->stats.rx_crc_errors++;
-#else
- dev->stats.rx_errors++;
- dev->stats.rx_crc_errors++;
-#endif
- }
-
- return frame_ok;
-}
-
-
-static inline void
-send_complete( struct net_device *dev )
-{
- struct net_local *nl = netdev_priv(dev);
-
-#ifdef CONFIG_SBNI_MULTILINE
- nl->master->stats.tx_packets++;
- nl->master->stats.tx_bytes += nl->tx_buf_p->len;
-#else
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += nl->tx_buf_p->len;
-#endif
- dev_consume_skb_irq(nl->tx_buf_p);
-
- nl->tx_buf_p = NULL;
-
- nl->outpos = 0;
- nl->state &= ~(FL_WAIT_ACK | FL_NEED_RESEND);
- nl->framelen = 0;
-}
-
-
-static void
-interpret_ack( struct net_device *dev, unsigned ack )
-{
- struct net_local *nl = netdev_priv(dev);
-
- if( ack == FRAME_SENT_OK ) {
- nl->state &= ~FL_NEED_RESEND;
-
- if( nl->state & FL_WAIT_ACK ) {
- nl->outpos += nl->framelen;
-
- if( --nl->tx_frameno ) {
- nl->framelen = min_t(unsigned int,
- nl->maxframe,
- nl->tx_buf_p->len - nl->outpos);
- } else {
- send_complete( dev );
-#ifdef CONFIG_SBNI_MULTILINE
- netif_wake_queue( nl->master );
-#else
- netif_wake_queue( dev );
-#endif
- }
- }
- }
-
- nl->state &= ~FL_WAIT_ACK;
-}
-
-
-/*
- * Glue the received frame onto the previous fragments of the packet.
- * Indicate the packet once the last frame has been accepted.
- */
-
-static int
-append_frame_to_pkt( struct net_device *dev, unsigned framelen, u32 crc )
-{
- struct net_local *nl = netdev_priv(dev);
-
- u8 *p;
-
- if( nl->inppos + framelen > ETHER_MAX_LEN )
- return 0;
-
- if( !nl->rx_buf_p && !(nl->rx_buf_p = get_rx_buf( dev )) )
- return 0;
-
- p = nl->rx_buf_p->data + nl->inppos;
- insb( dev->base_addr + DAT, p, framelen );
- if( calc_crc32( crc, p, framelen ) != CRC32_REMAINDER )
- return 0;
-
- nl->inppos += framelen - 4;
- if( --nl->wait_frameno == 0 ) /* last frame received */
- indicate_pkt( dev );
-
- return 1;
-}
-
-
-/*
- * Prepare to start output on adapter.
- * The transmitter is actually activated once the marker is accepted.
- */
-
-static void
-prepare_to_send( struct sk_buff *skb, struct net_device *dev )
-{
- struct net_local *nl = netdev_priv(dev);
-
- unsigned int len;
-
- /* nl->tx_buf_p == NULL here! */
- if( nl->tx_buf_p )
- netdev_err(dev, "memory leak!\n");
-
- nl->outpos = 0;
- nl->state &= ~(FL_WAIT_ACK | FL_NEED_RESEND);
-
- len = skb->len;
- if( len < SBNI_MIN_LEN )
- len = SBNI_MIN_LEN;
-
- nl->tx_buf_p = skb;
- nl->tx_frameno = DIV_ROUND_UP(len, nl->maxframe);
- nl->framelen = len < nl->maxframe ? len : nl->maxframe;
-
- outb( inb( dev->base_addr + CSR0 ) | TR_REQ, dev->base_addr + CSR0 );
-#ifdef CONFIG_SBNI_MULTILINE
- netif_trans_update(nl->master);
-#else
- netif_trans_update(dev);
-#endif
-}
-
-
-static void
-drop_xmit_queue( struct net_device *dev )
-{
- struct net_local *nl = netdev_priv(dev);
-
- if( nl->tx_buf_p ) {
- dev_kfree_skb_any( nl->tx_buf_p );
- nl->tx_buf_p = NULL;
-#ifdef CONFIG_SBNI_MULTILINE
- nl->master->stats.tx_errors++;
- nl->master->stats.tx_carrier_errors++;
-#else
- dev->stats.tx_errors++;
- dev->stats.tx_carrier_errors++;
-#endif
- }
-
- nl->tx_frameno = 0;
- nl->framelen = 0;
- nl->outpos = 0;
- nl->state &= ~(FL_WAIT_ACK | FL_NEED_RESEND);
-#ifdef CONFIG_SBNI_MULTILINE
- netif_start_queue( nl->master );
- netif_trans_update(nl->master);
-#else
- netif_start_queue( dev );
- netif_trans_update(dev);
-#endif
-}
-
-
-static void
-send_frame_header( struct net_device *dev, u32 *crc_p )
-{
- struct net_local *nl = netdev_priv(dev);
-
- u32 crc = *crc_p;
- u32 len_field = nl->framelen + 6; /* CRC + frameno + reserved */
- u8 value;
-
- if( nl->state & FL_NEED_RESEND )
- len_field |= FRAME_RETRY; /* non-first attempt... */
-
- if( nl->outpos == 0 )
- len_field |= FRAME_FIRST;
-
- len_field |= (nl->state & FL_PREV_OK) ? FRAME_SENT_OK : FRAME_SENT_BAD;
- outb( SBNI_SIG, dev->base_addr + DAT );
-
- value = (u8) len_field;
- outb( value, dev->base_addr + DAT );
- crc = CRC32( value, crc );
- value = (u8) (len_field >> 8);
- outb( value, dev->base_addr + DAT );
- crc = CRC32( value, crc );
-
- outb( nl->tx_frameno, dev->base_addr + DAT );
- crc = CRC32( nl->tx_frameno, crc );
- outb( 0, dev->base_addr + DAT );
- crc = CRC32( 0, crc );
- *crc_p = crc;
-}
-
-
-/*
- * if the frame tail isn't needed (wrong frame number, or received twice),
- * it isn't stored, but the CRC is still calculated over it
- */
-
-static int
-skip_tail( unsigned int ioaddr, unsigned int tail_len, u32 crc )
-{
- while( tail_len-- )
- crc = CRC32( inb( ioaddr + DAT ), crc );
-
- return crc == CRC32_REMAINDER;
-}
-
-
-/*
- * Preliminary check that the frame header is correct; calculates its CRC
- * and splits it into simple fields
- */
-
-static int
-check_fhdr( u32 ioaddr, u32 *framelen, u32 *frameno, u32 *ack,
- u32 *is_first, u32 *crc_p )
-{
- u32 crc = *crc_p;
- u8 value;
-
- if( inb( ioaddr + DAT ) != SBNI_SIG )
- return 0;
-
- value = inb( ioaddr + DAT );
- *framelen = (u32)value;
- crc = CRC32( value, crc );
- value = inb( ioaddr + DAT );
- *framelen |= ((u32)value) << 8;
- crc = CRC32( value, crc );
-
- *ack = *framelen & FRAME_ACK_MASK;
- *is_first = (*framelen & FRAME_FIRST) != 0;
-
- if( (*framelen &= FRAME_LEN_MASK) < 6 ||
- *framelen > SBNI_MAX_FRAME - 3 )
- return 0;
-
- value = inb( ioaddr + DAT );
- *frameno = (u32)value;
- crc = CRC32( value, crc );
-
- crc = CRC32( inb( ioaddr + DAT ), crc ); /* reserved byte */
- *framelen -= 2;
-
- *crc_p = crc;
- return 1;
-}
-
-
-static struct sk_buff *
-get_rx_buf( struct net_device *dev )
-{
- /* +2 is to compensate for the alignment fixup below */
- struct sk_buff *skb = dev_alloc_skb( ETHER_MAX_LEN + 2 );
- if( !skb )
- return NULL;
-
- skb_reserve( skb, 2 ); /* Align IP on longword boundaries */
- return skb;
-}
-
-
-static void
-indicate_pkt( struct net_device *dev )
-{
- struct net_local *nl = netdev_priv(dev);
- struct sk_buff *skb = nl->rx_buf_p;
-
- skb_put( skb, nl->inppos );
-
-#ifdef CONFIG_SBNI_MULTILINE
- skb->protocol = eth_type_trans( skb, nl->master );
- netif_rx( skb );
- ++nl->master->stats.rx_packets;
- nl->master->stats.rx_bytes += nl->inppos;
-#else
- skb->protocol = eth_type_trans( skb, dev );
- netif_rx( skb );
- ++dev->stats.rx_packets;
- dev->stats.rx_bytes += nl->inppos;
-#endif
- nl->rx_buf_p = NULL; /* protocol driver will clear this sk_buff */
-}
-
-
-/* -------------------------------------------------------------------------- */
-
-/*
- * Periodically checks wire activity, and regenerates the marker if the
- * connection has been inactive for too long.
- */
-
-static void
-sbni_watchdog(struct timer_list *t)
-{
- struct net_local *nl = from_timer(nl, t, watchdog);
- struct net_device *dev = nl->watchdog_dev;
- unsigned long flags;
- unsigned char csr0;
-
- spin_lock_irqsave( &nl->lock, flags );
-
- csr0 = inb( dev->base_addr + CSR0 );
- if( csr0 & RC_CHK ) {
-
- if( nl->timer_ticks ) {
- if( csr0 & (RC_RDY | BU_EMP) )
- /* receiving not active */
- nl->timer_ticks--;
- } else {
- nl->in_stats.timeout_number++;
- if( nl->delta_rxl )
- timeout_change_level( dev );
-
- outb( *(u_char *)&nl->csr1 | PR_RES,
- dev->base_addr + CSR1 );
- csr0 = inb( dev->base_addr + CSR0 );
- }
- } else
- nl->state &= ~FL_LINE_DOWN;
-
- outb( csr0 | RC_CHK, dev->base_addr + CSR0 );
-
- mod_timer(t, jiffies + SBNI_TIMEOUT);
-
- spin_unlock_irqrestore( &nl->lock, flags );
-}
-
-
-static unsigned char rxl_tab[] = {
- 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x08,
- 0x0a, 0x0c, 0x0f, 0x16, 0x18, 0x1a, 0x1c, 0x1f
-};
-
-#define SIZE_OF_TIMEOUT_RXL_TAB 4
-static unsigned char timeout_rxl_tab[] = {
- 0x03, 0x05, 0x08, 0x0b
-};
-
-/* -------------------------------------------------------------------------- */
-
-static void
-card_start( struct net_device *dev )
-{
- struct net_local *nl = netdev_priv(dev);
-
- nl->timer_ticks = CHANGE_LEVEL_START_TICKS;
- nl->state &= ~(FL_WAIT_ACK | FL_NEED_RESEND);
- nl->state |= FL_PREV_OK;
-
- nl->inppos = nl->outpos = 0;
- nl->wait_frameno = 0;
- nl->tx_frameno = 0;
- nl->framelen = 0;
-
- outb( *(u_char *)&nl->csr1 | PR_RES, dev->base_addr + CSR1 );
- outb( EN_INT, dev->base_addr + CSR0 );
-}
-
-/* -------------------------------------------------------------------------- */
-
-/* Receive level auto-selection */
-
-static void
-change_level( struct net_device *dev )
-{
- struct net_local *nl = netdev_priv(dev);
-
- if( nl->delta_rxl == 0 ) /* do not auto-negotiate RxL */
- return;
-
- if( nl->cur_rxl_index == 0 )
- nl->delta_rxl = 1;
- else if( nl->cur_rxl_index == 15 )
- nl->delta_rxl = -1;
- else if( nl->cur_rxl_rcvd < nl->prev_rxl_rcvd )
- nl->delta_rxl = -nl->delta_rxl;
-
- nl->csr1.rxl = rxl_tab[ nl->cur_rxl_index += nl->delta_rxl ];
- inb( dev->base_addr + CSR0 ); /* needed for PCI cards */
- outb( *(u8 *)&nl->csr1, dev->base_addr + CSR1 );
-
- nl->prev_rxl_rcvd = nl->cur_rxl_rcvd;
- nl->cur_rxl_rcvd = 0;
-}
-
-
-static void
-timeout_change_level( struct net_device *dev )
-{
- struct net_local *nl = netdev_priv(dev);
-
- nl->cur_rxl_index = timeout_rxl_tab[ nl->timeout_rxl ];
- if( ++nl->timeout_rxl >= 4 )
- nl->timeout_rxl = 0;
-
- nl->csr1.rxl = rxl_tab[ nl->cur_rxl_index ];
- inb( dev->base_addr + CSR0 );
- outb( *(unsigned char *)&nl->csr1, dev->base_addr + CSR1 );
-
- nl->prev_rxl_rcvd = nl->cur_rxl_rcvd;
- nl->cur_rxl_rcvd = 0;
-}
-
-/* -------------------------------------------------------------------------- */
-
-/*
- * Open/initialize the board.
- */
-
-static int
-sbni_open( struct net_device *dev )
-{
- struct net_local *nl = netdev_priv(dev);
- struct timer_list *w = &nl->watchdog;
-
- /*
- * For dual ISA adapters in "common irq" mode, we have to determine
- * whether the primary or the secondary channel is being initialized,
- * and install the irq handler only in the first case.
- */
- if( dev->base_addr < 0x400 ) { /* ISA only */
- struct net_device **p = sbni_cards;
- for( ; *p && p < sbni_cards + SBNI_MAX_NUM_CARDS; ++p )
- if( (*p)->irq == dev->irq &&
- ((*p)->base_addr == dev->base_addr + 4 ||
- (*p)->base_addr == dev->base_addr - 4) &&
- (*p)->flags & IFF_UP ) {
-
- ((struct net_local *) (netdev_priv(*p)))
- ->second = dev;
- netdev_notice(dev, "using shared irq with %s\n",
- (*p)->name);
- nl->state |= FL_SECONDARY;
- goto handler_attached;
- }
- }
-
- if( request_irq(dev->irq, sbni_interrupt, IRQF_SHARED, dev->name, dev) ) {
- netdev_err(dev, "unable to get IRQ %d\n", dev->irq);
- return -EAGAIN;
- }
-
-handler_attached:
-
- spin_lock( &nl->lock );
- memset( &dev->stats, 0, sizeof(struct net_device_stats) );
- memset( &nl->in_stats, 0, sizeof(struct sbni_in_stats) );
-
- card_start( dev );
-
- netif_start_queue( dev );
-
- /* set timer watchdog */
- nl->watchdog_dev = dev;
- timer_setup(w, sbni_watchdog, 0);
- w->expires = jiffies + SBNI_TIMEOUT;
- add_timer( w );
-
- spin_unlock( &nl->lock );
- return 0;
-}
-
-
-static int
-sbni_close( struct net_device *dev )
-{
- struct net_local *nl = netdev_priv(dev);
-
- if( nl->second && nl->second->flags & IFF_UP ) {
- netdev_notice(dev, "Secondary channel (%s) is active!\n",
- nl->second->name);
- return -EBUSY;
- }
-
-#ifdef CONFIG_SBNI_MULTILINE
- if( nl->state & FL_SLAVE )
- emancipate( dev );
- else
- while( nl->link ) /* it's master device! */
- emancipate( nl->link );
-#endif
-
- spin_lock( &nl->lock );
-
- nl->second = NULL;
- drop_xmit_queue( dev );
- netif_stop_queue( dev );
-
- del_timer( &nl->watchdog );
-
- outb( 0, dev->base_addr + CSR0 );
-
- if( !(nl->state & FL_SECONDARY) )
- free_irq( dev->irq, dev );
- nl->state &= FL_SECONDARY;
-
- spin_unlock( &nl->lock );
- return 0;
-}
-
-
-/*
- Valid combinations in CSR0 (for probing):
-
- VALID_DECODER 0000,0011,1011,1010
-
- ; 0 ; -
- TR_REQ ; 1 ; +
- TR_RDY ; 2 ; -
- TR_RDY TR_REQ ; 3 ; +
- BU_EMP ; 4 ; +
- BU_EMP TR_REQ ; 5 ; +
- BU_EMP TR_RDY ; 6 ; -
- BU_EMP TR_RDY TR_REQ ; 7 ; +
- RC_RDY ; 8 ; +
- RC_RDY TR_REQ ; 9 ; +
- RC_RDY TR_RDY ; 10 ; -
- RC_RDY TR_RDY TR_REQ ; 11 ; -
- RC_RDY BU_EMP ; 12 ; -
- RC_RDY BU_EMP TR_REQ ; 13 ; -
- RC_RDY BU_EMP TR_RDY ; 14 ; -
- RC_RDY BU_EMP TR_RDY TR_REQ ; 15 ; -
-*/
-
-#define VALID_DECODER (2 + 8 + 0x10 + 0x20 + 0x80 + 0x100 + 0x200)
-
-
-static int
-sbni_card_probe( unsigned long ioaddr )
-{
- unsigned char csr0;
-
- csr0 = inb( ioaddr + CSR0 );
- if( csr0 != 0xff && csr0 != 0x00 ) {
- csr0 &= ~EN_INT;
- if( csr0 & BU_EMP )
- csr0 |= EN_INT;
-
- if( VALID_DECODER & (1 << (csr0 >> 4)) )
- return 0;
- }
-
- return -ENODEV;
-}
-
-/* -------------------------------------------------------------------------- */
-
-static int
-sbni_ioctl( struct net_device *dev, struct ifreq *ifr, int cmd )
-{
- struct net_local *nl = netdev_priv(dev);
- struct sbni_flags flags;
- int error = 0;
-
-#ifdef CONFIG_SBNI_MULTILINE
- struct net_device *slave_dev;
- char slave_name[ 8 ];
-#endif
-
- switch( cmd ) {
- case SIOCDEVGETINSTATS :
- if (copy_to_user( ifr->ifr_data, &nl->in_stats,
- sizeof(struct sbni_in_stats) ))
- error = -EFAULT;
- break;
-
- case SIOCDEVRESINSTATS :
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- memset( &nl->in_stats, 0, sizeof(struct sbni_in_stats) );
- break;
-
- case SIOCDEVGHWSTATE :
- flags.mac_addr = *(u32 *)(dev->dev_addr + 3);
- flags.rate = nl->csr1.rate;
- flags.slow_mode = (nl->state & FL_SLOW_MODE) != 0;
- flags.rxl = nl->cur_rxl_index;
- flags.fixed_rxl = nl->delta_rxl == 0;
-
- if (copy_to_user( ifr->ifr_data, &flags, sizeof flags ))
- error = -EFAULT;
- break;
-
- case SIOCDEVSHWSTATE :
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
-
- spin_lock( &nl->lock );
- flags = *(struct sbni_flags*) &ifr->ifr_ifru;
- if( flags.fixed_rxl ) {
- nl->delta_rxl = 0;
- nl->cur_rxl_index = flags.rxl;
- } else {
- nl->delta_rxl = DEF_RXL_DELTA;
- nl->cur_rxl_index = DEF_RXL;
- }
-
- nl->csr1.rxl = rxl_tab[ nl->cur_rxl_index ];
- nl->csr1.rate = flags.rate;
- outb( *(u8 *)&nl->csr1 | PR_RES, dev->base_addr + CSR1 );
- spin_unlock( &nl->lock );
- break;
-
-#ifdef CONFIG_SBNI_MULTILINE
-
- case SIOCDEVENSLAVE :
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
-
- if (copy_from_user( slave_name, ifr->ifr_data, sizeof slave_name ))
- return -EFAULT;
- slave_dev = dev_get_by_name(&init_net, slave_name );
- if( !slave_dev || !(slave_dev->flags & IFF_UP) ) {
- netdev_err(dev, "trying to enslave non-active device %s\n",
- slave_name);
- if (slave_dev)
- dev_put(slave_dev);
- return -EPERM;
- }
-
- return enslave( dev, slave_dev );
-
- case SIOCDEVEMANSIPATE :
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
-
- return emancipate( dev );
-
-#endif /* CONFIG_SBNI_MULTILINE */
-
- default :
- return -EOPNOTSUPP;
- }
-
- return error;
-}
-
-
-#ifdef CONFIG_SBNI_MULTILINE
-
-static int
-enslave( struct net_device *dev, struct net_device *slave_dev )
-{
- struct net_local *nl = netdev_priv(dev);
- struct net_local *snl = netdev_priv(slave_dev);
-
- if( nl->state & FL_SLAVE ) /* This isn't master or free device */
- return -EBUSY;
-
- if( snl->state & FL_SLAVE ) /* That was already enslaved */
- return -EBUSY;
-
- spin_lock( &nl->lock );
- spin_lock( &snl->lock );
-
- /* append to list */
- snl->link = nl->link;
- nl->link = slave_dev;
- snl->master = dev;
- snl->state |= FL_SLAVE;
-
- /* Summary statistics of MultiLine operation will be stored
- in master's counters */
- memset( &slave_dev->stats, 0, sizeof(struct net_device_stats) );
- netif_stop_queue( slave_dev );
- netif_wake_queue( dev ); /* Now we are able to transmit */
-
- spin_unlock( &snl->lock );
- spin_unlock( &nl->lock );
- netdev_notice(dev, "slave device (%s) attached\n", slave_dev->name);
- return 0;
-}
-
-
-static int
-emancipate( struct net_device *dev )
-{
- struct net_local *snl = netdev_priv(dev);
- struct net_device *p = snl->master;
- struct net_local *nl = netdev_priv(p);
-
- if( !(snl->state & FL_SLAVE) )
- return -EINVAL;
-
- spin_lock( &nl->lock );
- spin_lock( &snl->lock );
- drop_xmit_queue( dev );
-
- /* exclude from list */
- for(;;) { /* must be in list */
- struct net_local *t = netdev_priv(p);
- if( t->link == dev ) {
- t->link = snl->link;
- break;
- }
- p = t->link;
- }
-
- snl->link = NULL;
- snl->master = dev;
- snl->state &= ~FL_SLAVE;
-
- netif_start_queue( dev );
-
- spin_unlock( &snl->lock );
- spin_unlock( &nl->lock );
-
- dev_put( dev );
- return 0;
-}
-
-#endif
-
-static void
-set_multicast_list( struct net_device *dev )
-{
- return; /* sbni always operates in promiscuous mode */
-}
-
-
-#ifdef MODULE
-module_param_hw_array(io, int, ioport, NULL, 0);
-module_param_hw_array(irq, int, irq, NULL, 0);
-module_param_array(baud, int, NULL, 0);
-module_param_array(rxl, int, NULL, 0);
-module_param_array(mac, int, NULL, 0);
-module_param(skip_pci_probe, bool, 0);
-
-MODULE_LICENSE("GPL");
-
-
-int __init init_module( void )
-{
- struct net_device *dev;
- int err;
-
- while( num < SBNI_MAX_NUM_CARDS ) {
- dev = alloc_netdev(sizeof(struct net_local), "sbni%d",
- NET_NAME_UNKNOWN, sbni_devsetup);
- if( !dev)
- break;
-
- sprintf( dev->name, "sbni%d", num );
-
- err = sbni_init(dev);
- if (err) {
- free_netdev(dev);
- break;
- }
-
- if( register_netdev( dev ) ) {
- release_region( dev->base_addr, SBNI_IO_EXTENT );
- free_netdev( dev );
- break;
- }
- }
-
- return *sbni_cards ? 0 : -ENODEV;
-}
-
-void
-cleanup_module(void)
-{
- int i;
-
- for (i = 0; i < SBNI_MAX_NUM_CARDS; ++i) {
- struct net_device *dev = sbni_cards[i];
- if (dev != NULL) {
- unregister_netdev(dev);
- release_region(dev->base_addr, SBNI_IO_EXTENT);
- free_netdev(dev);
- }
- }
-}
-
-#else /* MODULE */
-
-static int __init
-sbni_setup( char *p )
-{
- int n, parm;
-
- if( *p++ != '(' )
- goto bad_param;
-
- for( n = 0, parm = 0; *p && n < 8; ) {
- (*dest[ parm ])[ n ] = simple_strtoul( p, &p, 0 );
- if( !*p || *p == ')' )
- return 1;
- if( *p == ';' ) {
- ++p;
- ++n;
- parm = 0;
- } else if( *p++ != ',' ) {
- break;
- } else {
- if( ++parm >= 5 )
- break;
- }
- }
-bad_param:
- pr_err("Error in sbni kernel parameter!\n");
- return 0;
-}
-
-__setup( "sbni=", sbni_setup );
-
-#endif /* MODULE */
-
-/* -------------------------------------------------------------------------- */
-
-static u32
-calc_crc32( u32 crc, u8 *p, u32 len )
-{
- while( len-- )
- crc = CRC32( *p++, crc );
-
- return crc;
-}
-
-static u32 crc32tab[] __attribute__ ((aligned(8))) = {
- 0xD202EF8D, 0xA505DF1B, 0x3C0C8EA1, 0x4B0BBE37,
- 0xD56F2B94, 0xA2681B02, 0x3B614AB8, 0x4C667A2E,
- 0xDCD967BF, 0xABDE5729, 0x32D70693, 0x45D03605,
- 0xDBB4A3A6, 0xACB39330, 0x35BAC28A, 0x42BDF21C,
- 0xCFB5FFE9, 0xB8B2CF7F, 0x21BB9EC5, 0x56BCAE53,
- 0xC8D83BF0, 0xBFDF0B66, 0x26D65ADC, 0x51D16A4A,
- 0xC16E77DB, 0xB669474D, 0x2F6016F7, 0x58672661,
- 0xC603B3C2, 0xB1048354, 0x280DD2EE, 0x5F0AE278,
- 0xE96CCF45, 0x9E6BFFD3, 0x0762AE69, 0x70659EFF,
- 0xEE010B5C, 0x99063BCA, 0x000F6A70, 0x77085AE6,
- 0xE7B74777, 0x90B077E1, 0x09B9265B, 0x7EBE16CD,
- 0xE0DA836E, 0x97DDB3F8, 0x0ED4E242, 0x79D3D2D4,
- 0xF4DBDF21, 0x83DCEFB7, 0x1AD5BE0D, 0x6DD28E9B,
- 0xF3B61B38, 0x84B12BAE, 0x1DB87A14, 0x6ABF4A82,
- 0xFA005713, 0x8D076785, 0x140E363F, 0x630906A9,
- 0xFD6D930A, 0x8A6AA39C, 0x1363F226, 0x6464C2B0,
- 0xA4DEAE1D, 0xD3D99E8B, 0x4AD0CF31, 0x3DD7FFA7,
- 0xA3B36A04, 0xD4B45A92, 0x4DBD0B28, 0x3ABA3BBE,
- 0xAA05262F, 0xDD0216B9, 0x440B4703, 0x330C7795,
- 0xAD68E236, 0xDA6FD2A0, 0x4366831A, 0x3461B38C,
- 0xB969BE79, 0xCE6E8EEF, 0x5767DF55, 0x2060EFC3,
- 0xBE047A60, 0xC9034AF6, 0x500A1B4C, 0x270D2BDA,
- 0xB7B2364B, 0xC0B506DD, 0x59BC5767, 0x2EBB67F1,
- 0xB0DFF252, 0xC7D8C2C4, 0x5ED1937E, 0x29D6A3E8,
- 0x9FB08ED5, 0xE8B7BE43, 0x71BEEFF9, 0x06B9DF6F,
- 0x98DD4ACC, 0xEFDA7A5A, 0x76D32BE0, 0x01D41B76,
- 0x916B06E7, 0xE66C3671, 0x7F6567CB, 0x0862575D,
- 0x9606C2FE, 0xE101F268, 0x7808A3D2, 0x0F0F9344,
- 0x82079EB1, 0xF500AE27, 0x6C09FF9D, 0x1B0ECF0B,
- 0x856A5AA8, 0xF26D6A3E, 0x6B643B84, 0x1C630B12,
- 0x8CDC1683, 0xFBDB2615, 0x62D277AF, 0x15D54739,
- 0x8BB1D29A, 0xFCB6E20C, 0x65BFB3B6, 0x12B88320,
- 0x3FBA6CAD, 0x48BD5C3B, 0xD1B40D81, 0xA6B33D17,
- 0x38D7A8B4, 0x4FD09822, 0xD6D9C998, 0xA1DEF90E,
- 0x3161E49F, 0x4666D409, 0xDF6F85B3, 0xA868B525,
- 0x360C2086, 0x410B1010, 0xD80241AA, 0xAF05713C,
- 0x220D7CC9, 0x550A4C5F, 0xCC031DE5, 0xBB042D73,
- 0x2560B8D0, 0x52678846, 0xCB6ED9FC, 0xBC69E96A,
- 0x2CD6F4FB, 0x5BD1C46D, 0xC2D895D7, 0xB5DFA541,
- 0x2BBB30E2, 0x5CBC0074, 0xC5B551CE, 0xB2B26158,
- 0x04D44C65, 0x73D37CF3, 0xEADA2D49, 0x9DDD1DDF,
- 0x03B9887C, 0x74BEB8EA, 0xEDB7E950, 0x9AB0D9C6,
- 0x0A0FC457, 0x7D08F4C1, 0xE401A57B, 0x930695ED,
- 0x0D62004E, 0x7A6530D8, 0xE36C6162, 0x946B51F4,
- 0x19635C01, 0x6E646C97, 0xF76D3D2D, 0x806A0DBB,
- 0x1E0E9818, 0x6909A88E, 0xF000F934, 0x8707C9A2,
- 0x17B8D433, 0x60BFE4A5, 0xF9B6B51F, 0x8EB18589,
- 0x10D5102A, 0x67D220BC, 0xFEDB7106, 0x89DC4190,
- 0x49662D3D, 0x3E611DAB, 0xA7684C11, 0xD06F7C87,
- 0x4E0BE924, 0x390CD9B2, 0xA0058808, 0xD702B89E,
- 0x47BDA50F, 0x30BA9599, 0xA9B3C423, 0xDEB4F4B5,
- 0x40D06116, 0x37D75180, 0xAEDE003A, 0xD9D930AC,
- 0x54D13D59, 0x23D60DCF, 0xBADF5C75, 0xCDD86CE3,
- 0x53BCF940, 0x24BBC9D6, 0xBDB2986C, 0xCAB5A8FA,
- 0x5A0AB56B, 0x2D0D85FD, 0xB404D447, 0xC303E4D1,
- 0x5D677172, 0x2A6041E4, 0xB369105E, 0xC46E20C8,
- 0x72080DF5, 0x050F3D63, 0x9C066CD9, 0xEB015C4F,
- 0x7565C9EC, 0x0262F97A, 0x9B6BA8C0, 0xEC6C9856,
- 0x7CD385C7, 0x0BD4B551, 0x92DDE4EB, 0xE5DAD47D,
- 0x7BBE41DE, 0x0CB97148, 0x95B020F2, 0xE2B71064,
- 0x6FBF1D91, 0x18B82D07, 0x81B17CBD, 0xF6B64C2B,
- 0x68D2D988, 0x1FD5E91E, 0x86DCB8A4, 0xF1DB8832,
- 0x616495A3, 0x1663A535, 0x8F6AF48F, 0xF86DC419,
- 0x660951BA, 0x110E612C, 0x88073096, 0xFF000000
-};
-
diff --git a/drivers/net/wan/sbni.h b/drivers/net/wan/sbni.h
deleted file mode 100644
index 84264510a8ed..000000000000
--- a/drivers/net/wan/sbni.h
+++ /dev/null
@@ -1,147 +0,0 @@
-/* sbni.h: definitions for a Granch SBNI12 driver, version 5.0.0
- * Written 2001 Denis I.Timofeev (timofeev@granch.ru)
- * This file is distributed under the GNU GPL
- */
-
-#ifndef SBNI_H
-#define SBNI_H
-
-#ifdef SBNI_DEBUG
-#define DP( A ) A
-#else
-#define DP( A )
-#endif
-
-
-/* We don't have official vendor id yet... */
-#define SBNI_PCI_VENDOR 0x55
-#define SBNI_PCI_DEVICE 0x9f
-
-#define ISA_MODE 0x00
-#define PCI_MODE 0x01
-
-#define SBNI_IO_EXTENT 4
-
-enum sbni_reg {
- CSR0 = 0,
- CSR1 = 1,
- DAT = 2
-};
-
-/* CSR0 mapping */
-enum {
- BU_EMP = 0x02,
- RC_CHK = 0x04,
- CT_ZER = 0x08,
- TR_REQ = 0x10,
- TR_RDY = 0x20,
- EN_INT = 0x40,
- RC_RDY = 0x80
-};
-
-
-/* CSR1 mapping */
-#define PR_RES 0x80
-
-struct sbni_csr1 {
-#ifdef __LITTLE_ENDIAN_BITFIELD
- u8 rxl : 5;
- u8 rate : 2;
- u8 : 1;
-#else
- u8 : 1;
- u8 rate : 2;
- u8 rxl : 5;
-#endif
-};
-
-/* fields in frame header */
-#define FRAME_ACK_MASK (unsigned short)0x7000
-#define FRAME_LEN_MASK (unsigned short)0x03FF
-#define FRAME_FIRST (unsigned short)0x8000
-#define FRAME_RETRY (unsigned short)0x0800
-
-#define FRAME_SENT_BAD (unsigned short)0x4000
-#define FRAME_SENT_OK (unsigned short)0x3000
-
-
-/* state flags */
-enum {
- FL_WAIT_ACK = 0x01,
- FL_NEED_RESEND = 0x02,
- FL_PREV_OK = 0x04,
- FL_SLOW_MODE = 0x08,
- FL_SECONDARY = 0x10,
-#ifdef CONFIG_SBNI_MULTILINE
- FL_SLAVE = 0x20,
-#endif
- FL_LINE_DOWN = 0x40
-};
-
-
-enum {
- DEFAULT_IOBASEADDR = 0x210,
- DEFAULT_INTERRUPTNUMBER = 5,
- DEFAULT_RATE = 0,
- DEFAULT_FRAME_LEN = 1012
-};
-
-#define DEF_RXL_DELTA -1
-#define DEF_RXL 0xf
-
-#define SBNI_SIG 0x5a
-
-#define SBNI_MIN_LEN 60 /* Shortest Ethernet frame without FCS */
-#define SBNI_MAX_FRAME 1023
-#define ETHER_MAX_LEN 1518
-
-#define SBNI_TIMEOUT (HZ/10)
-
-#define TR_ERROR_COUNT 32
-#define CHANGE_LEVEL_START_TICKS 4
-
-#define SBNI_MAX_NUM_CARDS 16
-
-/* internal SBNI-specific statistics */
-struct sbni_in_stats {
- u32 all_rx_number;
- u32 bad_rx_number;
- u32 timeout_number;
- u32 all_tx_number;
- u32 resend_tx_number;
-};
-
-/* SBNI ioctl params */
-#define SIOCDEVGETINSTATS SIOCDEVPRIVATE
-#define SIOCDEVRESINSTATS SIOCDEVPRIVATE+1
-#define SIOCDEVGHWSTATE SIOCDEVPRIVATE+2
-#define SIOCDEVSHWSTATE SIOCDEVPRIVATE+3
-#define SIOCDEVENSLAVE SIOCDEVPRIVATE+4
-#define SIOCDEVEMANSIPATE SIOCDEVPRIVATE+5
-
-
-/* data packet for SIOCDEVGHWSTATE/SIOCDEVSHWSTATE ioctl requests */
-struct sbni_flags {
- u32 rxl : 4;
- u32 rate : 2;
- u32 fixed_rxl : 1;
- u32 slow_mode : 1;
- u32 mac_addr : 24;
-};
-
-/*
- * CRC-32 stuff
- */
-#define CRC32(c,crc) (crc32tab[((size_t)(crc) ^ (c)) & 0xff] ^ (((crc) >> 8) & 0x00FFFFFF))
- /* CRC generator 0xEDB88320 */
- /* CRC remainder 0x2144DF1C */
- /* CRC initial value 0x00000000 */
-#define CRC32_REMAINDER 0x2144DF1C
-#define CRC32_INITIAL 0x00000000
-
-#ifndef __initdata
-#define __initdata
-#endif
-
-#endif
-
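For reference, CRC32() above is the standard table-driven, reflected CRC-32 step (generator 0xEDB88320): index the table with the low byte of (crc XOR data), then shift the remainder right by eight bits. The crc32tab values appear to be pre-transformed so that the driver's zero seed behaves like the conventional 0xFFFFFFFF one, hence the 24-bit mask in the macro. A minimal userspace sketch of the same per-byte technique, with the table derived at run time instead of hard-coded (an illustration, not the driver's code):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

static uint32_t table[256];

static void crc32_init(void)
{
	for (uint32_t i = 0; i < 256; i++) {
		uint32_t c = i;
		for (int k = 0; k < 8; k++)
			c = (c & 1) ? 0xEDB88320 ^ (c >> 1) : c >> 1;
		table[i] = c;
	}
}

static uint32_t crc32_update(uint32_t crc, const uint8_t *p, size_t len)
{
	while (len--)
		crc = table[(crc ^ *p++) & 0xff] ^ (crc >> 8);
	return crc;
}

int main(void)
{
	crc32_init();
	/* Classic parameters: seed and final XOR of 0xFFFFFFFF */
	uint32_t crc = crc32_update(0xFFFFFFFF, (const uint8_t *)"123456789", 9);
	printf("%08X\n", crc ^ 0xFFFFFFFF);	/* expected: CBF43926 */
	return 0;
}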
diff --git a/drivers/net/wan/sealevel.c b/drivers/net/wan/sealevel.c
index 4403e219ca03..eddd20aab691 100644
--- a/drivers/net/wan/sealevel.c
+++ b/drivers/net/wan/sealevel.c
@@ -124,14 +124,6 @@ static int sealevel_close(struct net_device *d)
return 0;
}
-static int sealevel_ioctl(struct net_device *d, struct ifreq *ifr, int cmd)
-{
- /* struct slvl_device *slvl=dev_to_chan(d);
- * z8530_ioctl(d,&slvl->sync.chanA,ifr,cmd)
- */
- return hdlc_ioctl(d, ifr, cmd);
-}
-
/* Passed network frames, fire them downwind. */
static netdev_tx_t sealevel_queue_xmit(struct sk_buff *skb,
@@ -152,7 +144,7 @@ static const struct net_device_ops sealevel_ops = {
.ndo_open = sealevel_open,
.ndo_stop = sealevel_close,
.ndo_start_xmit = hdlc_start_xmit,
- .ndo_do_ioctl = sealevel_ioctl,
+ .ndo_siocwandev = hdlc_ioctl,
};
static int slvl_setup(struct slvl_device *sv, int iobase, int irq)
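The sealevel change above drops a pass-through wrapper: with the new ndo_siocwandev hook the core dispatches SIOCWANDEV directly, so drivers that only forwarded to hdlc_ioctl() can plug it in verbatim. A minimal sketch of the hook shape (hypothetical driver; signature as used by the wanxl conversion below):

static int example_wandev_ioctl(struct net_device *dev, struct if_settings *ifs)
{
	/* Only SIOCWANDEV reaches this hook, so there is no cmd to check;
	 * anything this driver does not handle falls through to the HDLC core.
	 */
	return hdlc_ioctl(dev, ifs);
}

static const struct net_device_ops example_ops = {
	.ndo_siocwandev	= example_wandev_ioctl,
};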
diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c
index f22e48415e6f..5a9e262188ef 100644
--- a/drivers/net/wan/wanxl.c
+++ b/drivers/net/wan/wanxl.c
@@ -343,20 +343,17 @@ static int wanxl_attach(struct net_device *dev, unsigned short encoding,
return 0;
}
-static int wanxl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+static int wanxl_ioctl(struct net_device *dev, struct if_settings *ifs)
{
const size_t size = sizeof(sync_serial_settings);
sync_serial_settings line;
struct port *port = dev_to_port(dev);
- if (cmd != SIOCWANDEV)
- return hdlc_ioctl(dev, ifr, cmd);
-
- switch (ifr->ifr_settings.type) {
+ switch (ifs->type) {
case IF_GET_IFACE:
- ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
- if (ifr->ifr_settings.size < size) {
- ifr->ifr_settings.size = size; /* data size wanted */
+ ifs->type = IF_IFACE_SYNC_SERIAL;
+ if (ifs->size < size) {
+ ifs->size = size; /* data size wanted */
return -ENOBUFS;
}
memset(&line, 0, sizeof(line));
@@ -364,7 +361,7 @@ static int wanxl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
line.clock_rate = 0;
line.loopback = 0;
- if (copy_to_user(ifr->ifr_settings.ifs_ifsu.sync, &line, size))
+ if (copy_to_user(ifs->ifs_ifsu.sync, &line, size))
return -EFAULT;
return 0;
@@ -374,7 +371,7 @@ static int wanxl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
if (dev->flags & IFF_UP)
return -EBUSY;
- if (copy_from_user(&line, ifr->ifr_settings.ifs_ifsu.sync,
+ if (copy_from_user(&line, ifs->ifs_ifsu.sync,
size))
return -EFAULT;
@@ -389,7 +386,7 @@ static int wanxl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return 0;
default:
- return hdlc_ioctl(dev, ifr, cmd);
+ return hdlc_ioctl(dev, ifs);
}
}
@@ -545,7 +542,7 @@ static const struct net_device_ops wanxl_ops = {
.ndo_open = wanxl_open,
.ndo_stop = wanxl_close,
.ndo_start_xmit = hdlc_start_xmit,
- .ndo_do_ioctl = wanxl_ioctl,
+ .ndo_siocwandev = wanxl_ioctl,
.ndo_get_stats = wanxl_get_stats,
};
diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c
index fd37d4d2983b..65dd8cff1b01 100644
--- a/drivers/net/wireless/cisco/airo.c
+++ b/drivers/net/wireless/cisco/airo.c
@@ -1144,7 +1144,7 @@ static int waitbusy(struct airo_info *ai);
static irqreturn_t airo_interrupt(int irq, void* dev_id);
static int airo_thread(void *data);
static void timer_func(struct net_device *dev);
-static int airo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int airo_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user *, int cmd);
static struct iw_statistics *airo_get_wireless_stats(struct net_device *dev);
#ifdef CISCO_EXT
static int readrids(struct net_device *dev, aironet_ioctl *comp);
@@ -2664,7 +2664,7 @@ static const struct net_device_ops airo11_netdev_ops = {
.ndo_start_xmit = airo_start_xmit11,
.ndo_get_stats = airo_get_stats,
.ndo_set_mac_address = airo_set_mac_address,
- .ndo_do_ioctl = airo_ioctl,
+ .ndo_siocdevprivate = airo_siocdevprivate,
};
static void wifi_setup(struct net_device *dev)
@@ -2764,7 +2764,7 @@ static const struct net_device_ops airo_netdev_ops = {
.ndo_get_stats = airo_get_stats,
.ndo_set_rx_mode = airo_set_multicast_list,
.ndo_set_mac_address = airo_set_mac_address,
- .ndo_do_ioctl = airo_ioctl,
+ .ndo_siocdevprivate = airo_siocdevprivate,
.ndo_validate_addr = eth_validate_addr,
};
@@ -2775,7 +2775,7 @@ static const struct net_device_ops mpi_netdev_ops = {
.ndo_get_stats = airo_get_stats,
.ndo_set_rx_mode = airo_set_multicast_list,
.ndo_set_mac_address = airo_set_mac_address,
- .ndo_do_ioctl = airo_ioctl,
+ .ndo_siocdevprivate = airo_siocdevprivate,
.ndo_validate_addr = eth_validate_addr,
};
@@ -7661,7 +7661,8 @@ static const struct iw_handler_def airo_handler_def =
* Javier Achirica did a great job of merging code from the unnamed CISCO
* developer that added support for flashing the card.
*/
-static int airo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+static int airo_siocdevprivate(struct net_device *dev, struct ifreq *rq,
+ void __user *data, int cmd)
{
int rc = 0;
struct airo_info *ai = dev->ml_priv;
@@ -7678,7 +7679,7 @@ static int airo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
int val = AIROMAGIC;
aironet_ioctl com;
- if (copy_from_user(&com, rq->ifr_data, sizeof(com)))
+ if (copy_from_user(&com, data, sizeof(com)))
rc = -EFAULT;
else if (copy_to_user(com.data, (char *)&val, sizeof(val)))
rc = -EFAULT;
@@ -7694,7 +7695,7 @@ static int airo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
*/
{
aironet_ioctl com;
- if (copy_from_user(&com, rq->ifr_data, sizeof(com))) {
+ if (copy_from_user(&com, data, sizeof(com))) {
rc = -EFAULT;
break;
}
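The airo conversion above (and the hostap one below) moves SIOCDEVPRIVATE handling to the dedicated ndo_siocdevprivate hook, which receives the ifr_data user pointer as a separate argument so the core can fix it up once for compat callers. A sketch of the shape, with hypothetical names:

static int example_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
				  void __user *data, int cmd)
{
	struct example_cmd com;	/* hypothetical per-driver request block */

	/* cmd is in the SIOCDEVPRIVATE..SIOCDEVPRIVATE + 15 range; data is
	 * the user pointer that handlers used to read from ifr->ifr_data.
	 */
	if (cmd != SIOCDEVPRIVATE)
		return -EOPNOTSUPP;
	if (copy_from_user(&com, data, sizeof(com)))
		return -EFAULT;
	/* ... act on com, copy any result back with copy_to_user() ... */
	return 0;
}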
diff --git a/drivers/net/wireless/intersil/hostap/hostap.h b/drivers/net/wireless/intersil/hostap/hostap.h
index c4b81ff7d7e4..c17ab6dbbb53 100644
--- a/drivers/net/wireless/intersil/hostap/hostap.h
+++ b/drivers/net/wireless/intersil/hostap/hostap.h
@@ -93,6 +93,7 @@ extern const struct iw_handler_def hostap_iw_handler_def;
extern const struct ethtool_ops prism2_ethtool_ops;
int hostap_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
-
+int hostap_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
+ void __user *data, int cmd);
#endif /* HOSTAP_H */
diff --git a/drivers/net/wireless/intersil/hostap/hostap_ioctl.c b/drivers/net/wireless/intersil/hostap/hostap_ioctl.c
index 49766b285230..0a376f112db9 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_ioctl.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_ioctl.c
@@ -3941,7 +3941,8 @@ const struct iw_handler_def hostap_iw_handler_def =
.get_wireless_stats = hostap_get_wireless_stats,
};
-
+/* Private ioctls (iwpriv) that have not yet been converted
+ * into the new wireless extensions API */
int hostap_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct iwreq *wrq = (struct iwreq *) ifr;
@@ -3953,9 +3954,6 @@ int hostap_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
local = iface->local;
switch (cmd) {
- /* Private ioctls (iwpriv) that have not yet been converted
- * into new wireless extensions API */
-
case PRISM2_IOCTL_INQUIRE:
if (!capable(CAP_NET_ADMIN)) ret = -EPERM;
else ret = prism2_ioctl_priv_inquire(dev, (int *) wrq->u.name);
@@ -4009,11 +4007,31 @@ int hostap_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
wrq->u.ap_addr.sa_data);
break;
#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+/* Private ioctls that are not used with iwpriv;
+ * in SIOCDEVPRIVATE range */
+int hostap_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
+ void __user *data, int cmd)
+{
+ struct iwreq *wrq = (struct iwreq *)ifr;
+ struct hostap_interface *iface;
+ local_info_t *local;
+ int ret = 0;
- /* Private ioctls that are not used with iwpriv;
- * in SIOCDEVPRIVATE range */
+ iface = netdev_priv(dev);
+ local = iface->local;
+
+ if (in_compat_syscall()) /* not implemented yet */
+ return -EOPNOTSUPP;
+ switch (cmd) {
#ifdef PRISM2_DOWNLOAD_SUPPORT
case PRISM2_IOCTL_DOWNLOAD:
if (!capable(CAP_NET_ADMIN)) ret = -EPERM;
diff --git a/drivers/net/wireless/intersil/hostap/hostap_main.c b/drivers/net/wireless/intersil/hostap/hostap_main.c
index de97b3304115..54f67b682b6a 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_main.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_main.c
@@ -797,6 +797,7 @@ static const struct net_device_ops hostap_netdev_ops = {
.ndo_open = prism2_open,
.ndo_stop = prism2_close,
.ndo_do_ioctl = hostap_ioctl,
+ .ndo_siocdevprivate = hostap_siocdevprivate,
.ndo_set_mac_address = prism2_set_mac_address,
.ndo_set_rx_mode = hostap_set_multicast_list,
.ndo_tx_timeout = prism2_tx_timeout,
@@ -809,6 +810,7 @@ static const struct net_device_ops hostap_mgmt_netdev_ops = {
.ndo_open = prism2_open,
.ndo_stop = prism2_close,
.ndo_do_ioctl = hostap_ioctl,
+ .ndo_siocdevprivate = hostap_siocdevprivate,
.ndo_set_mac_address = prism2_set_mac_address,
.ndo_set_rx_mode = hostap_set_multicast_list,
.ndo_tx_timeout = prism2_tx_timeout,
@@ -821,6 +823,7 @@ static const struct net_device_ops hostap_master_ops = {
.ndo_open = prism2_open,
.ndo_stop = prism2_close,
.ndo_do_ioctl = hostap_ioctl,
+ .ndo_siocdevprivate = hostap_siocdevprivate,
.ndo_set_mac_address = prism2_set_mac_address,
.ndo_set_rx_mode = hostap_set_multicast_list,
.ndo_tx_timeout = prism2_tx_timeout,
diff --git a/drivers/net/wwan/Kconfig b/drivers/net/wwan/Kconfig
index de9384326bc8..77dbfc418bce 100644
--- a/drivers/net/wwan/Kconfig
+++ b/drivers/net/wwan/Kconfig
@@ -38,6 +38,18 @@ config MHI_WWAN_CTRL
To compile this driver as a module, choose M here: the module will be
called mhi_wwan_ctrl.
+config MHI_WWAN_MBIM
+ tristate "MHI WWAN MBIM network driver for QCOM-based PCIe modems"
+ depends on MHI_BUS
+ help
+ MHI WWAN MBIM is a WWAN network driver for QCOM-based PCIe modems.
+ It implements MBIM over MHI, for IP data aggregation and muxing.
+ A default wwan0 network interface is created for MBIM data session
+ ID 0. Additional links can be created via the wwan rtnetlink type.
+
+ To compile this driver as a module, choose M here: the module will be
+ called mhi_wwan_mbim.
+
config RPMSG_WWAN_CTRL
tristate "RPMSG WWAN control driver"
depends on RPMSG
diff --git a/drivers/net/wwan/Makefile b/drivers/net/wwan/Makefile
index d90ac33abaef..fe51feedac21 100644
--- a/drivers/net/wwan/Makefile
+++ b/drivers/net/wwan/Makefile
@@ -9,5 +9,6 @@ wwan-objs += wwan_core.o
obj-$(CONFIG_WWAN_HWSIM) += wwan_hwsim.o
obj-$(CONFIG_MHI_WWAN_CTRL) += mhi_wwan_ctrl.o
+obj-$(CONFIG_MHI_WWAN_MBIM) += mhi_wwan_mbim.o
obj-$(CONFIG_RPMSG_WWAN_CTRL) += rpmsg_wwan_ctrl.o
obj-$(CONFIG_IOSM) += iosm/
diff --git a/drivers/net/wwan/iosm/iosm_ipc_pcie.c b/drivers/net/wwan/iosm/iosm_ipc_pcie.c
index 7f7d364d3a51..2fe88b8be348 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_pcie.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_pcie.c
@@ -479,6 +479,7 @@ static struct pci_driver iosm_ipc_driver = {
},
.id_table = iosm_ipc_ids,
};
+module_pci_driver(iosm_ipc_driver);
int ipc_pcie_addr_map(struct iosm_pcie *ipc_pcie, unsigned char *data,
size_t size, dma_addr_t *mapping, int direction)
@@ -560,21 +561,3 @@ void ipc_pcie_kfree_skb(struct iosm_pcie *ipc_pcie, struct sk_buff *skb)
IPC_CB(skb)->mapping = 0;
dev_kfree_skb(skb);
}
-
-static int __init iosm_ipc_driver_init(void)
-{
- if (pci_register_driver(&iosm_ipc_driver)) {
- pr_err("registering of IOSM PCIe driver failed");
- return -1;
- }
-
- return 0;
-}
-
-static void __exit iosm_ipc_driver_exit(void)
-{
- pci_unregister_driver(&iosm_ipc_driver);
-}
-
-module_init(iosm_ipc_driver_init);
-module_exit(iosm_ipc_driver_exit);
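module_pci_driver() generates exactly the init/exit boilerplate deleted above; roughly (minus the old error message, since pci_register_driver() already returns a proper errno rather than -1):

static int __init iosm_ipc_driver_init(void)
{
	return pci_register_driver(&iosm_ipc_driver);
}
module_init(iosm_ipc_driver_init);

static void __exit iosm_ipc_driver_exit(void)
{
	pci_unregister_driver(&iosm_ipc_driver);
}
module_exit(iosm_ipc_driver_exit);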
diff --git a/drivers/net/wwan/mhi_wwan_mbim.c b/drivers/net/wwan/mhi_wwan_mbim.c
new file mode 100644
index 000000000000..377529bbf124
--- /dev/null
+++ b/drivers/net/wwan/mhi_wwan_mbim.c
@@ -0,0 +1,658 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* MHI MBIM Network driver - Network/MBIM over MHI bus
+ *
+ * Copyright (C) 2021 Linaro Ltd <loic.poulain@linaro.org>
+ *
+ * This driver copies some code from cdc_ncm, which is:
+ * Copyright (C) ST-Ericsson 2010-2012
+ * and cdc_mbim, which is:
+ * Copyright (c) 2012 Smith Micro Software, Inc.
+ * Copyright (c) 2012 Bjørn Mork <bjorn@mork.no>
+ *
+ */
+
+#include <linux/ethtool.h>
+#include <linux/if_arp.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <linux/mhi.h>
+#include <linux/mii.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/u64_stats_sync.h>
+#include <linux/usb.h>
+#include <linux/usb/cdc.h>
+#include <linux/usb/usbnet.h>
+#include <linux/usb/cdc_ncm.h>
+#include <linux/wwan.h>
+
+/* An MRU of 3500 optimizes skb allocation: the skbs will basically fit in
+ * one 4K page. Larger MBIM packets will simply be split over several MHI
+ * transfers and chained by the MHI net layer (zerocopy).
+ */
+#define MHI_DEFAULT_MRU 3500
+
+#define MHI_MBIM_DEFAULT_MTU 1500
+#define MHI_MAX_BUF_SZ 0xffff
+
+#define MBIM_NDP16_SIGN_MASK 0x00ffffff
+
+#define MHI_MBIM_LINK_HASH_SIZE 8
+#define LINK_HASH(session) ((session) % MHI_MBIM_LINK_HASH_SIZE)
+
+struct mhi_mbim_link {
+ struct mhi_mbim_context *mbim;
+ struct net_device *ndev;
+ unsigned int session;
+
+ /* stats */
+ u64_stats_t rx_packets;
+ u64_stats_t rx_bytes;
+ u64_stats_t rx_errors;
+ u64_stats_t tx_packets;
+ u64_stats_t tx_bytes;
+ u64_stats_t tx_errors;
+ u64_stats_t tx_dropped;
+ struct u64_stats_sync tx_syncp;
+ struct u64_stats_sync rx_syncp;
+
+ struct hlist_node hlnode;
+};
+
+struct mhi_mbim_context {
+ struct mhi_device *mdev;
+ struct sk_buff *skbagg_head;
+ struct sk_buff *skbagg_tail;
+ unsigned int mru;
+ u32 rx_queue_sz;
+ u16 rx_seq;
+ u16 tx_seq;
+ struct delayed_work rx_refill;
+ spinlock_t tx_lock;
+ struct hlist_head link_list[MHI_MBIM_LINK_HASH_SIZE];
+};
+
+struct mbim_tx_hdr {
+ struct usb_cdc_ncm_nth16 nth16;
+ struct usb_cdc_ncm_ndp16 ndp16;
+ struct usb_cdc_ncm_dpe16 dpe16[2];
+} __packed;
+
+static struct mhi_mbim_link *mhi_mbim_get_link_rcu(struct mhi_mbim_context *mbim,
+ unsigned int session)
+{
+ struct mhi_mbim_link *link;
+
+ hlist_for_each_entry_rcu(link, &mbim->link_list[LINK_HASH(session)], hlnode) {
+ if (link->session == session)
+ return link;
+ }
+
+ return NULL;
+}
+
+static struct sk_buff *mbim_tx_fixup(struct sk_buff *skb, unsigned int session,
+ u16 tx_seq)
+{
+ unsigned int dgram_size = skb->len;
+ struct usb_cdc_ncm_nth16 *nth16;
+ struct usb_cdc_ncm_ndp16 *ndp16;
+ struct mbim_tx_hdr *mbim_hdr;
+
+ /* Only one NDP is sent, containing the IP packet (no aggregation) */
+
+ /* Ensure we have enough headroom for crafting MBIM header */
+ if (skb_cow_head(skb, sizeof(struct mbim_tx_hdr))) {
+ dev_kfree_skb_any(skb);
+ return NULL;
+ }
+
+ mbim_hdr = skb_push(skb, sizeof(struct mbim_tx_hdr));
+
+ /* Fill NTB header */
+ nth16 = &mbim_hdr->nth16;
+ nth16->dwSignature = cpu_to_le32(USB_CDC_NCM_NTH16_SIGN);
+ nth16->wHeaderLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_nth16));
+ nth16->wSequence = cpu_to_le16(tx_seq);
+ nth16->wBlockLength = cpu_to_le16(skb->len);
+ nth16->wNdpIndex = cpu_to_le16(sizeof(struct usb_cdc_ncm_nth16));
+
+ /* Fill the unique NDP */
+ ndp16 = &mbim_hdr->ndp16;
+ ndp16->dwSignature = cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN | (session << 24));
+ ndp16->wLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_ndp16)
+ + sizeof(struct usb_cdc_ncm_dpe16) * 2);
+ ndp16->wNextNdpIndex = 0;
+
+ /* Datagram follows the mbim header */
+ ndp16->dpe16[0].wDatagramIndex = cpu_to_le16(sizeof(struct mbim_tx_hdr));
+ ndp16->dpe16[0].wDatagramLength = cpu_to_le16(dgram_size);
+
+ /* null termination */
+ ndp16->dpe16[1].wDatagramIndex = 0;
+ ndp16->dpe16[1].wDatagramLength = 0;
+
+ return skb;
+}
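For concreteness, the NTB that mbim_tx_fixup() builds for a single 1500-byte datagram lays out as below (offsets computed from the usb_cdc_ncm struct sizes: nth16 is 12 bytes, ndp16 is 8, each dpe16 is 4; a worked example, not part of the source):

/*
 *   offset  0: nth16     wHeaderLength = 12, wNdpIndex = 12,
 *                        wBlockLength = 28 + 1500 = 1528
 *   offset 12: ndp16     wLength = 8 + 2 * 4 = 16, wNextNdpIndex = 0
 *   offset 20: dpe16[0]  wDatagramIndex = 28, wDatagramLength = 1500
 *   offset 24: dpe16[1]  0 / 0 (NULL terminator)
 *   offset 28: the IP datagram itself
 */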
+
+static netdev_tx_t mhi_mbim_ndo_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct mhi_mbim_link *link = wwan_netdev_drvpriv(ndev);
+ struct mhi_mbim_context *mbim = link->mbim;
+ unsigned long flags;
+ int err = -ENOMEM;
+
+ /* Serialize MHI channel queuing and MBIM seq */
+ spin_lock_irqsave(&mbim->tx_lock, flags);
+
+ skb = mbim_tx_fixup(skb, link->session, mbim->tx_seq);
+ if (unlikely(!skb))
+ goto exit_unlock;
+
+ err = mhi_queue_skb(mbim->mdev, DMA_TO_DEVICE, skb, skb->len, MHI_EOT);
+
+ if (mhi_queue_is_full(mbim->mdev, DMA_TO_DEVICE))
+ netif_stop_queue(ndev);
+
+ if (!err)
+ mbim->tx_seq++;
+
+exit_unlock:
+ spin_unlock_irqrestore(&mbim->tx_lock, flags);
+
+ if (unlikely(err)) {
+ net_err_ratelimited("%s: Failed to queue TX buf (%d)\n",
+ ndev->name, err);
+ dev_kfree_skb_any(skb);
+ goto exit_drop;
+ }
+
+ return NETDEV_TX_OK;
+
+exit_drop:
+ u64_stats_update_begin(&link->tx_syncp);
+ u64_stats_inc(&link->tx_dropped);
+ u64_stats_update_end(&link->tx_syncp);
+
+ return NETDEV_TX_OK;
+}
+
+static int mbim_rx_verify_nth16(struct mhi_mbim_context *mbim, struct sk_buff *skb)
+{
+ struct usb_cdc_ncm_nth16 *nth16;
+ int len;
+
+ if (skb->len < sizeof(struct usb_cdc_ncm_nth16) +
+ sizeof(struct usb_cdc_ncm_ndp16)) {
+ net_err_ratelimited("frame too short\n");
+ return -EINVAL;
+ }
+
+ nth16 = (struct usb_cdc_ncm_nth16 *)skb->data;
+
+ if (nth16->dwSignature != cpu_to_le32(USB_CDC_NCM_NTH16_SIGN)) {
+ net_err_ratelimited("invalid NTH16 signature <%#010x>\n",
+ le32_to_cpu(nth16->dwSignature));
+ return -EINVAL;
+ }
+
+ /* No limit on the block length, except the size of the data pkt */
+ len = le16_to_cpu(nth16->wBlockLength);
+ if (len > skb->len) {
+ net_err_ratelimited("NTB does not fit into the skb %u/%u\n",
+ len, skb->len);
+ return -EINVAL;
+ }
+
+ if (mbim->rx_seq + 1 != le16_to_cpu(nth16->wSequence) &&
+ (mbim->rx_seq || le16_to_cpu(nth16->wSequence)) &&
+ !(mbim->rx_seq == 0xffff && !le16_to_cpu(nth16->wSequence))) {
+ net_err_ratelimited("sequence number glitch prev=%d curr=%d\n",
+ mbim->rx_seq, le16_to_cpu(nth16->wSequence));
+ }
+ mbim->rx_seq = le16_to_cpu(nth16->wSequence);
+
+ return le16_to_cpu(nth16->wNdpIndex);
+}
+
+static int mbim_rx_verify_ndp16(struct sk_buff *skb, struct usb_cdc_ncm_ndp16 *ndp16)
+{
+ int ret;
+
+ if (le16_to_cpu(ndp16->wLength) < USB_CDC_NCM_NDP16_LENGTH_MIN) {
+ net_err_ratelimited("invalid DPT16 length <%u>\n",
+ le16_to_cpu(ndp16->wLength));
+ return -EINVAL;
+ }
+
+ ret = ((le16_to_cpu(ndp16->wLength) - sizeof(struct usb_cdc_ncm_ndp16))
+ / sizeof(struct usb_cdc_ncm_dpe16));
+ ret--; /* Last entry is always a NULL terminator */
+
+ if (sizeof(struct usb_cdc_ncm_ndp16) +
+ ret * sizeof(struct usb_cdc_ncm_dpe16) > skb->len) {
+ net_err_ratelimited("Invalid nframes = %d\n", ret);
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static void mhi_mbim_rx(struct mhi_mbim_context *mbim, struct sk_buff *skb)
+{
+ int ndpoffset;
+
+ /* Check NTB header and retrieve first NDP offset */
+ ndpoffset = mbim_rx_verify_nth16(mbim, skb);
+ if (ndpoffset < 0) {
+ net_err_ratelimited("mbim: Incorrect NTB header\n");
+ goto error;
+ }
+
+ /* Process each NDP */
+ while (1) {
+ struct usb_cdc_ncm_ndp16 ndp16;
+ struct usb_cdc_ncm_dpe16 dpe16;
+ struct mhi_mbim_link *link;
+ int nframes, n, dpeoffset;
+ unsigned int session;
+
+ if (skb_copy_bits(skb, ndpoffset, &ndp16, sizeof(ndp16))) {
+ net_err_ratelimited("mbim: Incorrect NDP offset (%u)\n",
+ ndpoffset);
+ goto error;
+ }
+
+ /* Check NDP header and retrieve number of datagrams */
+ nframes = mbim_rx_verify_ndp16(skb, &ndp16);
+ if (nframes < 0) {
+ net_err_ratelimited("mbim: Incorrect NDP16\n");
+ goto error;
+ }
+
+ /* Only IP data type supported, no DSS in MHI context */
+ if ((ndp16.dwSignature & cpu_to_le32(MBIM_NDP16_SIGN_MASK))
+ != cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN)) {
+ net_err_ratelimited("mbim: Unsupported NDP type\n");
+ goto next_ndp;
+ }
+
+ session = (le32_to_cpu(ndp16.dwSignature) & ~MBIM_NDP16_SIGN_MASK) >> 24;
+
+ rcu_read_lock();
+
+ link = mhi_mbim_get_link_rcu(mbim, session);
+ if (!link) {
+ net_err_ratelimited("mbim: bad packet session (%u)\n", session);
+ goto unlock;
+ }
+
+ /* de-aggregate and deliver IP packets */
+ dpeoffset = ndpoffset + sizeof(struct usb_cdc_ncm_ndp16);
+ for (n = 0; n < nframes; n++, dpeoffset += sizeof(dpe16)) {
+ u16 dgram_offset, dgram_len;
+ struct sk_buff *skbn;
+
+ if (skb_copy_bits(skb, dpeoffset, &dpe16, sizeof(dpe16)))
+ break;
+
+ dgram_offset = le16_to_cpu(dpe16.wDatagramIndex);
+ dgram_len = le16_to_cpu(dpe16.wDatagramLength);
+
+ if (!dgram_offset || !dgram_len)
+ break; /* null terminator */
+
+ skbn = netdev_alloc_skb(link->ndev, dgram_len);
+ if (!skbn)
+ continue;
+
+ skb_put(skbn, dgram_len);
+ skb_copy_bits(skb, dgram_offset, skbn->data, dgram_len);
+
+ switch (skbn->data[0] & 0xf0) {
+ case 0x40:
+ skbn->protocol = htons(ETH_P_IP);
+ break;
+ case 0x60:
+ skbn->protocol = htons(ETH_P_IPV6);
+ break;
+ default:
+ net_err_ratelimited("%s: unknown protocol\n",
+ link->ndev->name);
+ dev_kfree_skb_any(skbn);
+ u64_stats_update_begin(&link->rx_syncp);
+ u64_stats_inc(&link->rx_errors);
+ u64_stats_update_end(&link->rx_syncp);
+ continue;
+ }
+
+ u64_stats_update_begin(&link->rx_syncp);
+ u64_stats_inc(&link->rx_packets);
+ u64_stats_add(&link->rx_bytes, skbn->len);
+ u64_stats_update_end(&link->rx_syncp);
+
+ netif_rx(skbn);
+ }
+unlock:
+ rcu_read_unlock();
+next_ndp:
+ /* Other NDP to process? */
+ ndpoffset = (int)le16_to_cpu(ndp16.wNextNdpIndex);
+ if (!ndpoffset)
+ break;
+ }
+
+ /* free skb */
+ dev_consume_skb_any(skb);
+ return;
+error:
+ dev_kfree_skb_any(skb);
+}
+
+static struct sk_buff *mhi_net_skb_agg(struct mhi_mbim_context *mbim,
+ struct sk_buff *skb)
+{
+ struct sk_buff *head = mbim->skbagg_head;
+ struct sk_buff *tail = mbim->skbagg_tail;
+
+ /* This is non-paged skb chaining using frag_list */
+ if (!head) {
+ mbim->skbagg_head = skb;
+ return skb;
+ }
+
+ if (!skb_shinfo(head)->frag_list)
+ skb_shinfo(head)->frag_list = skb;
+ else
+ tail->next = skb;
+
+ head->len += skb->len;
+ head->data_len += skb->len;
+ head->truesize += skb->truesize;
+
+ mbim->skbagg_tail = skb;
+
+ return mbim->skbagg_head;
+}
+
+static void mhi_net_rx_refill_work(struct work_struct *work)
+{
+ struct mhi_mbim_context *mbim = container_of(work, struct mhi_mbim_context,
+ rx_refill.work);
+ struct mhi_device *mdev = mbim->mdev;
+ int err;
+
+ while (!mhi_queue_is_full(mdev, DMA_FROM_DEVICE)) {
+ struct sk_buff *skb = alloc_skb(MHI_DEFAULT_MRU, GFP_KERNEL);
+
+ if (unlikely(!skb))
+ break;
+
+ err = mhi_queue_skb(mdev, DMA_FROM_DEVICE, skb,
+ MHI_DEFAULT_MRU, MHI_EOT);
+ if (unlikely(err)) {
+ kfree_skb(skb);
+ break;
+ }
+
+ /* Do not hog the CPU if rx buffers are consumed faster than
+ * queued (unlikely).
+ */
+ cond_resched();
+ }
+
+ /* If we're still starved of rx buffers, reschedule later */
+ if (mhi_get_free_desc_count(mdev, DMA_FROM_DEVICE) == mbim->rx_queue_sz)
+ schedule_delayed_work(&mbim->rx_refill, HZ / 2);
+}
+
+static void mhi_mbim_dl_callback(struct mhi_device *mhi_dev,
+ struct mhi_result *mhi_res)
+{
+ struct mhi_mbim_context *mbim = dev_get_drvdata(&mhi_dev->dev);
+ struct sk_buff *skb = mhi_res->buf_addr;
+ int free_desc_count;
+
+ free_desc_count = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);
+
+ if (unlikely(mhi_res->transaction_status)) {
+ switch (mhi_res->transaction_status) {
+ case -EOVERFLOW:
+ /* Packet has been split over multiple transfers */
+ skb_put(skb, mhi_res->bytes_xferd);
+ mhi_net_skb_agg(mbim, skb);
+ break;
+ case -ENOTCONN:
+ /* MHI layer stopping/resetting the DL channel */
+ dev_kfree_skb_any(skb);
+ return;
+ default:
+ /* Unknown error, simply drop */
+ dev_kfree_skb_any(skb);
+ }
+ } else {
+ skb_put(skb, mhi_res->bytes_xferd);
+
+ if (mbim->skbagg_head) {
+ /* Aggregate the final fragment */
+ skb = mhi_net_skb_agg(mbim, skb);
+ mbim->skbagg_head = NULL;
+ }
+
+ mhi_mbim_rx(mbim, skb);
+ }
+
+ /* Refill if RX buffers queue becomes low */
+ if (free_desc_count >= mbim->rx_queue_sz / 2)
+ schedule_delayed_work(&mbim->rx_refill, 0);
+}
+
+static void mhi_mbim_ndo_get_stats64(struct net_device *ndev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct mhi_mbim_link *link = wwan_netdev_drvpriv(ndev);
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin_irq(&link->rx_syncp);
+ stats->rx_packets = u64_stats_read(&link->rx_packets);
+ stats->rx_bytes = u64_stats_read(&link->rx_bytes);
+ stats->rx_errors = u64_stats_read(&link->rx_errors);
+ } while (u64_stats_fetch_retry_irq(&link->rx_syncp, start));
+
+ do {
+ start = u64_stats_fetch_begin_irq(&link->tx_syncp);
+ stats->tx_packets = u64_stats_read(&link->tx_packets);
+ stats->tx_bytes = u64_stats_read(&link->tx_bytes);
+ stats->tx_errors = u64_stats_read(&link->tx_errors);
+ stats->tx_dropped = u64_stats_read(&link->tx_dropped);
+ } while (u64_stats_fetch_retry_irq(&link->tx_syncp, start));
+}
+
+static void mhi_mbim_ul_callback(struct mhi_device *mhi_dev,
+ struct mhi_result *mhi_res)
+{
+ struct mhi_mbim_context *mbim = dev_get_drvdata(&mhi_dev->dev);
+ struct sk_buff *skb = mhi_res->buf_addr;
+ struct net_device *ndev = skb->dev;
+ struct mhi_mbim_link *link = wwan_netdev_drvpriv(ndev);
+
+ /* Hardware has consumed the buffer, so free the skb (which is not
+ * freed by the MHI stack) and perform accounting.
+ */
+ dev_consume_skb_any(skb);
+
+ u64_stats_update_begin(&link->tx_syncp);
+ if (unlikely(mhi_res->transaction_status)) {
+ /* MHI layer stopping/resetting the UL channel */
+ if (mhi_res->transaction_status == -ENOTCONN) {
+ u64_stats_update_end(&link->tx_syncp);
+ return;
+ }
+
+ u64_stats_inc(&link->tx_errors);
+ } else {
+ u64_stats_inc(&link->tx_packets);
+ u64_stats_add(&link->tx_bytes, mhi_res->bytes_xferd);
+ }
+ u64_stats_update_end(&link->tx_syncp);
+
+ if (netif_queue_stopped(ndev) && !mhi_queue_is_full(mbim->mdev, DMA_TO_DEVICE))
+ netif_wake_queue(ndev);
+}
+
+static int mhi_mbim_ndo_open(struct net_device *ndev)
+{
+ struct mhi_mbim_link *link = wwan_netdev_drvpriv(ndev);
+
+ /* Feed the MHI rx buffer pool */
+ schedule_delayed_work(&link->mbim->rx_refill, 0);
+
+ /* Carrier is established via out-of-band channel (e.g. qmi) */
+ netif_carrier_on(ndev);
+
+ netif_start_queue(ndev);
+
+ return 0;
+}
+
+static int mhi_mbim_ndo_stop(struct net_device *ndev)
+{
+ netif_stop_queue(ndev);
+ netif_carrier_off(ndev);
+
+ return 0;
+}
+
+static const struct net_device_ops mhi_mbim_ndo = {
+ .ndo_open = mhi_mbim_ndo_open,
+ .ndo_stop = mhi_mbim_ndo_stop,
+ .ndo_start_xmit = mhi_mbim_ndo_xmit,
+ .ndo_get_stats64 = mhi_mbim_ndo_get_stats64,
+};
+
+static int mhi_mbim_newlink(void *ctxt, struct net_device *ndev, u32 if_id,
+ struct netlink_ext_ack *extack)
+{
+ struct mhi_mbim_link *link = wwan_netdev_drvpriv(ndev);
+ struct mhi_mbim_context *mbim = ctxt;
+
+ link->session = if_id;
+ link->mbim = mbim;
+ link->ndev = ndev;
+ u64_stats_init(&link->rx_syncp);
+ u64_stats_init(&link->tx_syncp);
+
+ rcu_read_lock();
+ if (mhi_mbim_get_link_rcu(mbim, if_id)) {
+ rcu_read_unlock();
+ return -EEXIST;
+ }
+ rcu_read_unlock();
+
+ /* Already protected by RTNL lock */
+ hlist_add_head_rcu(&link->hlnode, &mbim->link_list[LINK_HASH(if_id)]);
+
+ return register_netdevice(ndev);
+}
+
+static void mhi_mbim_dellink(void *ctxt, struct net_device *ndev,
+ struct list_head *head)
+{
+ struct mhi_mbim_link *link = wwan_netdev_drvpriv(ndev);
+
+ hlist_del_init_rcu(&link->hlnode);
+ synchronize_rcu();
+
+ unregister_netdevice_queue(ndev, head);
+}
+
+static void mhi_mbim_setup(struct net_device *ndev)
+{
+ ndev->header_ops = NULL; /* No header */
+ ndev->type = ARPHRD_RAWIP;
+ ndev->needed_headroom = sizeof(struct mbim_tx_hdr);
+ ndev->hard_header_len = 0;
+ ndev->addr_len = 0;
+ ndev->flags = IFF_POINTOPOINT | IFF_NOARP;
+ ndev->netdev_ops = &mhi_mbim_ndo;
+ ndev->mtu = MHI_MBIM_DEFAULT_MTU;
+ ndev->min_mtu = ETH_MIN_MTU;
+ ndev->max_mtu = MHI_MAX_BUF_SZ - ndev->needed_headroom;
+ ndev->tx_queue_len = 1000;
+}
+
+static const struct wwan_ops mhi_mbim_wwan_ops = {
+ .priv_size = sizeof(struct mhi_mbim_link),
+ .setup = mhi_mbim_setup,
+ .newlink = mhi_mbim_newlink,
+ .dellink = mhi_mbim_dellink,
+};
+
+static int mhi_mbim_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id)
+{
+ struct mhi_controller *cntrl = mhi_dev->mhi_cntrl;
+ struct mhi_mbim_context *mbim;
+ int err;
+
+ mbim = devm_kzalloc(&mhi_dev->dev, sizeof(*mbim), GFP_KERNEL);
+ if (!mbim)
+ return -ENOMEM;
+
+ spin_lock_init(&mbim->tx_lock);
+ dev_set_drvdata(&mhi_dev->dev, mbim);
+ mbim->mdev = mhi_dev;
+ mbim->mru = mhi_dev->mhi_cntrl->mru ? mhi_dev->mhi_cntrl->mru : MHI_DEFAULT_MRU;
+
+ INIT_DELAYED_WORK(&mbim->rx_refill, mhi_net_rx_refill_work);
+
+ /* Start MHI channels */
+ err = mhi_prepare_for_transfer(mhi_dev, 0);
+ if (err)
+ return err;
+
+ /* Number of transfer descriptors determines size of the queue */
+ mbim->rx_queue_sz = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);
+
+ /* Register wwan link ops with MHI controller representing WWAN instance */
+ return wwan_register_ops(&cntrl->mhi_dev->dev, &mhi_mbim_wwan_ops, mbim, 0);
+}
+
+static void mhi_mbim_remove(struct mhi_device *mhi_dev)
+{
+ struct mhi_mbim_context *mbim = dev_get_drvdata(&mhi_dev->dev);
+ struct mhi_controller *cntrl = mhi_dev->mhi_cntrl;
+
+ mhi_unprepare_from_transfer(mhi_dev);
+ cancel_delayed_work_sync(&mbim->rx_refill);
+ wwan_unregister_ops(&cntrl->mhi_dev->dev);
+ kfree_skb(mbim->skbagg_head);
+ dev_set_drvdata(&mhi_dev->dev, NULL);
+}
+
+static const struct mhi_device_id mhi_mbim_id_table[] = {
+ /* Hardware accelerated data PATH (to modem IPA), MBIM protocol */
+ { .chan = "IP_HW0_MBIM", .driver_data = 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(mhi, mhi_mbim_id_table);
+
+static struct mhi_driver mhi_mbim_driver = {
+ .probe = mhi_mbim_probe,
+ .remove = mhi_mbim_remove,
+ .dl_xfer_cb = mhi_mbim_dl_callback,
+ .ul_xfer_cb = mhi_mbim_ul_callback,
+ .id_table = mhi_mbim_id_table,
+ .driver = {
+ .name = "mhi_wwan_mbim",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_mhi_driver(mhi_mbim_driver);
+
+MODULE_AUTHOR("Loic Poulain <loic.poulain@linaro.org>");
+MODULE_DESCRIPTION("Network/MBIM over MHI");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/wwan/wwan_core.c b/drivers/net/wwan/wwan_core.c
index 35ece98134c0..d293ab688044 100644
--- a/drivers/net/wwan/wwan_core.c
+++ b/drivers/net/wwan/wwan_core.c
@@ -359,8 +359,8 @@ struct wwan_port *wwan_create_port(struct device *parent,
{
struct wwan_device *wwandev;
struct wwan_port *port;
- int minor, err = -ENOMEM;
char namefmt[0x20];
+ int minor, err;
if (type > WWAN_PORT_MAX || !ops)
return ERR_PTR(-EINVAL);
@@ -374,11 +374,14 @@ struct wwan_port *wwan_create_port(struct device *parent,
/* A port is exposed as character device, get a minor */
minor = ida_alloc_range(&minors, 0, WWAN_MAX_MINORS - 1, GFP_KERNEL);
- if (minor < 0)
+ if (minor < 0) {
+ err = minor;
goto error_wwandev_remove;
+ }
port = kzalloc(sizeof(*port), GFP_KERNEL);
if (!port) {
+ err = -ENOMEM;
ida_free(&minors, minor);
goto error_wwandev_remove;
}
diff --git a/drivers/nfc/fdp/fdp.c b/drivers/nfc/fdp/fdp.c
index 528745862738..c6b3334f24c9 100644
--- a/drivers/nfc/fdp/fdp.c
+++ b/drivers/nfc/fdp/fdp.c
@@ -38,7 +38,7 @@
#define NCI_OP_PROP_SET_PDATA_OID 0x23
struct fdp_nci_info {
- struct nfc_phy_ops *phy_ops;
+ const struct nfc_phy_ops *phy_ops;
struct fdp_i2c_phy *phy;
struct nci_dev *ndev;
@@ -52,7 +52,7 @@ struct fdp_nci_info {
u32 limited_otp_version;
u8 key_index;
- u8 *fw_vsc_cfg;
+ const u8 *fw_vsc_cfg;
u8 clock_type;
u32 clock_freq;
@@ -65,7 +65,7 @@ struct fdp_nci_info {
wait_queue_head_t setup_wq;
};
-static u8 nci_core_get_config_otp_ram_version[5] = {
+static const u8 nci_core_get_config_otp_ram_version[5] = {
0x04,
NCI_PARAM_ID_FW_RAM_VERSION,
NCI_PARAM_ID_FW_OTP_VERSION,
@@ -111,7 +111,7 @@ static inline int fdp_nci_patch_cmd(struct nci_dev *ndev, u8 type)
}
static inline int fdp_nci_set_production_data(struct nci_dev *ndev, u8 len,
- char *data)
+ const char *data)
{
return nci_prop_cmd(ndev, NCI_OP_PROP_SET_PDATA_OID, len, data);
}
@@ -236,7 +236,7 @@ static int fdp_nci_send_patch(struct nci_dev *ndev, u8 conn_id, u8 type)
static int fdp_nci_open(struct nci_dev *ndev)
{
- struct fdp_nci_info *info = nci_get_drvdata(ndev);
+ const struct fdp_nci_info *info = nci_get_drvdata(ndev);
return info->phy_ops->enable(info->phy);
}
@@ -260,7 +260,7 @@ static int fdp_nci_request_firmware(struct nci_dev *ndev)
{
struct fdp_nci_info *info = nci_get_drvdata(ndev);
struct device *dev = &info->phy->i2c_dev->dev;
- u8 *data;
+ const u8 *data;
int r;
r = request_firmware(&info->ram_patch, FDP_RAM_PATCH_NAME, dev);
@@ -269,15 +269,15 @@ static int fdp_nci_request_firmware(struct nci_dev *ndev)
return r;
}
- data = (u8 *) info->ram_patch->data;
+ data = info->ram_patch->data;
info->ram_patch_version =
data[FDP_FW_HEADER_SIZE] |
(data[FDP_FW_HEADER_SIZE + 1] << 8) |
(data[FDP_FW_HEADER_SIZE + 2] << 16) |
(data[FDP_FW_HEADER_SIZE + 3] << 24);
- dev_dbg(dev, "RAM patch version: %d, size: %d\n",
- info->ram_patch_version, (int) info->ram_patch->size);
+ dev_dbg(dev, "RAM patch version: %d, size: %zu\n",
+ info->ram_patch_version, info->ram_patch->size);
r = request_firmware(&info->otp_patch, FDP_OTP_PATCH_NAME, dev);
@@ -293,8 +293,8 @@ static int fdp_nci_request_firmware(struct nci_dev *ndev)
(data[FDP_FW_HEADER_SIZE+2] << 16) |
(data[FDP_FW_HEADER_SIZE+3] << 24);
- dev_dbg(dev, "OTP patch version: %d, size: %d\n",
- info->otp_patch_version, (int) info->otp_patch->size);
+ dev_dbg(dev, "OTP patch version: %d, size: %zu\n",
+ info->otp_patch_version, info->otp_patch->size);
return 0;
}
@@ -610,8 +610,9 @@ static int fdp_nci_core_get_config_rsp_packet(struct nci_dev *ndev,
{
struct fdp_nci_info *info = nci_get_drvdata(ndev);
struct device *dev = &info->phy->i2c_dev->dev;
- struct nci_core_get_config_rsp *rsp = (void *) skb->data;
- u8 i, *p;
+ const struct nci_core_get_config_rsp *rsp = (void *) skb->data;
+ unsigned int i;
+ const u8 *p;
if (rsp->status == NCI_STATUS_OK) {
@@ -651,7 +652,7 @@ static int fdp_nci_core_get_config_rsp_packet(struct nci_dev *ndev,
return 0;
}
-static struct nci_driver_ops fdp_core_ops[] = {
+static const struct nci_driver_ops fdp_core_ops[] = {
{
.opcode = NCI_OP_CORE_GET_CONFIG_RSP,
.rsp = fdp_nci_core_get_config_rsp_packet,
@@ -662,7 +663,7 @@ static struct nci_driver_ops fdp_core_ops[] = {
},
};
-static struct nci_driver_ops fdp_prop_ops[] = {
+static const struct nci_driver_ops fdp_prop_ops[] = {
{
.opcode = nci_opcode_pack(NCI_GID_PROP, NCI_OP_PROP_PATCH_OID),
.rsp = fdp_nci_prop_patch_rsp_packet,
@@ -675,7 +676,7 @@ static struct nci_driver_ops fdp_prop_ops[] = {
},
};
-static struct nci_ops nci_ops = {
+static const struct nci_ops nci_ops = {
.open = fdp_nci_open,
.close = fdp_nci_close,
.send = fdp_nci_send,
@@ -687,10 +688,10 @@ static struct nci_ops nci_ops = {
.n_core_ops = ARRAY_SIZE(fdp_core_ops),
};
-int fdp_nci_probe(struct fdp_i2c_phy *phy, struct nfc_phy_ops *phy_ops,
+int fdp_nci_probe(struct fdp_i2c_phy *phy, const struct nfc_phy_ops *phy_ops,
struct nci_dev **ndevp, int tx_headroom,
int tx_tailroom, u8 clock_type, u32 clock_freq,
- u8 *fw_vsc_cfg)
+ const u8 *fw_vsc_cfg)
{
struct device *dev = &phy->i2c_dev->dev;
struct fdp_nci_info *info;
@@ -718,6 +719,7 @@ int fdp_nci_probe(struct fdp_i2c_phy *phy, struct nfc_phy_ops *phy_ops,
NFC_PROTO_NFC_DEP_MASK |
NFC_PROTO_ISO15693_MASK;
+ BUILD_BUG_ON(ARRAY_SIZE(fdp_prop_ops) > NCI_MAX_PROPRIETARY_CMD);
ndev = nci_allocate_device(&nci_ops, protocols, tx_headroom,
tx_tailroom);
if (!ndev) {
diff --git a/drivers/nfc/fdp/fdp.h b/drivers/nfc/fdp/fdp.h
index ead3b21ccae6..2e9161a4d7bf 100644
--- a/drivers/nfc/fdp/fdp.h
+++ b/drivers/nfc/fdp/fdp.h
@@ -21,9 +21,9 @@ struct fdp_i2c_phy {
uint16_t next_read_size;
};
-int fdp_nci_probe(struct fdp_i2c_phy *phy, struct nfc_phy_ops *phy_ops,
+int fdp_nci_probe(struct fdp_i2c_phy *phy, const struct nfc_phy_ops *phy_ops,
struct nci_dev **ndev, int tx_headroom, int tx_tailroom,
- u8 clock_type, u32 clock_freq, u8 *fw_vsc_cfg);
+ u8 clock_type, u32 clock_freq, const u8 *fw_vsc_cfg);
void fdp_nci_remove(struct nci_dev *ndev);
#endif /* __LOCAL_FDP_H_ */
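The constification running through these NFC diffs is mechanical but worthwhile: an ops table declared const can be placed in read-only memory and cannot be retargeted at run time. The pattern, sketched with hypothetical names (member signatures as used by the fdp i2c phy below):

static int example_write(void *phy_id, struct sk_buff *skb);
static int example_enable(void *phy_id);
static void example_disable(void *phy_id);

static const struct nfc_phy_ops example_phy_ops = {
	.write		= example_write,
	.enable		= example_enable,
	.disable	= example_disable,
};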
diff --git a/drivers/nfc/fdp/i2c.c b/drivers/nfc/fdp/i2c.c
index c5596e514648..051c43a2a52f 100644
--- a/drivers/nfc/fdp/i2c.c
+++ b/drivers/nfc/fdp/i2c.c
@@ -36,7 +36,7 @@
print_hex_dump(KERN_DEBUG, prefix": ", DUMP_PREFIX_OFFSET, \
16, 1, (skb)->data, (skb)->len, 0)
-static void fdp_nci_i2c_reset(struct fdp_i2c_phy *phy)
+static void fdp_nci_i2c_reset(const struct fdp_i2c_phy *phy)
{
/* Reset RST/WakeUP for at least 100 micro-second */
gpiod_set_value_cansleep(phy->power_gpio, FDP_POWER_OFF);
@@ -47,7 +47,7 @@ static void fdp_nci_i2c_reset(struct fdp_i2c_phy *phy)
static int fdp_nci_i2c_enable(void *phy_id)
{
- struct fdp_i2c_phy *phy = phy_id;
+ const struct fdp_i2c_phy *phy = phy_id;
fdp_nci_i2c_reset(phy);
@@ -56,7 +56,7 @@ static int fdp_nci_i2c_enable(void *phy_id)
static void fdp_nci_i2c_disable(void *phy_id)
{
- struct fdp_i2c_phy *phy = phy_id;
+ const struct fdp_i2c_phy *phy = phy_id;
fdp_nci_i2c_reset(phy);
}
@@ -120,7 +120,7 @@ static int fdp_nci_i2c_write(void *phy_id, struct sk_buff *skb)
return r;
}
-static struct nfc_phy_ops i2c_phy_ops = {
+static const struct nfc_phy_ops i2c_phy_ops = {
.write = fdp_nci_i2c_write,
.enable = fdp_nci_i2c_enable,
.disable = fdp_nci_i2c_disable,
diff --git a/drivers/nfc/mei_phy.c b/drivers/nfc/mei_phy.c
index e56cea716cd2..f9cca885beec 100644
--- a/drivers/nfc/mei_phy.c
+++ b/drivers/nfc/mei_phy.c
@@ -202,7 +202,7 @@ err:
return r;
}
-static int mei_nfc_send(struct nfc_mei_phy *phy, u8 *buf, size_t length)
+static int mei_nfc_send(struct nfc_mei_phy *phy, const u8 *buf, size_t length)
{
struct mei_nfc_hdr *hdr;
u8 *mei_buf;
@@ -362,7 +362,7 @@ static void nfc_mei_phy_disable(void *phy_id)
phy->powered = 0;
}
-struct nfc_phy_ops mei_phy_ops = {
+const struct nfc_phy_ops mei_phy_ops = {
.write = nfc_mei_phy_write,
.enable = nfc_mei_phy_enable,
.disable = nfc_mei_phy_disable,
diff --git a/drivers/nfc/mei_phy.h b/drivers/nfc/mei_phy.h
index 51bd44f5f3b8..2b1edb3eba15 100644
--- a/drivers/nfc/mei_phy.h
+++ b/drivers/nfc/mei_phy.h
@@ -45,7 +45,7 @@ struct nfc_mei_phy {
int hard_fault;
};
-extern struct nfc_phy_ops mei_phy_ops;
+extern const struct nfc_phy_ops mei_phy_ops;
struct nfc_mei_phy *nfc_mei_phy_alloc(struct mei_cl_device *device);
void nfc_mei_phy_free(struct nfc_mei_phy *phy);
diff --git a/drivers/nfc/microread/i2c.c b/drivers/nfc/microread/i2c.c
index dd78d987e6c9..86f593c73ed6 100644
--- a/drivers/nfc/microread/i2c.c
+++ b/drivers/nfc/microread/i2c.c
@@ -73,7 +73,7 @@ static void microread_i2c_remove_len_crc(struct sk_buff *skb)
skb_trim(skb, MICROREAD_I2C_FRAME_TAILROOM);
}
-static int check_crc(struct sk_buff *skb)
+static int check_crc(const struct sk_buff *skb)
{
int i;
u8 crc = 0;
@@ -225,7 +225,7 @@ static irqreturn_t microread_i2c_irq_thread_fn(int irq, void *phy_id)
return IRQ_HANDLED;
}
-static struct nfc_phy_ops i2c_phy_ops = {
+static const struct nfc_phy_ops i2c_phy_ops = {
.write = microread_i2c_write,
.enable = microread_i2c_enable,
.disable = microread_i2c_disable,
diff --git a/drivers/nfc/microread/microread.c b/drivers/nfc/microread/microread.c
index b1d3975e8a81..9d83ccebd434 100644
--- a/drivers/nfc/microread/microread.c
+++ b/drivers/nfc/microread/microread.c
@@ -131,7 +131,7 @@
#define MICROREAD_ELT_ID_SE2 0x04
#define MICROREAD_ELT_ID_SE3 0x05
-static struct nfc_hci_gate microread_gates[] = {
+static const struct nfc_hci_gate microread_gates[] = {
{MICROREAD_GATE_ID_ADM, MICROREAD_PIPE_ID_ADMIN},
{MICROREAD_GATE_ID_LOOPBACK, MICROREAD_PIPE_ID_HDS_LOOPBACK},
{MICROREAD_GATE_ID_IDT, MICROREAD_PIPE_ID_HDS_IDT},
@@ -152,7 +152,7 @@ static struct nfc_hci_gate microread_gates[] = {
#define MICROREAD_CMD_TAILROOM 2
struct microread_info {
- struct nfc_phy_ops *phy_ops;
+ const struct nfc_phy_ops *phy_ops;
void *phy_id;
struct nfc_hci_dev *hdev;
@@ -358,7 +358,7 @@ static int microread_complete_target_discovered(struct nfc_hci_dev *hdev,
static void microread_im_transceive_cb(void *context, struct sk_buff *skb,
int err)
{
- struct microread_info *info = context;
+ const struct microread_info *info = context;
switch (info->async_cb_type) {
case MICROREAD_CB_TYPE_READER_ALL:
@@ -625,7 +625,7 @@ static int microread_event_received(struct nfc_hci_dev *hdev, u8 pipe,
return r;
}
-static struct nfc_hci_ops microread_hci_ops = {
+static const struct nfc_hci_ops microread_hci_ops = {
.open = microread_open,
.close = microread_close,
.hci_ready = microread_hci_ready,
@@ -641,9 +641,9 @@ static struct nfc_hci_ops microread_hci_ops = {
.event_received = microread_event_received,
};
-int microread_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name,
- int phy_headroom, int phy_tailroom, int phy_payload,
- struct nfc_hci_dev **hdev)
+int microread_probe(void *phy_id, const struct nfc_phy_ops *phy_ops,
+ const char *llc_name, int phy_headroom, int phy_tailroom,
+ int phy_payload, struct nfc_hci_dev **hdev)
{
struct microread_info *info;
unsigned long quirks = 0;
diff --git a/drivers/nfc/microread/microread.h b/drivers/nfc/microread/microread.h
index 044f5e456375..2ee7ccfa22dd 100644
--- a/drivers/nfc/microread/microread.h
+++ b/drivers/nfc/microread/microread.h
@@ -10,9 +10,9 @@
#define DRIVER_DESC "NFC driver for microread"
-int microread_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name,
- int phy_headroom, int phy_tailroom, int phy_payload,
- struct nfc_hci_dev **hdev);
+int microread_probe(void *phy_id, const struct nfc_phy_ops *phy_ops,
+ const char *llc_name, int phy_headroom, int phy_tailroom,
+ int phy_payload, struct nfc_hci_dev **hdev);
void microread_remove(struct nfc_hci_dev *hdev);
diff --git a/drivers/nfc/nfcmrvl/fw_dnld.c b/drivers/nfc/nfcmrvl/fw_dnld.c
index aaccb8b76b3e..edac56b01fd1 100644
--- a/drivers/nfc/nfcmrvl/fw_dnld.c
+++ b/drivers/nfc/nfcmrvl/fw_dnld.c
@@ -129,7 +129,7 @@ static void fw_dnld_timeout(struct timer_list *t)
}
static int process_state_reset(struct nfcmrvl_private *priv,
- struct sk_buff *skb)
+ const struct sk_buff *skb)
{
if (sizeof(nci_pattern_core_reset_ntf) != skb->len ||
memcmp(skb->data, nci_pattern_core_reset_ntf,
@@ -145,7 +145,8 @@ static int process_state_reset(struct nfcmrvl_private *priv,
return 0;
}
-static int process_state_init(struct nfcmrvl_private *priv, struct sk_buff *skb)
+static int process_state_init(struct nfcmrvl_private *priv,
+ const struct sk_buff *skb)
{
struct nci_core_set_config_cmd cmd;
@@ -175,7 +176,7 @@ static void create_lc(struct nfcmrvl_private *priv)
}
static int process_state_set_ref_clock(struct nfcmrvl_private *priv,
- struct sk_buff *skb)
+ const struct sk_buff *skb)
{
struct nci_core_set_config_cmd cmd;
@@ -221,7 +222,7 @@ static int process_state_set_ref_clock(struct nfcmrvl_private *priv,
}
static int process_state_set_hi_config(struct nfcmrvl_private *priv,
- struct sk_buff *skb)
+ const struct sk_buff *skb)
{
if (sizeof(nci_pattern_core_set_config_rsp) != skb->len ||
memcmp(skb->data, nci_pattern_core_set_config_rsp, skb->len))
@@ -232,7 +233,7 @@ static int process_state_set_hi_config(struct nfcmrvl_private *priv,
}
static int process_state_open_lc(struct nfcmrvl_private *priv,
- struct sk_buff *skb)
+ const struct sk_buff *skb)
{
if (sizeof(nci_pattern_core_conn_create_rsp) >= skb->len ||
memcmp(skb->data, nci_pattern_core_conn_create_rsp,
@@ -347,7 +348,7 @@ static int process_state_fw_dnld(struct nfcmrvl_private *priv,
}
static int process_state_close_lc(struct nfcmrvl_private *priv,
- struct sk_buff *skb)
+ const struct sk_buff *skb)
{
if (sizeof(nci_pattern_core_conn_close_rsp) != skb->len ||
memcmp(skb->data, nci_pattern_core_conn_close_rsp, skb->len))
@@ -358,7 +359,8 @@ static int process_state_close_lc(struct nfcmrvl_private *priv,
return 0;
}
-static int process_state_boot(struct nfcmrvl_private *priv, struct sk_buff *skb)
+static int process_state_boot(struct nfcmrvl_private *priv,
+ const struct sk_buff *skb)
{
if (sizeof(nci_pattern_proprietary_boot_rsp) != skb->len ||
memcmp(skb->data, nci_pattern_proprietary_boot_rsp, skb->len))
diff --git a/drivers/nfc/nfcmrvl/i2c.c b/drivers/nfc/nfcmrvl/i2c.c
index 59a529e72d96..c38b228006fd 100644
--- a/drivers/nfc/nfcmrvl/i2c.c
+++ b/drivers/nfc/nfcmrvl/i2c.c
@@ -146,7 +146,7 @@ static void nfcmrvl_i2c_nci_update_config(struct nfcmrvl_private *priv,
{
}
-static struct nfcmrvl_if_ops i2c_ops = {
+static const struct nfcmrvl_if_ops i2c_ops = {
.nci_open = nfcmrvl_i2c_nci_open,
.nci_close = nfcmrvl_i2c_nci_close,
.nci_send = nfcmrvl_i2c_nci_send,
@@ -182,8 +182,8 @@ static int nfcmrvl_i2c_parse_dt(struct device_node *node,
static int nfcmrvl_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
+ const struct nfcmrvl_platform_data *pdata;
struct nfcmrvl_i2c_drv_data *drv_data;
- struct nfcmrvl_platform_data *pdata;
struct nfcmrvl_platform_data config;
int ret;
diff --git a/drivers/nfc/nfcmrvl/main.c b/drivers/nfc/nfcmrvl/main.c
index a4620b480c4f..2fcf545012b1 100644
--- a/drivers/nfc/nfcmrvl/main.c
+++ b/drivers/nfc/nfcmrvl/main.c
@@ -81,7 +81,7 @@ static int nfcmrvl_nci_fw_download(struct nci_dev *ndev,
return nfcmrvl_fw_dnld_start(ndev, firmware_name);
}
-static struct nci_ops nfcmrvl_nci_ops = {
+static const struct nci_ops nfcmrvl_nci_ops = {
.open = nfcmrvl_nci_open,
.close = nfcmrvl_nci_close,
.send = nfcmrvl_nci_send,
@@ -91,9 +91,9 @@ static struct nci_ops nfcmrvl_nci_ops = {
struct nfcmrvl_private *nfcmrvl_nci_register_dev(enum nfcmrvl_phy phy,
void *drv_data,
- struct nfcmrvl_if_ops *ops,
+ const struct nfcmrvl_if_ops *ops,
struct device *dev,
- struct nfcmrvl_platform_data *pdata)
+ const struct nfcmrvl_platform_data *pdata)
{
struct nfcmrvl_private *priv;
int rc;
diff --git a/drivers/nfc/nfcmrvl/nfcmrvl.h b/drivers/nfc/nfcmrvl/nfcmrvl.h
index a715543bc9bf..165bd0a95190 100644
--- a/drivers/nfc/nfcmrvl/nfcmrvl.h
+++ b/drivers/nfc/nfcmrvl/nfcmrvl.h
@@ -77,7 +77,7 @@ struct nfcmrvl_private {
/* PHY type */
enum nfcmrvl_phy phy;
/* Low level driver ops */
- struct nfcmrvl_if_ops *if_ops;
+ const struct nfcmrvl_if_ops *if_ops;
};
struct nfcmrvl_if_ops {
@@ -92,9 +92,9 @@ void nfcmrvl_nci_unregister_dev(struct nfcmrvl_private *priv);
int nfcmrvl_nci_recv_frame(struct nfcmrvl_private *priv, struct sk_buff *skb);
struct nfcmrvl_private *nfcmrvl_nci_register_dev(enum nfcmrvl_phy phy,
void *drv_data,
- struct nfcmrvl_if_ops *ops,
+ const struct nfcmrvl_if_ops *ops,
struct device *dev,
- struct nfcmrvl_platform_data *pdata);
+ const struct nfcmrvl_platform_data *pdata);
void nfcmrvl_chip_reset(struct nfcmrvl_private *priv);
diff --git a/drivers/nfc/nfcmrvl/spi.c b/drivers/nfc/nfcmrvl/spi.c
index 66696321c645..b182ab2e03c0 100644
--- a/drivers/nfc/nfcmrvl/spi.c
+++ b/drivers/nfc/nfcmrvl/spi.c
@@ -99,7 +99,7 @@ static void nfcmrvl_spi_nci_update_config(struct nfcmrvl_private *priv,
drv_data->nci_spi->xfer_speed_hz = config->clk;
}
-static struct nfcmrvl_if_ops spi_ops = {
+static const struct nfcmrvl_if_ops spi_ops = {
.nci_open = nfcmrvl_spi_nci_open,
.nci_close = nfcmrvl_spi_nci_close,
.nci_send = nfcmrvl_spi_nci_send,
@@ -129,7 +129,7 @@ static int nfcmrvl_spi_parse_dt(struct device_node *node,
static int nfcmrvl_spi_probe(struct spi_device *spi)
{
- struct nfcmrvl_platform_data *pdata;
+ const struct nfcmrvl_platform_data *pdata;
struct nfcmrvl_platform_data config;
struct nfcmrvl_spi_drv_data *drv_data;
int ret = 0;
diff --git a/drivers/nfc/nfcmrvl/uart.c b/drivers/nfc/nfcmrvl/uart.c
index 50d86c90b9dd..9c92cbdc42f0 100644
--- a/drivers/nfc/nfcmrvl/uart.c
+++ b/drivers/nfc/nfcmrvl/uart.c
@@ -49,7 +49,7 @@ static void nfcmrvl_uart_nci_update_config(struct nfcmrvl_private *priv,
config->flow_control);
}
-static struct nfcmrvl_if_ops uart_ops = {
+static const struct nfcmrvl_if_ops uart_ops = {
.nci_open = nfcmrvl_uart_nci_open,
.nci_close = nfcmrvl_uart_nci_close,
.nci_send = nfcmrvl_uart_nci_send,
@@ -98,8 +98,8 @@ static int nfcmrvl_uart_parse_dt(struct device_node *node,
static int nfcmrvl_nci_uart_open(struct nci_uart *nu)
{
struct nfcmrvl_private *priv;
- struct nfcmrvl_platform_data *pdata = NULL;
struct nfcmrvl_platform_data config;
+ const struct nfcmrvl_platform_data *pdata = NULL;
struct device *dev = nu->tty->dev;
/*
diff --git a/drivers/nfc/nfcmrvl/usb.c b/drivers/nfc/nfcmrvl/usb.c
index 9d649b45300b..a99aedff795d 100644
--- a/drivers/nfc/nfcmrvl/usb.c
+++ b/drivers/nfc/nfcmrvl/usb.c
@@ -264,7 +264,7 @@ done:
return err;
}
-static struct nfcmrvl_if_ops usb_ops = {
+static const struct nfcmrvl_if_ops usb_ops = {
.nci_open = nfcmrvl_usb_nci_open,
.nci_close = nfcmrvl_usb_nci_close,
.nci_send = nfcmrvl_usb_nci_send,
diff --git a/drivers/nfc/nfcsim.c b/drivers/nfc/nfcsim.c
index dd27c85190d3..85bf8d586c70 100644
--- a/drivers/nfc/nfcsim.c
+++ b/drivers/nfc/nfcsim.c
@@ -239,7 +239,7 @@ static int nfcsim_send(struct nfc_digital_dev *ddev, struct sk_buff *skb,
static void nfcsim_abort_cmd(struct nfc_digital_dev *ddev)
{
- struct nfcsim *dev = nfc_digital_get_drvdata(ddev);
+ const struct nfcsim *dev = nfc_digital_get_drvdata(ddev);
nfcsim_link_recv_cancel(dev->link_in);
}
@@ -319,7 +319,7 @@ static int nfcsim_tg_listen(struct nfc_digital_dev *ddev, u16 timeout,
return nfcsim_send(ddev, NULL, timeout, cb, arg);
}
-static struct nfc_digital_ops nfcsim_digital_ops = {
+static const struct nfc_digital_ops nfcsim_digital_ops = {
.in_configure_hw = nfcsim_in_configure_hw,
.in_send_cmd = nfcsim_in_send_cmd,
diff --git a/drivers/nfc/nxp-nci/core.c b/drivers/nfc/nxp-nci/core.c
index 2b0c7232e91f..518e2afb43a8 100644
--- a/drivers/nfc/nxp-nci/core.c
+++ b/drivers/nfc/nxp-nci/core.c
@@ -83,7 +83,7 @@ static int nxp_nci_send(struct nci_dev *ndev, struct sk_buff *skb)
return r;
}
-static struct nci_ops nxp_nci_ops = {
+static const struct nci_ops nxp_nci_ops = {
.open = nxp_nci_open,
.close = nxp_nci_close,
.send = nxp_nci_send,
diff --git a/drivers/nfc/pn533/pn533.c b/drivers/nfc/pn533/pn533.c
index cd64bfe20402..2f3f3fe9a0ba 100644
--- a/drivers/nfc/pn533/pn533.c
+++ b/drivers/nfc/pn533/pn533.c
@@ -2623,7 +2623,7 @@ static int pn533_dev_down(struct nfc_dev *nfc_dev)
return ret;
}
-static struct nfc_ops pn533_nfc_ops = {
+static const struct nfc_ops pn533_nfc_ops = {
.dev_up = pn533_dev_up,
.dev_down = pn533_dev_down,
.dep_link_up = pn533_dep_link_up,
diff --git a/drivers/nfc/pn544/i2c.c b/drivers/nfc/pn544/i2c.c
index de59e439c369..37d26f01986b 100644
--- a/drivers/nfc/pn544/i2c.c
+++ b/drivers/nfc/pn544/i2c.c
@@ -515,7 +515,7 @@ static irqreturn_t pn544_hci_i2c_irq_thread_fn(int irq, void *phy_id)
return IRQ_HANDLED;
}
-static struct nfc_phy_ops i2c_phy_ops = {
+static const struct nfc_phy_ops i2c_phy_ops = {
.write = pn544_hci_i2c_write,
.enable = pn544_hci_i2c_enable,
.disable = pn544_hci_i2c_disable,
diff --git a/drivers/nfc/pn544/pn544.c b/drivers/nfc/pn544/pn544.c
index b788870473e8..092f03b80a78 100644
--- a/drivers/nfc/pn544/pn544.c
+++ b/drivers/nfc/pn544/pn544.c
@@ -86,7 +86,7 @@ enum pn544_state {
#define PN544_HCI_CMD_ATTREQUEST 0x12
#define PN544_HCI_CMD_CONTINUE_ACTIVATION 0x13
-static struct nfc_hci_gate pn544_gates[] = {
+static const struct nfc_hci_gate pn544_gates[] = {
{NFC_HCI_ADMIN_GATE, NFC_HCI_INVALID_PIPE},
{NFC_HCI_LOOPBACK_GATE, NFC_HCI_INVALID_PIPE},
{NFC_HCI_ID_MGMT_GATE, NFC_HCI_INVALID_PIPE},
@@ -108,7 +108,7 @@ static struct nfc_hci_gate pn544_gates[] = {
#define PN544_CMDS_HEADROOM 2
struct pn544_hci_info {
- struct nfc_phy_ops *phy_ops;
+ const struct nfc_phy_ops *phy_ops;
void *phy_id;
struct nfc_hci_dev *hdev;
@@ -809,7 +809,7 @@ static int pn544_hci_discover_se(struct nfc_hci_dev *hdev)
#define PN544_SE_MODE_ON 0x01
static int pn544_hci_enable_se(struct nfc_hci_dev *hdev, u32 se_idx)
{
- struct nfc_se *se;
+ const struct nfc_se *se;
u8 enable = PN544_SE_MODE_ON;
static struct uicc_gatelist {
u8 head;
@@ -864,7 +864,7 @@ static int pn544_hci_enable_se(struct nfc_hci_dev *hdev, u32 se_idx)
static int pn544_hci_disable_se(struct nfc_hci_dev *hdev, u32 se_idx)
{
- struct nfc_se *se;
+ const struct nfc_se *se;
u8 disable = PN544_SE_MODE_OFF;
se = nfc_find_se(hdev->ndev, se_idx);
@@ -881,7 +881,7 @@ static int pn544_hci_disable_se(struct nfc_hci_dev *hdev, u32 se_idx)
}
}
-static struct nfc_hci_ops pn544_hci_ops = {
+static const struct nfc_hci_ops pn544_hci_ops = {
.open = pn544_hci_open,
.close = pn544_hci_close,
.hci_ready = pn544_hci_ready,
@@ -901,9 +901,10 @@ static struct nfc_hci_ops pn544_hci_ops = {
.disable_se = pn544_hci_disable_se,
};
-int pn544_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name,
- int phy_headroom, int phy_tailroom, int phy_payload,
- fw_download_t fw_download, struct nfc_hci_dev **hdev)
+int pn544_hci_probe(void *phy_id, const struct nfc_phy_ops *phy_ops,
+ char *llc_name, int phy_headroom, int phy_tailroom,
+ int phy_payload, fw_download_t fw_download,
+ struct nfc_hci_dev **hdev)
{
struct pn544_hci_info *info;
u32 protocols;
diff --git a/drivers/nfc/pn544/pn544.h b/drivers/nfc/pn544/pn544.h
index 5634ba215ead..c6fe3e11e0c8 100644
--- a/drivers/nfc/pn544/pn544.h
+++ b/drivers/nfc/pn544/pn544.h
@@ -16,9 +16,10 @@
typedef int (*fw_download_t)(void *context, const char *firmware_name,
u8 hw_variant);
-int pn544_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name,
- int phy_headroom, int phy_tailroom, int phy_payload,
- fw_download_t fw_download, struct nfc_hci_dev **hdev);
+int pn544_hci_probe(void *phy_id, const struct nfc_phy_ops *phy_ops,
+ char *llc_name, int phy_headroom, int phy_tailroom,
+ int phy_payload, fw_download_t fw_download,
+ struct nfc_hci_dev **hdev);
void pn544_hci_remove(struct nfc_hci_dev *hdev);
#endif /* __LOCAL_PN544_H_ */
diff --git a/drivers/nfc/port100.c b/drivers/nfc/port100.c
index 4df926cc37d0..517376c43b86 100644
--- a/drivers/nfc/port100.c
+++ b/drivers/nfc/port100.c
@@ -217,7 +217,7 @@ struct port100_protocol {
u8 value;
} __packed;
-static struct port100_protocol
+static const struct port100_protocol
in_protocols[][PORT100_IN_MAX_NUM_PROTOCOLS + 1] = {
[NFC_DIGITAL_FRAMING_NFCA_SHORT] = {
{ PORT100_IN_PROT_INITIAL_GUARD_TIME, 6 },
@@ -391,7 +391,7 @@ in_protocols[][PORT100_IN_MAX_NUM_PROTOCOLS + 1] = {
},
};
-static struct port100_protocol
+static const struct port100_protocol
tg_protocols[][PORT100_TG_MAX_NUM_PROTOCOLS + 1] = {
[NFC_DIGITAL_FRAMING_NFCA_SHORT] = {
{ PORT100_TG_PROT_END, 0 },
@@ -526,7 +526,7 @@ static inline u8 port100_checksum(u16 value)
}
/* The rule: sum(data elements) + checksum = 0 */
-static u8 port100_data_checksum(u8 *data, int datalen)
+static u8 port100_data_checksum(const u8 *data, int datalen)
{
u8 sum = 0;
int i;
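To make the checksum rule above concrete (sum of the data elements plus the checksum wraps to zero modulo 256): the checksum is simply the two's complement of the byte sum. A minimal standalone sketch, illustration only and not part of the patch:

#include <stdint.h>

/* Illustration of the rule: sum(data) + checksum == 0 (mod 256). */
static uint8_t example_checksum(const uint8_t *data, int datalen)
{
	uint8_t sum = 0;
	int i;

	for (i = 0; i < datalen; i++)
		sum += data[i];
	return (uint8_t)-sum;	/* two's complement: sum + (-sum) == 0 mod 256 */
}

/* e.g. {0x01, 0x02, 0x03} -> checksum 0xFA, since 0x06 + 0xFA == 0x100. */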
@@ -568,10 +568,10 @@ static void port100_tx_update_payload_len(void *_frame, int len)
le16_add_cpu(&frame->datalen, len);
}
-static bool port100_rx_frame_is_valid(void *_frame)
+static bool port100_rx_frame_is_valid(const void *_frame)
{
u8 checksum;
- struct port100_frame *frame = _frame;
+ const struct port100_frame *frame = _frame;
if (frame->start_frame != cpu_to_be16(PORT100_FRAME_SOF) ||
frame->extended_frame != cpu_to_be16(PORT100_FRAME_EXT))
@@ -589,23 +589,24 @@ static bool port100_rx_frame_is_valid(void *_frame)
return true;
}
-static bool port100_rx_frame_is_ack(struct port100_ack_frame *frame)
+static bool port100_rx_frame_is_ack(const struct port100_ack_frame *frame)
{
return (frame->start_frame == cpu_to_be16(PORT100_FRAME_SOF) &&
frame->ack_frame == cpu_to_be16(PORT100_FRAME_ACK));
}
-static inline int port100_rx_frame_size(void *frame)
+static inline int port100_rx_frame_size(const void *frame)
{
- struct port100_frame *f = frame;
+ const struct port100_frame *f = frame;
return sizeof(struct port100_frame) + le16_to_cpu(f->datalen) +
PORT100_FRAME_TAIL_LEN;
}
-static bool port100_rx_frame_is_cmd_response(struct port100 *dev, void *frame)
+static bool port100_rx_frame_is_cmd_response(const struct port100 *dev,
+ const void *frame)
{
- struct port100_frame *f = frame;
+ const struct port100_frame *f = frame;
return (PORT100_FRAME_CMD(f) == PORT100_CMD_RESPONSE(dev->cmd->code));
}
@@ -655,7 +656,8 @@ sched_wq:
schedule_work(&dev->cmd_complete_work);
}
-static int port100_submit_urb_for_response(struct port100 *dev, gfp_t flags)
+static int port100_submit_urb_for_response(const struct port100 *dev,
+ gfp_t flags)
{
dev->in_urb->complete = port100_recv_response;
@@ -666,7 +668,7 @@ static void port100_recv_ack(struct urb *urb)
{
struct port100 *dev = urb->context;
struct port100_cmd *cmd = dev->cmd;
- struct port100_ack_frame *in_frame;
+ const struct port100_ack_frame *in_frame;
int rc;
cmd->status = urb->status;
@@ -708,7 +710,7 @@ sched_wq:
schedule_work(&dev->cmd_complete_work);
}
-static int port100_submit_urb_for_ack(struct port100 *dev, gfp_t flags)
+static int port100_submit_urb_for_ack(const struct port100 *dev, gfp_t flags)
{
dev->in_urb->complete = port100_recv_ack;
@@ -753,8 +755,9 @@ static int port100_send_ack(struct port100 *dev)
return rc;
}
-static int port100_send_frame_async(struct port100 *dev, struct sk_buff *out,
- struct sk_buff *in, int in_len)
+static int port100_send_frame_async(struct port100 *dev,
+ const struct sk_buff *out,
+ const struct sk_buff *in, int in_len)
{
int rc;
@@ -960,7 +963,7 @@ static void port100_abort_cmd(struct nfc_digital_dev *ddev)
usb_kill_urb(dev->in_urb);
}
-static struct sk_buff *port100_alloc_skb(struct port100 *dev, unsigned int size)
+static struct sk_buff *port100_alloc_skb(const struct port100 *dev, unsigned int size)
{
struct sk_buff *skb;
@@ -1098,7 +1101,7 @@ static int port100_in_set_rf(struct nfc_digital_dev *ddev, u8 rf)
static int port100_in_set_framing(struct nfc_digital_dev *ddev, int param)
{
struct port100 *dev = nfc_digital_get_drvdata(ddev);
- struct port100_protocol *protocols;
+ const struct port100_protocol *protocols;
struct sk_buff *skb;
struct sk_buff *resp;
int num_protocols;
@@ -1152,7 +1155,7 @@ static int port100_in_configure_hw(struct nfc_digital_dev *ddev, int type,
static void port100_in_comm_rf_complete(struct port100 *dev, void *arg,
struct sk_buff *resp)
{
- struct port100_cb_arg *cb_arg = arg;
+ const struct port100_cb_arg *cb_arg = arg;
nfc_digital_cmd_complete_t cb = cb_arg->complete_cb;
u32 status;
int rc;
@@ -1255,7 +1258,7 @@ static int port100_tg_set_rf(struct nfc_digital_dev *ddev, u8 rf)
static int port100_tg_set_framing(struct nfc_digital_dev *ddev, int param)
{
struct port100 *dev = nfc_digital_get_drvdata(ddev);
- struct port100_protocol *protocols;
+ const struct port100_protocol *protocols;
struct sk_buff *skb;
struct sk_buff *resp;
int rc;
@@ -1330,7 +1333,7 @@ static void port100_tg_comm_rf_complete(struct port100 *dev, void *arg,
struct sk_buff *resp)
{
u32 status;
- struct port100_cb_arg *cb_arg = arg;
+ const struct port100_cb_arg *cb_arg = arg;
nfc_digital_cmd_complete_t cb = cb_arg->complete_cb;
struct port100_tg_comm_rf_res *hdr;
@@ -1453,7 +1456,7 @@ static int port100_listen_mdaa(struct nfc_digital_dev *ddev,
static int port100_listen(struct nfc_digital_dev *ddev, u16 timeout,
nfc_digital_cmd_complete_t cb, void *arg)
{
- struct port100 *dev = nfc_digital_get_drvdata(ddev);
+ const struct port100 *dev = nfc_digital_get_drvdata(ddev);
struct sk_buff *skb;
skb = port100_alloc_skb(dev, 0);
@@ -1463,7 +1466,7 @@ static int port100_listen(struct nfc_digital_dev *ddev, u16 timeout,
return port100_tg_send_cmd(ddev, skb, timeout, cb, arg);
}
-static struct nfc_digital_ops port100_digital_ops = {
+static const struct nfc_digital_ops port100_digital_ops = {
.in_configure_hw = port100_in_configure_hw,
.in_send_cmd = port100_in_send_cmd,
diff --git a/drivers/nfc/s3fwrn5/core.c b/drivers/nfc/s3fwrn5/core.c
index 865d3e3d1528..1c412007fabb 100644
--- a/drivers/nfc/s3fwrn5/core.c
+++ b/drivers/nfc/s3fwrn5/core.c
@@ -143,11 +143,13 @@ static int s3fwrn5_nci_post_setup(struct nci_dev *ndev)
return nci_core_init(info->ndev);
}
-static struct nci_ops s3fwrn5_nci_ops = {
+static const struct nci_ops s3fwrn5_nci_ops = {
.open = s3fwrn5_nci_open,
.close = s3fwrn5_nci_close,
.send = s3fwrn5_nci_send,
.post_setup = s3fwrn5_nci_post_setup,
+ .prop_ops = s3fwrn5_nci_prop_ops,
+ .n_prop_ops = ARRAY_SIZE(s3fwrn5_nci_prop_ops),
};
int s3fwrn5_probe(struct nci_dev **ndev, void *phy_id, struct device *pdev,
@@ -167,9 +169,6 @@ int s3fwrn5_probe(struct nci_dev **ndev, void *phy_id, struct device *pdev,
s3fwrn5_set_mode(info, S3FWRN5_MODE_COLD);
- s3fwrn5_nci_get_prop_ops(&s3fwrn5_nci_ops.prop_ops,
- &s3fwrn5_nci_ops.n_prop_ops);
-
info->ndev = nci_allocate_device(&s3fwrn5_nci_ops,
S3FWRN5_NFC_PROTOCOLS, 0, 0);
if (!info->ndev)
diff --git a/drivers/nfc/s3fwrn5/firmware.c b/drivers/nfc/s3fwrn5/firmware.c
index e3e72b8a29e3..1af7a1e632cf 100644
--- a/drivers/nfc/s3fwrn5/firmware.c
+++ b/drivers/nfc/s3fwrn5/firmware.c
@@ -421,10 +421,9 @@ int s3fwrn5_fw_download(struct s3fwrn5_fw_info *fw_info)
tfm = crypto_alloc_shash("sha1", 0, 0);
if (IS_ERR(tfm)) {
- ret = PTR_ERR(tfm);
dev_err(&fw_info->ndev->nfc_dev->dev,
"Cannot allocate shash (code=%pe)\n", tfm);
- goto out;
+ return PTR_ERR(tfm);
}
ret = crypto_shash_tfm_digest(tfm, fw->image, image_size, hash_data);
@@ -433,7 +432,7 @@ int s3fwrn5_fw_download(struct s3fwrn5_fw_info *fw_info)
if (ret) {
dev_err(&fw_info->ndev->nfc_dev->dev,
"Cannot compute hash (code=%d)\n", ret);
- goto out;
+ return ret;
}
/* Firmware update process */
@@ -446,7 +445,7 @@ int s3fwrn5_fw_download(struct s3fwrn5_fw_info *fw_info)
if (ret < 0) {
dev_err(&fw_info->ndev->nfc_dev->dev,
"Unable to enter update mode\n");
- goto out;
+ return ret;
}
for (off = 0; off < image_size; off += fw_info->sector_size) {
@@ -455,7 +454,7 @@ int s3fwrn5_fw_download(struct s3fwrn5_fw_info *fw_info)
if (ret < 0) {
dev_err(&fw_info->ndev->nfc_dev->dev,
"Firmware update error (code=%d)\n", ret);
- goto out;
+ return ret;
}
}
@@ -463,13 +462,12 @@ int s3fwrn5_fw_download(struct s3fwrn5_fw_info *fw_info)
if (ret < 0) {
dev_err(&fw_info->ndev->nfc_dev->dev,
"Unable to complete update mode\n");
- goto out;
+ return ret;
}
dev_info(&fw_info->ndev->nfc_dev->dev,
"Firmware update: success\n");
-out:
return ret;
}
diff --git a/drivers/nfc/s3fwrn5/nci.c b/drivers/nfc/s3fwrn5/nci.c
index f042d3eaf8f6..e374e670b36b 100644
--- a/drivers/nfc/s3fwrn5/nci.c
+++ b/drivers/nfc/s3fwrn5/nci.c
@@ -20,7 +20,7 @@ static int s3fwrn5_nci_prop_rsp(struct nci_dev *ndev, struct sk_buff *skb)
return 0;
}
-static struct nci_driver_ops s3fwrn5_nci_prop_ops[] = {
+const struct nci_driver_ops s3fwrn5_nci_prop_ops[4] = {
{
.opcode = nci_opcode_pack(NCI_GID_PROPRIETARY,
NCI_PROP_SET_RFREG),
@@ -43,12 +43,6 @@ static struct nci_driver_ops s3fwrn5_nci_prop_ops[] = {
},
};
-void s3fwrn5_nci_get_prop_ops(struct nci_driver_ops **ops, size_t *n)
-{
- *ops = s3fwrn5_nci_prop_ops;
- *n = ARRAY_SIZE(s3fwrn5_nci_prop_ops);
-}
-
#define S3FWRN5_RFREG_SECTION_SIZE 252
int s3fwrn5_nci_rf_configure(struct s3fwrn5_info *info, const char *fw_name)
diff --git a/drivers/nfc/s3fwrn5/nci.h b/drivers/nfc/s3fwrn5/nci.h
index a80f0fb082a8..c2d906591e9e 100644
--- a/drivers/nfc/s3fwrn5/nci.h
+++ b/drivers/nfc/s3fwrn5/nci.h
@@ -50,7 +50,7 @@ struct nci_prop_fw_cfg_rsp {
__u8 status;
};
-void s3fwrn5_nci_get_prop_ops(struct nci_driver_ops **ops, size_t *n);
+extern const struct nci_driver_ops s3fwrn5_nci_prop_ops[4];
int s3fwrn5_nci_rf_configure(struct s3fwrn5_info *info, const char *fw_name);
#endif /* __LOCAL_S3FWRN5_NCI_H_ */
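The s3fwrn5 change replaces a runtime getter with a const array whose bound is spelled out in the header: the explicit size gives the extern declaration a complete array type, so ARRAY_SIZE() works in other translation units, and const lets the table live in rodata. A standalone sketch of the pattern, with hypothetical names:

/* example_ops.h (hypothetical) */
struct example_op { int opcode; int (*handler)(void *ctx); };
extern const struct example_op example_prop_ops[4];

/* example_ops.c */
const struct example_op example_prop_ops[4] = {
	{ .opcode = 0x20 },
	{ .opcode = 0x21 },
	{ .opcode = 0x22 },
	{ .opcode = 0x23 },
};

/* consumer.c: sizeof needs a complete array type, which the
 * explicit [4] in the extern declaration provides. */
#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
static const unsigned int n_ops = ARRAY_SIZE(example_prop_ops);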
diff --git a/drivers/nfc/st-nci/core.c b/drivers/nfc/st-nci/core.c
index 110ff1281e5f..72bb51efdf9c 100644
--- a/drivers/nfc/st-nci/core.c
+++ b/drivers/nfc/st-nci/core.c
@@ -86,7 +86,7 @@ static int st_nci_prop_rsp_packet(struct nci_dev *ndev,
return 0;
}
-static struct nci_driver_ops st_nci_prop_ops[] = {
+static const struct nci_driver_ops st_nci_prop_ops[] = {
{
.opcode = nci_opcode_pack(NCI_GID_PROPRIETARY,
ST_NCI_CORE_PROP),
@@ -94,7 +94,7 @@ static struct nci_driver_ops st_nci_prop_ops[] = {
},
};
-static struct nci_ops st_nci_ops = {
+static const struct nci_ops st_nci_ops = {
.init = st_nci_init,
.open = st_nci_open,
.close = st_nci_close,
@@ -131,6 +131,7 @@ int st_nci_probe(struct llt_ndlc *ndlc, int phy_headroom,
| NFC_PROTO_ISO15693_MASK
| NFC_PROTO_NFC_DEP_MASK;
+ BUILD_BUG_ON(ARRAY_SIZE(st_nci_prop_ops) > NCI_MAX_PROPRIETARY_CMD);
ndlc->ndev = nci_allocate_device(&st_nci_ops, protocols,
phy_headroom, phy_tailroom);
if (!ndlc->ndev) {
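The added BUILD_BUG_ON() turns "more proprietary ops than NCI_MAX_PROPRIETARY_CMD" into a build failure instead of a runtime surprise. A simplified stand-in for the kernel macro, which lives in <linux/build_bug.h> and produces a nicer diagnostic:

#include <stddef.h>

/* A negative array size makes compilation fail when cond is true. */
#define MY_BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

static const int prop_ops[3] = { 1, 2, 3 };

int main(void)
{
	/* Compiles only while the table fits the limit. */
	MY_BUILD_BUG_ON(sizeof(prop_ops) / sizeof(prop_ops[0]) > 4);
	return 0;
}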
diff --git a/drivers/nfc/st-nci/i2c.c b/drivers/nfc/st-nci/i2c.c
index 46981405e8b1..ccf6152ebb9f 100644
--- a/drivers/nfc/st-nci/i2c.c
+++ b/drivers/nfc/st-nci/i2c.c
@@ -186,7 +186,7 @@ static irqreturn_t st_nci_irq_thread_fn(int irq, void *phy_id)
return IRQ_HANDLED;
}
-static struct nfc_phy_ops i2c_phy_ops = {
+static const struct nfc_phy_ops i2c_phy_ops = {
.write = st_nci_i2c_write,
.enable = st_nci_i2c_enable,
.disable = st_nci_i2c_disable,
diff --git a/drivers/nfc/st-nci/ndlc.c b/drivers/nfc/st-nci/ndlc.c
index 5d74c674368a..e9dc313b333e 100644
--- a/drivers/nfc/st-nci/ndlc.c
+++ b/drivers/nfc/st-nci/ndlc.c
@@ -253,9 +253,9 @@ static void ndlc_t2_timeout(struct timer_list *t)
schedule_work(&ndlc->sm_work);
}
-int ndlc_probe(void *phy_id, struct nfc_phy_ops *phy_ops, struct device *dev,
- int phy_headroom, int phy_tailroom, struct llt_ndlc **ndlc_id,
- struct st_nci_se_status *se_status)
+int ndlc_probe(void *phy_id, const struct nfc_phy_ops *phy_ops,
+ struct device *dev, int phy_headroom, int phy_tailroom,
+ struct llt_ndlc **ndlc_id, struct st_nci_se_status *se_status)
{
struct llt_ndlc *ndlc;
diff --git a/drivers/nfc/st-nci/ndlc.h b/drivers/nfc/st-nci/ndlc.h
index 066e2fd75238..c24ce9b0df52 100644
--- a/drivers/nfc/st-nci/ndlc.h
+++ b/drivers/nfc/st-nci/ndlc.h
@@ -16,7 +16,7 @@ struct st_nci_se_status;
/* Low Level Transport description */
struct llt_ndlc {
struct nci_dev *ndev;
- struct nfc_phy_ops *ops;
+ const struct nfc_phy_ops *ops;
void *phy_id;
struct timer_list t1_timer;
@@ -45,8 +45,8 @@ int ndlc_open(struct llt_ndlc *ndlc);
void ndlc_close(struct llt_ndlc *ndlc);
int ndlc_send(struct llt_ndlc *ndlc, struct sk_buff *skb);
void ndlc_recv(struct llt_ndlc *ndlc, struct sk_buff *skb);
-int ndlc_probe(void *phy_id, struct nfc_phy_ops *phy_ops, struct device *dev,
- int phy_headroom, int phy_tailroom, struct llt_ndlc **ndlc_id,
- struct st_nci_se_status *se_status);
+int ndlc_probe(void *phy_id, const struct nfc_phy_ops *phy_ops,
+ struct device *dev, int phy_headroom, int phy_tailroom,
+ struct llt_ndlc **ndlc_id, struct st_nci_se_status *se_status);
void ndlc_remove(struct llt_ndlc *ndlc);
#endif /* __LOCAL_NDLC_H__ */
diff --git a/drivers/nfc/st-nci/spi.c b/drivers/nfc/st-nci/spi.c
index 250d56f204c3..a620c34790e6 100644
--- a/drivers/nfc/st-nci/spi.c
+++ b/drivers/nfc/st-nci/spi.c
@@ -198,7 +198,7 @@ static irqreturn_t st_nci_irq_thread_fn(int irq, void *phy_id)
return IRQ_HANDLED;
}
-static struct nfc_phy_ops spi_phy_ops = {
+static const struct nfc_phy_ops spi_phy_ops = {
.write = st_nci_spi_write,
.enable = st_nci_spi_enable,
.disable = st_nci_spi_disable,
diff --git a/drivers/nfc/st-nci/vendor_cmds.c b/drivers/nfc/st-nci/vendor_cmds.c
index 94b600029a2a..30d2912d1a05 100644
--- a/drivers/nfc/st-nci/vendor_cmds.c
+++ b/drivers/nfc/st-nci/vendor_cmds.c
@@ -371,7 +371,7 @@ static int st_nci_manufacturer_specific(struct nfc_dev *dev, void *data,
return nfc_vendor_cmd_reply(msg);
}
-static struct nfc_vendor_cmd st_nci_vendor_cmds[] = {
+static const struct nfc_vendor_cmd st_nci_vendor_cmds[] = {
{
.vendor_id = ST_NCI_VENDOR_OUI,
.subcmd = FACTORY_MODE,
diff --git a/drivers/nfc/st21nfca/core.c b/drivers/nfc/st21nfca/core.c
index 6ca0d2f56b18..5e6c99fcfd27 100644
--- a/drivers/nfc/st21nfca/core.c
+++ b/drivers/nfc/st21nfca/core.c
@@ -72,7 +72,7 @@
static DECLARE_BITMAP(dev_mask, ST21NFCA_NUM_DEVICES);
-static struct nfc_hci_gate st21nfca_gates[] = {
+static const struct nfc_hci_gate st21nfca_gates[] = {
{NFC_HCI_ADMIN_GATE, NFC_HCI_ADMIN_PIPE},
{NFC_HCI_LINK_MGMT_GATE, NFC_HCI_LINK_MGMT_PIPE},
{ST21NFCA_DEVICE_MGNT_GATE, ST21NFCA_DEVICE_MGNT_PIPE},
@@ -912,7 +912,7 @@ static int st21nfca_hci_event_received(struct nfc_hci_dev *hdev, u8 pipe,
}
}
-static struct nfc_hci_ops st21nfca_hci_ops = {
+static const struct nfc_hci_ops st21nfca_hci_ops = {
.open = st21nfca_hci_open,
.close = st21nfca_hci_close,
.load_session = st21nfca_hci_load_session,
@@ -935,7 +935,7 @@ static struct nfc_hci_ops st21nfca_hci_ops = {
.se_io = st21nfca_hci_se_io,
};
-int st21nfca_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops,
+int st21nfca_hci_probe(void *phy_id, const struct nfc_phy_ops *phy_ops,
char *llc_name, int phy_headroom, int phy_tailroom,
int phy_payload, struct nfc_hci_dev **hdev,
struct st21nfca_se_status *se_status)
diff --git a/drivers/nfc/st21nfca/i2c.c b/drivers/nfc/st21nfca/i2c.c
index 7a9f4d71707e..1b44a37a71aa 100644
--- a/drivers/nfc/st21nfca/i2c.c
+++ b/drivers/nfc/st21nfca/i2c.c
@@ -76,8 +76,8 @@ struct st21nfca_i2c_phy {
struct mutex phy_lock;
};
-static u8 len_seq[] = { 16, 24, 12, 29 };
-static u16 wait_tab[] = { 2, 3, 5, 15, 20, 40};
+static const u8 len_seq[] = { 16, 24, 12, 29 };
+static const u16 wait_tab[] = { 2, 3, 5, 15, 20, 40 };

#define I2C_DUMP_SKB(info, skb) \
do { \
@@ -482,7 +482,7 @@ static irqreturn_t st21nfca_hci_irq_thread_fn(int irq, void *phy_id)
return IRQ_HANDLED;
}
-static struct nfc_phy_ops i2c_phy_ops = {
+static const struct nfc_phy_ops i2c_phy_ops = {
.write = st21nfca_hci_i2c_write,
.enable = st21nfca_hci_i2c_enable,
.disable = st21nfca_hci_i2c_disable,
diff --git a/drivers/nfc/st21nfca/st21nfca.h b/drivers/nfc/st21nfca/st21nfca.h
index 5e0de0fef1d4..cb6ad916be91 100644
--- a/drivers/nfc/st21nfca/st21nfca.h
+++ b/drivers/nfc/st21nfca/st21nfca.h
@@ -144,7 +144,7 @@ struct st21nfca_se_info {
};
struct st21nfca_hci_info {
- struct nfc_phy_ops *phy_ops;
+ const struct nfc_phy_ops *phy_ops;
void *phy_id;
struct nfc_hci_dev *hdev;
@@ -163,7 +163,7 @@ struct st21nfca_hci_info {
struct st21nfca_vendor_info vendor_info;
};
-int st21nfca_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops,
+int st21nfca_hci_probe(void *phy_id, const struct nfc_phy_ops *phy_ops,
char *llc_name, int phy_headroom, int phy_tailroom,
int phy_payload, struct nfc_hci_dev **hdev,
struct st21nfca_se_status *se_status);
diff --git a/drivers/nfc/st21nfca/vendor_cmds.c b/drivers/nfc/st21nfca/vendor_cmds.c
index 62332ca91554..74882866dbaf 100644
--- a/drivers/nfc/st21nfca/vendor_cmds.c
+++ b/drivers/nfc/st21nfca/vendor_cmds.c
@@ -295,7 +295,7 @@ exit:
return r;
}
-static struct nfc_vendor_cmd st21nfca_vendor_cmds[] = {
+static const struct nfc_vendor_cmd st21nfca_vendor_cmds[] = {
{
.vendor_id = ST21NFCA_VENDOR_OUI,
.subcmd = FACTORY_MODE,
diff --git a/drivers/nfc/st95hf/core.c b/drivers/nfc/st95hf/core.c
index 2dc788c363fd..993818742570 100644
--- a/drivers/nfc/st95hf/core.c
+++ b/drivers/nfc/st95hf/core.c
@@ -1037,7 +1037,7 @@ static void st95hf_abort_cmd(struct nfc_digital_dev *ddev)
{
}
-static struct nfc_digital_ops st95hf_nfc_digital_ops = {
+static const struct nfc_digital_ops st95hf_nfc_digital_ops = {
.in_configure_hw = st95hf_in_configure_hw,
.in_send_cmd = st95hf_in_send_cmd,
diff --git a/drivers/nfc/trf7970a.c b/drivers/nfc/trf7970a.c
index 33978022ae47..8890fcd59c39 100644
--- a/drivers/nfc/trf7970a.c
+++ b/drivers/nfc/trf7970a.c
@@ -643,7 +643,7 @@ static void trf7970a_send_err_upstream(struct trf7970a *trf, int errno)
}
static int trf7970a_transmit(struct trf7970a *trf, struct sk_buff *skb,
- unsigned int len, u8 *prefix,
+ unsigned int len, const u8 *prefix,
unsigned int prefix_len)
{
struct spi_transfer t[2];
@@ -1387,9 +1387,10 @@ static int trf7970a_is_iso15693_write_or_lock(u8 cmd)
}
}
-static int trf7970a_per_cmd_config(struct trf7970a *trf, struct sk_buff *skb)
+static int trf7970a_per_cmd_config(struct trf7970a *trf,
+ const struct sk_buff *skb)
{
- u8 *req = skb->data;
+ const u8 *req = skb->data;
u8 special_fcn_reg1, iso_ctrl;
int ret;
@@ -1791,7 +1792,7 @@ out_err:
static int trf7970a_tg_listen(struct nfc_digital_dev *ddev, u16 timeout,
nfc_digital_cmd_complete_t cb, void *arg)
{
- struct trf7970a *trf = nfc_digital_get_drvdata(ddev);
+ const struct trf7970a *trf = nfc_digital_get_drvdata(ddev);
dev_dbg(trf->dev, "Listen - state: %d, timeout: %d ms\n",
trf->state, timeout);
@@ -1803,7 +1804,7 @@ static int trf7970a_tg_listen_md(struct nfc_digital_dev *ddev,
u16 timeout, nfc_digital_cmd_complete_t cb,
void *arg)
{
- struct trf7970a *trf = nfc_digital_get_drvdata(ddev);
+ const struct trf7970a *trf = nfc_digital_get_drvdata(ddev);
int ret;
dev_dbg(trf->dev, "Listen MD - state: %d, timeout: %d ms\n",
@@ -1824,7 +1825,7 @@ static int trf7970a_tg_listen_md(struct nfc_digital_dev *ddev,
static int trf7970a_tg_get_rf_tech(struct nfc_digital_dev *ddev, u8 *rf_tech)
{
- struct trf7970a *trf = nfc_digital_get_drvdata(ddev);
+ const struct trf7970a *trf = nfc_digital_get_drvdata(ddev);
dev_dbg(trf->dev, "Get RF Tech - state: %d, rf_tech: %d\n",
trf->state, trf->md_rf_tech);
@@ -1861,7 +1862,7 @@ static void trf7970a_abort_cmd(struct nfc_digital_dev *ddev)
mutex_unlock(&trf->lock);
}
-static struct nfc_digital_ops trf7970a_nfc_ops = {
+static const struct nfc_digital_ops trf7970a_nfc_ops = {
.in_configure_hw = trf7970a_in_configure_hw,
.in_send_cmd = trf7970a_send_cmd,
.tg_configure_hw = trf7970a_tg_configure_hw,
@@ -1974,7 +1975,7 @@ static void trf7970a_shutdown(struct trf7970a *trf)
trf7970a_power_down(trf);
}
-static int trf7970a_get_autosuspend_delay(struct device_node *np)
+static int trf7970a_get_autosuspend_delay(const struct device_node *np)
{
int autosuspend_delay, ret;
@@ -1987,7 +1988,7 @@ static int trf7970a_get_autosuspend_delay(struct device_node *np)
static int trf7970a_probe(struct spi_device *spi)
{
- struct device_node *np = spi->dev.of_node;
+ const struct device_node *np = spi->dev.of_node;
struct trf7970a *trf;
int uvolts, autosuspend_delay, ret;
u32 clk_freq = TRF7970A_13MHZ_CLOCK_FREQUENCY;
diff --git a/drivers/nfc/virtual_ncidev.c b/drivers/nfc/virtual_ncidev.c
index f73ee0bf3593..221fa3bb8705 100644
--- a/drivers/nfc/virtual_ncidev.c
+++ b/drivers/nfc/virtual_ncidev.c
@@ -10,6 +10,7 @@
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
+#include <linux/wait.h>
#include <net/nfc/nci_core.h>
enum virtual_ncidev_mode {
@@ -27,6 +28,7 @@ enum virtual_ncidev_mode {
NFC_PROTO_ISO15693_MASK)
static enum virtual_ncidev_mode state;
+static DECLARE_WAIT_QUEUE_HEAD(wq);
static struct miscdevice miscdev;
static struct sk_buff *send_buff;
static struct nci_dev *ndev;
@@ -61,11 +63,12 @@ static int virtual_nci_send(struct nci_dev *ndev, struct sk_buff *skb)
}
send_buff = skb_copy(skb, GFP_KERNEL);
mutex_unlock(&nci_mutex);
+ wake_up_interruptible(&wq);
return 0;
}
-static struct nci_ops virtual_nci_ops = {
+static const struct nci_ops virtual_nci_ops = {
.open = virtual_nci_open,
.close = virtual_nci_close,
.send = virtual_nci_send
@@ -77,9 +80,11 @@ static ssize_t virtual_ncidev_read(struct file *file, char __user *buf,
size_t actual_len;
mutex_lock(&nci_mutex);
- if (!send_buff) {
+ while (!send_buff) {
mutex_unlock(&nci_mutex);
- return 0;
+ if (wait_event_interruptible(wq, send_buff))
+ return -EFAULT;
+ mutex_lock(&nci_mutex);
}
actual_len = min_t(size_t, count, send_buff->len);
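The rewritten read path is the standard sleep-until-data pattern: drop the mutex, sleep on the waitqueue until virtual_nci_send() sets send_buff and calls wake_up_interruptible(), then retake the mutex and recheck the condition. (wait_event_interruptible() returns nonzero when a signal interrupts the sleep; -ERESTARTSYS is the conventional errno for that case, while this driver returns -EFAULT.) A userspace analogue with pthreads, purely for illustration:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool data_ready;

/* Reader: the lock is dropped while asleep; recheck after each wakeup. */
static void consumer(void)
{
	pthread_mutex_lock(&lock);
	while (!data_ready)
		pthread_cond_wait(&cond, &lock);
	data_ready = false;	/* consume the data under the lock */
	pthread_mutex_unlock(&lock);
}

/* Writer: publish under the lock, then wake the reader,
 * cf. wake_up_interruptible(&wq) in virtual_nci_send(). */
static void producer(void)
{
	pthread_mutex_lock(&lock);
	data_ready = true;
	pthread_mutex_unlock(&lock);
	pthread_cond_signal(&cond);
}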
@@ -170,7 +175,7 @@ static int virtual_ncidev_close(struct inode *inode, struct file *file)
static long virtual_ncidev_ioctl(struct file *flip, unsigned int cmd,
unsigned long arg)
{
- struct nfc_dev *nfc_dev = ndev->nfc_dev;
+ const struct nfc_dev *nfc_dev = ndev->nfc_dev;
void __user *p = (void __user *)arg;
if (cmd != IOCTL_GET_NCIDEV_IDX)
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
index e085c255da0c..32660dc11354 100644
--- a/drivers/ptp/Kconfig
+++ b/drivers/ptp/Kconfig
@@ -8,6 +8,7 @@ menu "PTP clock support"
config PTP_1588_CLOCK
tristate "PTP clock support"
depends on NET && POSIX_TIMERS
+ default ETHERNET
select PPS
select NET_PTP_CLASSIFY
help
@@ -26,6 +27,18 @@ config PTP_1588_CLOCK
To compile this driver as a module, choose M here: the module
will be called ptp.
+config PTP_1588_CLOCK_OPTIONAL
+ tristate
+ default y if PTP_1588_CLOCK=n
+ default PTP_1588_CLOCK
+ help
+ Drivers that can optionally use the PTP_1588_CLOCK framework
+ should depend on this symbol to prevent them from being built
+ into vmlinux while the PTP support itself is in a loadable
+ module.
+ If PTP support is disabled, this dependency will still be
+ met, and drivers will fall back to dummy helpers.
+
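A driver that works with or without PTP declares "depends on PTP_1588_CLOCK_OPTIONAL" in its Kconfig entry and then calls the PTP API unconditionally; with CONFIG_PTP_1588_CLOCK disabled, <linux/ptp_clock_kernel.h> provides inline stubs (ptp_clock_register() returns NULL, for example). A minimal sketch of the consumer side, with hypothetical names:

#include <linux/err.h>
#include <linux/ptp_clock_kernel.h>

/* Hypothetical driver hook: PTP is optional, so a NULL clock
 * (the stub's return value when PTP is disabled) is not an error. */
static int example_setup_ptp(struct ptp_clock_info *info, struct device *dev)
{
	struct ptp_clock *clock;

	clock = ptp_clock_register(info, dev);
	if (IS_ERR(clock))
		return PTR_ERR(clock);
	return 0;	/* clock may be NULL: run without hardware timestamping */
}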
config PTP_1588_CLOCK_DTE
tristate "Broadcom DTE as PTP clock"
depends on PTP_1588_CLOCK
@@ -92,7 +105,7 @@ config PTP_1588_CLOCK_PCH
depends on X86_32 || COMPILE_TEST
depends on HAS_IOMEM && PCI
depends on NET
- imply PTP_1588_CLOCK
+ depends on PTP_1588_CLOCK
help
This driver adds support for using the PCH EG20T as a PTP
clock. The hardware supports time stamping of PTP packets
@@ -158,6 +171,15 @@ config PTP_1588_CLOCK_OCP
tristate "OpenCompute TimeCard as PTP clock"
depends on PTP_1588_CLOCK
depends on HAS_IOMEM && PCI
+ depends on SPI && I2C && MTD
+ depends on !S390
+ imply SPI_MEM
+ imply SPI_XILINX
+ imply MTD_SPI_NOR
+ imply I2C_XILINX
+ select SERIAL_8250
+ select NET_DEVLINK
+
default n
help
This driver adds support for an OpenCompute time card.
diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c
index 0d1034e3ed0f..caf9b37c5eb1 100644
--- a/drivers/ptp/ptp_ocp.c
+++ b/drivers/ptp/ptp_ocp.c
@@ -6,15 +6,29 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
+#include <linux/serial_8250.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
#include <linux/ptp_clock_kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/xilinx_spi.h>
+#include <net/devlink.h>
+#include <linux/i2c.h>
+#include <linux/mtd/mtd.h>
-static const struct pci_device_id ptp_ocp_pcidev_id[] = {
- { PCI_DEVICE(0x1d9b, 0x0400) },
- { 0 }
-};
-MODULE_DEVICE_TABLE(pci, ptp_ocp_pcidev_id);
+#ifndef PCI_VENDOR_ID_FACEBOOK
+#define PCI_VENDOR_ID_FACEBOOK 0x1d9b
+#endif
-#define OCP_REGISTER_OFFSET 0x01000000
+#ifndef PCI_DEVICE_ID_FACEBOOK_TIMECARD
+#define PCI_DEVICE_ID_FACEBOOK_TIMECARD 0x0400
+#endif
+
+static struct class timecard_class = {
+ .owner = THIS_MODULE,
+ .name = "timecard",
+};
struct ocp_reg {
u32 ctrl;
@@ -29,18 +43,29 @@ struct ocp_reg {
u32 __pad1[2];
u32 offset_ns;
u32 offset_window_ns;
+ u32 __pad2[2];
+ u32 drift_ns;
+ u32 drift_window_ns;
+ u32 __pad3[6];
+ u32 servo_offset_p;
+ u32 servo_offset_i;
+ u32 servo_drift_p;
+ u32 servo_drift_i;
};
#define OCP_CTRL_ENABLE BIT(0)
#define OCP_CTRL_ADJUST_TIME BIT(1)
#define OCP_CTRL_ADJUST_OFFSET BIT(2)
+#define OCP_CTRL_ADJUST_DRIFT BIT(3)
+#define OCP_CTRL_ADJUST_SERVO BIT(8)
#define OCP_CTRL_READ_TIME_REQ BIT(30)
#define OCP_CTRL_READ_TIME_DONE BIT(31)
#define OCP_STATUS_IN_SYNC BIT(0)
+#define OCP_STATUS_IN_HOLDOVER BIT(1)
#define OCP_SELECT_CLK_NONE 0
-#define OCP_SELECT_CLK_REG 6
+#define OCP_SELECT_CLK_REG 0xfe
struct tod_reg {
u32 ctrl;
@@ -55,8 +80,6 @@ struct tod_reg {
u32 leap;
};
-#define TOD_REGISTER_OFFSET 0x01050000
-
#define TOD_CTRL_PROTOCOL BIT(28)
#define TOD_CTRL_DISABLE_FMT_A BIT(17)
#define TOD_CTRL_DISABLE_FMT_B BIT(16)
@@ -68,16 +91,264 @@ struct tod_reg {
#define TOD_STATUS_UTC_VALID BIT(8)
#define TOD_STATUS_LEAP_VALID BIT(16)
+struct ts_reg {
+ u32 enable;
+ u32 error;
+ u32 polarity;
+ u32 version;
+ u32 __pad0[4];
+ u32 cable_delay;
+ u32 __pad1[3];
+ u32 intr;
+ u32 intr_mask;
+ u32 event_count;
+ u32 __pad2[1];
+ u32 ts_count;
+ u32 time_ns;
+ u32 time_sec;
+ u32 data_width;
+ u32 data;
+};
+
+struct pps_reg {
+ u32 ctrl;
+ u32 status;
+ u32 __pad0[6];
+ u32 cable_delay;
+};
+
+#define PPS_STATUS_FILTER_ERR BIT(0)
+#define PPS_STATUS_SUPERV_ERR BIT(1)
+
+struct img_reg {
+ u32 version;
+};
+
+struct ptp_ocp_flash_info {
+ const char *name;
+ int pci_offset;
+ int data_size;
+ void *data;
+};
+
+struct ptp_ocp_ext_info {
+ const char *name;
+ int index;
+ irqreturn_t (*irq_fcn)(int irq, void *priv);
+ int (*enable)(void *priv, bool enable);
+};
+
+struct ptp_ocp_ext_src {
+ void __iomem *mem;
+ struct ptp_ocp *bp;
+ struct ptp_ocp_ext_info *info;
+ int irq_vec;
+};
+
struct ptp_ocp {
struct pci_dev *pdev;
+ struct device dev;
spinlock_t lock;
- void __iomem *base;
struct ocp_reg __iomem *reg;
struct tod_reg __iomem *tod;
+ struct pps_reg __iomem *pps_to_ext;
+ struct pps_reg __iomem *pps_to_clk;
+ struct ptp_ocp_ext_src *pps;
+ struct ptp_ocp_ext_src *ts0;
+ struct ptp_ocp_ext_src *ts1;
+ struct img_reg __iomem *image;
struct ptp_clock *ptp;
struct ptp_clock_info ptp_info;
+ struct platform_device *i2c_ctrl;
+ struct platform_device *spi_flash;
+ struct clk_hw *i2c_clk;
+ struct timer_list watchdog;
+ time64_t gnss_lost;
+ int id;
+ int n_irqs;
+ int gnss_port;
+ int mac_port; /* miniature atomic clock */
+ u8 serial[6];
+ int flash_start;
+ bool has_serial;
};
+struct ocp_resource {
+ unsigned long offset;
+ int size;
+ int irq_vec;
+ int (*setup)(struct ptp_ocp *bp, struct ocp_resource *r);
+ void *extra;
+ unsigned long bp_offset;
+};
+
+static int ptp_ocp_register_mem(struct ptp_ocp *bp, struct ocp_resource *r);
+static int ptp_ocp_register_i2c(struct ptp_ocp *bp, struct ocp_resource *r);
+static int ptp_ocp_register_spi(struct ptp_ocp *bp, struct ocp_resource *r);
+static int ptp_ocp_register_serial(struct ptp_ocp *bp, struct ocp_resource *r);
+static int ptp_ocp_register_ext(struct ptp_ocp *bp, struct ocp_resource *r);
+static int ptp_ocp_fb_board_init(struct ptp_ocp *bp, struct ocp_resource *r);
+static irqreturn_t ptp_ocp_ts_irq(int irq, void *priv);
+static int ptp_ocp_ts_enable(void *priv, bool enable);
+
+#define bp_assign_entry(bp, res, val) ({ \
+ uintptr_t addr = (uintptr_t)(bp) + (res)->bp_offset; \
+ *(typeof(val) *)addr = val; \
+})
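bp_assign_entry() is what lets a single resource table drive differently typed struct ptp_ocp members: each entry records a byte offset via offsetof(), and the setup callback stores a pointer, port number, or extension struct at that offset. The same technique in standalone form (GNU C, as the kernel itself relies on typeof and statement expressions; names hypothetical):

#include <stddef.h>
#include <stdint.h>

struct card { void *regs; int uart_port; };

/* Store a value of any type at a byte offset recorded elsewhere. */
#define assign_at(base, off, val) ({			\
	uintptr_t addr = (uintptr_t)(base) + (off);	\
	*(typeof(val) *)addr = (val);			\
})

static void demo(struct card *c, void *mapped)
{
	assign_at(c, offsetof(struct card, regs), mapped);	/* c->regs = mapped */
	assign_at(c, offsetof(struct card, uart_port), 3);	/* c->uart_port = 3 */
}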
+
+#define OCP_RES_LOCATION(member) \
+ .bp_offset = offsetof(struct ptp_ocp, member)
+
+#define OCP_MEM_RESOURCE(member) \
+ OCP_RES_LOCATION(member), .setup = ptp_ocp_register_mem
+
+#define OCP_SERIAL_RESOURCE(member) \
+ OCP_RES_LOCATION(member), .setup = ptp_ocp_register_serial
+
+#define OCP_I2C_RESOURCE(member) \
+ OCP_RES_LOCATION(member), .setup = ptp_ocp_register_i2c
+
+#define OCP_SPI_RESOURCE(member) \
+ OCP_RES_LOCATION(member), .setup = ptp_ocp_register_spi
+
+#define OCP_EXT_RESOURCE(member) \
+ OCP_RES_LOCATION(member), .setup = ptp_ocp_register_ext
+
+/* This is the MSI vector mapping used.
+ * 0: N/C
+ * 1: TS0
+ * 2: TS1
+ * 3: GPS
+ * 4: GPS2 (n/c)
+ * 5: MAC
+ * 6: SPI IMU (inertial measurement unit)
+ * 7: I2C oscillator
+ * 8: HWICAP
+ * 9: SPI Flash
+ */
+
+static struct ocp_resource ocp_fb_resource[] = {
+ {
+ OCP_MEM_RESOURCE(reg),
+ .offset = 0x01000000, .size = 0x10000,
+ },
+ {
+ OCP_EXT_RESOURCE(ts0),
+ .offset = 0x01010000, .size = 0x10000, .irq_vec = 1,
+ .extra = &(struct ptp_ocp_ext_info) {
+ .name = "ts0", .index = 0,
+ .irq_fcn = ptp_ocp_ts_irq,
+ .enable = ptp_ocp_ts_enable,
+ },
+ },
+ {
+ OCP_EXT_RESOURCE(ts1),
+ .offset = 0x01020000, .size = 0x10000, .irq_vec = 2,
+ .extra = &(struct ptp_ocp_ext_info) {
+ .name = "ts1", .index = 1,
+ .irq_fcn = ptp_ocp_ts_irq,
+ .enable = ptp_ocp_ts_enable,
+ },
+ },
+ {
+ OCP_MEM_RESOURCE(pps_to_ext),
+ .offset = 0x01030000, .size = 0x10000,
+ },
+ {
+ OCP_MEM_RESOURCE(pps_to_clk),
+ .offset = 0x01040000, .size = 0x10000,
+ },
+ {
+ OCP_MEM_RESOURCE(tod),
+ .offset = 0x01050000, .size = 0x10000,
+ },
+ {
+ OCP_MEM_RESOURCE(image),
+ .offset = 0x00020000, .size = 0x1000,
+ },
+ {
+ OCP_I2C_RESOURCE(i2c_ctrl),
+ .offset = 0x00150000, .size = 0x10000, .irq_vec = 7,
+ },
+ {
+ OCP_SERIAL_RESOURCE(gnss_port),
+ .offset = 0x00160000 + 0x1000, .irq_vec = 3,
+ },
+ {
+ OCP_SERIAL_RESOURCE(mac_port),
+ .offset = 0x00180000 + 0x1000, .irq_vec = 5,
+ },
+ {
+ OCP_SPI_RESOURCE(spi_flash),
+ .offset = 0x00310000, .size = 0x10000, .irq_vec = 9,
+ .extra = &(struct ptp_ocp_flash_info) {
+ .name = "xilinx_spi", .pci_offset = 0,
+ .data_size = sizeof(struct xspi_platform_data),
+ .data = &(struct xspi_platform_data) {
+ .num_chipselect = 1,
+ .bits_per_word = 8,
+ .num_devices = 1,
+ .devices = &(struct spi_board_info) {
+ .modalias = "spi-nor",
+ },
+ },
+ },
+ },
+ {
+ .setup = ptp_ocp_fb_board_init,
+ },
+ { }
+};
+
+static const struct pci_device_id ptp_ocp_pcidev_id[] = {
+ { PCI_DEVICE_DATA(FACEBOOK, TIMECARD, &ocp_fb_resource) },
+ { 0 }
+};
+MODULE_DEVICE_TABLE(pci, ptp_ocp_pcidev_id);
+
+static DEFINE_MUTEX(ptp_ocp_lock);
+static DEFINE_IDR(ptp_ocp_idr);
+
+static struct {
+ const char *name;
+ int value;
+} ptp_ocp_clock[] = {
+ { .name = "NONE", .value = 0 },
+ { .name = "TOD", .value = 1 },
+ { .name = "IRIG", .value = 2 },
+ { .name = "PPS", .value = 3 },
+ { .name = "PTP", .value = 4 },
+ { .name = "RTC", .value = 5 },
+ { .name = "DCF", .value = 6 },
+ { .name = "REGS", .value = 0xfe },
+ { .name = "EXT", .value = 0xff },
+};
+
+static const char *
+ptp_ocp_clock_name_from_val(int val)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ptp_ocp_clock); i++)
+ if (ptp_ocp_clock[i].value == val)
+ return ptp_ocp_clock[i].name;
+ return NULL;
+}
+
+static int
+ptp_ocp_clock_val_from_name(const char *name)
+{
+ const char *clk;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ptp_ocp_clock); i++) {
+ clk = ptp_ocp_clock[i].name;
+ if (!strncasecmp(name, clk, strlen(clk)))
+ return ptp_ocp_clock[i].value;
+ }
+ return -EINVAL;
+}
+
static int
__ptp_ocp_gettime_locked(struct ptp_ocp *bp, struct timespec64 *ts,
struct ptp_system_timestamp *sts)
@@ -192,6 +463,45 @@ ptp_ocp_null_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm)
return -EOPNOTSUPP;
}
+static int
+ptp_ocp_adjphase(struct ptp_clock_info *ptp_info, s32 phase_ns)
+{
+ return -EOPNOTSUPP;
+}
+
+static int
+ptp_ocp_enable(struct ptp_clock_info *ptp_info, struct ptp_clock_request *rq,
+ int on)
+{
+ struct ptp_ocp *bp = container_of(ptp_info, struct ptp_ocp, ptp_info);
+ struct ptp_ocp_ext_src *ext = NULL;
+ int err;
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_EXTTS:
+ switch (rq->extts.index) {
+ case 0:
+ ext = bp->ts0;
+ break;
+ case 1:
+ ext = bp->ts1;
+ break;
+ }
+ break;
+ case PTP_CLK_REQ_PPS:
+ ext = bp->pps;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ err = -ENXIO;
+ if (ext)
+ err = ext->info->enable(ext, on);
+
+ return err;
+}
+
static const struct ptp_clock_info ptp_ocp_clock_info = {
.owner = THIS_MODULE,
.name = KBUILD_MODNAME,
@@ -200,10 +510,57 @@ static const struct ptp_clock_info ptp_ocp_clock_info = {
.settime64 = ptp_ocp_settime,
.adjtime = ptp_ocp_adjtime,
.adjfine = ptp_ocp_null_adjfine,
+ .adjphase = ptp_ocp_adjphase,
+ .enable = ptp_ocp_enable,
+ .pps = true,
+ .n_ext_ts = 2,
};
+static void
+__ptp_ocp_clear_drift_locked(struct ptp_ocp *bp)
+{
+ u32 ctrl, select;
+
+ select = ioread32(&bp->reg->select);
+ iowrite32(OCP_SELECT_CLK_REG, &bp->reg->select);
+
+ iowrite32(0, &bp->reg->drift_ns);
+
+ ctrl = ioread32(&bp->reg->ctrl);
+ ctrl |= OCP_CTRL_ADJUST_DRIFT;
+ iowrite32(ctrl, &bp->reg->ctrl);
+
+ /* restore clock selection */
+ iowrite32(select >> 16, &bp->reg->select);
+}
+
+static void
+ptp_ocp_watchdog(struct timer_list *t)
+{
+ struct ptp_ocp *bp = from_timer(bp, t, watchdog);
+ unsigned long flags;
+ u32 status;
+
+ status = ioread32(&bp->pps_to_clk->status);
+
+ if (status & PPS_STATUS_SUPERV_ERR) {
+ iowrite32(status, &bp->pps_to_clk->status);
+ if (!bp->gnss_lost) {
+ spin_lock_irqsave(&bp->lock, flags);
+ __ptp_ocp_clear_drift_locked(bp);
+ spin_unlock_irqrestore(&bp->lock, flags);
+ bp->gnss_lost = ktime_get_real_seconds();
+ }
+
+ } else if (bp->gnss_lost) {
+ bp->gnss_lost = 0;
+ }
+
+ mod_timer(&bp->watchdog, jiffies + HZ);
+}
+
static int
-ptp_ocp_check_clock(struct ptp_ocp *bp)
+ptp_ocp_init_clock(struct ptp_ocp *bp)
{
struct timespec64 ts;
bool sync;
@@ -214,6 +571,17 @@ ptp_ocp_check_clock(struct ptp_ocp *bp)
ctrl |= OCP_CTRL_ENABLE;
iowrite32(ctrl, &bp->reg->ctrl);
+ /* No drift correction */
+ /* offset_p: 1/8, offset_i: 1/16, drift_p: 0, drift_i: 0 */
+ iowrite32(0x2000, &bp->reg->servo_offset_p);
+ iowrite32(0x1000, &bp->reg->servo_offset_i);
+ iowrite32(0, &bp->reg->servo_drift_p);
+ iowrite32(0, &bp->reg->servo_drift_i);
+
+ /* latch servo values */
+ ctrl |= OCP_CTRL_ADJUST_SERVO;
+ iowrite32(ctrl, &bp->reg->ctrl);
+
if ((ioread32(&bp->reg->ctrl) & OCP_CTRL_ENABLE) == 0) {
dev_err(&bp->pdev->dev, "clock not enabled\n");
return -ENODEV;
@@ -229,6 +597,9 @@ ptp_ocp_check_clock(struct ptp_ocp *bp)
ts.tv_sec, ts.tv_nsec,
sync ? "in-sync" : "UNSYNCED");
+ timer_setup(&bp->watchdog, ptp_ocp_watchdog, 0);
+ mod_timer(&bp->watchdog, jiffies + HZ);
+
return 0;
}
@@ -278,82 +649,840 @@ ptp_ocp_tod_info(struct ptp_ocp *bp)
reg & TOD_STATUS_LEAP_VALID ? 1 : 0);
}
+static int
+ptp_ocp_firstchild(struct device *dev, void *data)
+{
+ return 1;
+}
+
+static int
+ptp_ocp_read_i2c(struct i2c_adapter *adap, u8 addr, u8 reg, u8 sz, u8 *data)
+{
+ struct i2c_msg msgs[2] = {
+ {
+ .addr = addr,
+ .len = 1,
+ .buf = &reg,
+ },
+ {
+ .addr = addr,
+ .flags = I2C_M_RD,
+ .len = 2,
+ .buf = data,
+ },
+ };
+ int err;
+ u8 len;
+
+ /* xiic-i2c for some stupid reason only does 2 byte reads. */
+ while (sz) {
+ len = min_t(u8, sz, 2);
+ msgs[1].len = len;
+ err = i2c_transfer(adap, msgs, 2);
+ if (err != msgs[1].len)
+ return err;
+ msgs[1].buf += len;
+ reg += len;
+ sz -= len;
+ }
+ return 0;
+}
+
+static void
+ptp_ocp_get_serial_number(struct ptp_ocp *bp)
+{
+ struct i2c_adapter *adap;
+ struct device *dev;
+ int err;
+
+ dev = device_find_child(&bp->i2c_ctrl->dev, NULL, ptp_ocp_firstchild);
+ if (!dev) {
+ dev_err(&bp->pdev->dev, "Can't find I2C adapter\n");
+ return;
+ }
+
+ adap = i2c_verify_adapter(dev);
+ if (!adap) {
+ dev_err(&bp->pdev->dev, "device '%s' isn't an I2C adapter\n",
+ dev_name(dev));
+ goto out;
+ }
+
+ err = ptp_ocp_read_i2c(adap, 0x58, 0x9A, 6, bp->serial);
+ if (err) {
+ dev_err(&bp->pdev->dev, "could not read eeprom: %d\n", err);
+ goto out;
+ }
+
+ bp->has_serial = true;
+
+out:
+ put_device(dev);
+}
+
static void
ptp_ocp_info(struct ptp_ocp *bp)
{
- static const char * const clock_name[] = {
- "NO", "TOD", "IRIG", "PPS", "PTP", "RTC", "REGS", "EXT"
- };
u32 version, select;
version = ioread32(&bp->reg->version);
select = ioread32(&bp->reg->select);
dev_info(&bp->pdev->dev, "Version %d.%d.%d, clock %s, device ptp%d\n",
version >> 24, (version >> 16) & 0xff, version & 0xffff,
- clock_name[select & 7],
+ ptp_ocp_clock_name_from_val(select >> 16),
ptp_clock_index(bp->ptp));
ptp_ocp_tod_info(bp);
}
+static struct device *
+ptp_ocp_find_flash(struct ptp_ocp *bp)
+{
+ struct device *dev, *last;
+
+ last = NULL;
+ dev = &bp->spi_flash->dev;
+
+ while ((dev = device_find_child(dev, NULL, ptp_ocp_firstchild))) {
+ if (!strcmp("mtd", dev_bus_name(dev)))
+ break;
+ put_device(last);
+ last = dev;
+ }
+ put_device(last);
+
+ return dev;
+}
+
static int
-ptp_ocp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ptp_ocp_devlink_flash(struct devlink *devlink, struct device *dev,
+ const struct firmware *fw)
{
- struct ptp_ocp *bp;
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+ struct ptp_ocp *bp = devlink_priv(devlink);
+ size_t off, len, resid, wrote;
+ struct erase_info erase;
+ size_t base, blksz;
+ int err = 0;
+
+ off = 0;
+ base = bp->flash_start;
+ blksz = 4096;
+ resid = fw->size;
+
+ while (resid) {
+ devlink_flash_update_status_notify(devlink, "Flashing",
+ NULL, off, fw->size);
+
+ len = min_t(size_t, resid, blksz);
+ erase.addr = base + off;
+ erase.len = blksz;
+
+ err = mtd_erase(mtd, &erase);
+ if (err)
+ goto out;
+
+ err = mtd_write(mtd, base + off, len, &wrote, &fw->data[off]);
+ if (err)
+ goto out;
+
+ off += blksz;
+ resid -= len;
+ }
+out:
+ return err;
+}
+
+static int
+ptp_ocp_devlink_flash_update(struct devlink *devlink,
+ struct devlink_flash_update_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct ptp_ocp *bp = devlink_priv(devlink);
+ struct device *dev;
+ const char *msg;
+ int err;
+
+ dev = ptp_ocp_find_flash(bp);
+ if (!dev) {
+ dev_err(&bp->pdev->dev, "Can't find Flash SPI adapter\n");
+ return -ENODEV;
+ }
+
+ devlink_flash_update_status_notify(devlink, "Preparing to flash",
+ NULL, 0, 0);
+
+ err = ptp_ocp_devlink_flash(devlink, dev, params->fw);
+
+ msg = err ? "Flash error" : "Flash complete";
+ devlink_flash_update_status_notify(devlink, msg, NULL, 0, 0);
+
+ put_device(dev);
+ return err;
+}
+
+static int
+ptp_ocp_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct ptp_ocp *bp = devlink_priv(devlink);
+ char buf[32];
+ int err;
+
+ err = devlink_info_driver_name_put(req, KBUILD_MODNAME);
+ if (err)
+ return err;
+
+ if (bp->image) {
+ u32 ver = ioread32(&bp->image->version);
+
+ if (ver & 0xffff) {
+ sprintf(buf, "%d", ver);
+ err = devlink_info_version_running_put(req,
+ "fw",
+ buf);
+ } else {
+ sprintf(buf, "%d", ver >> 16);
+ err = devlink_info_version_running_put(req,
+ "loader",
+ buf);
+ }
+ if (err)
+ return err;
+ }
+
+ if (!bp->has_serial)
+ ptp_ocp_get_serial_number(bp);
+
+ if (bp->has_serial) {
+ sprintf(buf, "%pM", bp->serial);
+ err = devlink_info_serial_number_put(req, buf);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static const struct devlink_ops ptp_ocp_devlink_ops = {
+ .flash_update = ptp_ocp_devlink_flash_update,
+ .info_get = ptp_ocp_devlink_info_get,
+};
+
+static void __iomem *
+__ptp_ocp_get_mem(struct ptp_ocp *bp, unsigned long start, int size)
+{
+ struct resource res = DEFINE_RES_MEM_NAMED(start, size, "ptp_ocp");
+
+ return devm_ioremap_resource(&bp->pdev->dev, &res);
+}
+
+static void __iomem *
+ptp_ocp_get_mem(struct ptp_ocp *bp, struct ocp_resource *r)
+{
+ unsigned long start;
+
+ start = pci_resource_start(bp->pdev, 0) + r->offset;
+ return __ptp_ocp_get_mem(bp, start, r->size);
+}
+
+static void
+ptp_ocp_set_irq_resource(struct resource *res, int irq)
+{
+ struct resource r = DEFINE_RES_IRQ(irq);
+ *res = r;
+}
+
+static void
+ptp_ocp_set_mem_resource(struct resource *res, unsigned long start, int size)
+{
+ struct resource r = DEFINE_RES_MEM(start, size);
+ *res = r;
+}
+
+static int
+ptp_ocp_register_spi(struct ptp_ocp *bp, struct ocp_resource *r)
+{
+ struct ptp_ocp_flash_info *info;
+ struct pci_dev *pdev = bp->pdev;
+ struct platform_device *p;
+ struct resource res[2];
+ unsigned long start;
+ int id;
+
+ /* XXX hack to work around old FPGA */
+ if (bp->n_irqs < 10) {
+ dev_err(&bp->pdev->dev, "FPGA does not have SPI devices\n");
+ return 0;
+ }
+
+ if (r->irq_vec > bp->n_irqs) {
+ dev_err(&bp->pdev->dev, "spi device irq %d out of range\n",
+ r->irq_vec);
+ return 0;
+ }
+
+ start = pci_resource_start(pdev, 0) + r->offset;
+ ptp_ocp_set_mem_resource(&res[0], start, r->size);
+ ptp_ocp_set_irq_resource(&res[1], pci_irq_vector(pdev, r->irq_vec));
+
+ info = r->extra;
+ id = pci_dev_id(pdev) << 1;
+ id += info->pci_offset;
+
+ p = platform_device_register_resndata(&pdev->dev, info->name, id,
+ res, 2, info->data,
+ info->data_size);
+ if (IS_ERR(p))
+ return PTR_ERR(p);
+
+ bp_assign_entry(bp, r, p);
+
+ return 0;
+}
+
+static struct platform_device *
+ptp_ocp_i2c_bus(struct pci_dev *pdev, struct ocp_resource *r, int id)
+{
+ struct resource res[2];
+ unsigned long start;
+
+ start = pci_resource_start(pdev, 0) + r->offset;
+ ptp_ocp_set_mem_resource(&res[0], start, r->size);
+ ptp_ocp_set_irq_resource(&res[1], pci_irq_vector(pdev, r->irq_vec));
+
+ return platform_device_register_resndata(&pdev->dev, "xiic-i2c",
+ id, res, 2, NULL, 0);
+}
+
+static int
+ptp_ocp_register_i2c(struct ptp_ocp *bp, struct ocp_resource *r)
+{
+ struct pci_dev *pdev = bp->pdev;
+ struct platform_device *p;
+ struct clk_hw *clk;
+ char buf[32];
+ int id;
+
+ if (r->irq_vec > bp->n_irqs) {
+ dev_err(&bp->pdev->dev, "i2c device irq %d out of range\n",
+ r->irq_vec);
+ return 0;
+ }
+
+ id = pci_dev_id(bp->pdev);
+
+ sprintf(buf, "AXI.%d", id);
+ clk = clk_hw_register_fixed_rate(&pdev->dev, buf, NULL, 0, 50000000);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+ bp->i2c_clk = clk;
+
+ sprintf(buf, "xiic-i2c.%d", id);
+ devm_clk_hw_register_clkdev(&pdev->dev, clk, NULL, buf);
+ p = ptp_ocp_i2c_bus(bp->pdev, r, id);
+ if (IS_ERR(p))
+ return PTR_ERR(p);
+
+ bp_assign_entry(bp, r, p);
+
+ return 0;
+}
+
+static irqreturn_t
+ptp_ocp_ts_irq(int irq, void *priv)
+{
+ struct ptp_ocp_ext_src *ext = priv;
+ struct ts_reg __iomem *reg = ext->mem;
+ struct ptp_clock_event ev;
+ u32 sec, nsec;
+
+ /* XXX should fix API - this converts s/ns -> ts -> s/ns */
+ sec = ioread32(&reg->time_sec);
+ nsec = ioread32(&reg->time_ns);
+
+ ev.type = PTP_CLOCK_EXTTS;
+ ev.index = ext->info->index;
+ ev.timestamp = sec * 1000000000ULL + nsec;
+
+ ptp_clock_event(ext->bp->ptp, &ev);
+
+ iowrite32(1, &reg->intr); /* write 1 to ack */
+
+ return IRQ_HANDLED;
+}
+
+static int
+ptp_ocp_ts_enable(void *priv, bool enable)
+{
+ struct ptp_ocp_ext_src *ext = priv;
+ struct ts_reg __iomem *reg = ext->mem;
+
+ if (enable) {
+ iowrite32(1, &reg->enable);
+ iowrite32(1, &reg->intr_mask);
+ iowrite32(1, &reg->intr);
+ } else {
+ iowrite32(0, &reg->intr_mask);
+ iowrite32(0, &reg->enable);
+ }
+
+ return 0;
+}
+
+static void
+ptp_ocp_unregister_ext(struct ptp_ocp_ext_src *ext)
+{
+ ext->info->enable(ext, false);
+ pci_free_irq(ext->bp->pdev, ext->irq_vec, ext);
+ kfree(ext);
+}
+
+static int
+ptp_ocp_register_ext(struct ptp_ocp *bp, struct ocp_resource *r)
+{
+ struct pci_dev *pdev = bp->pdev;
+ struct ptp_ocp_ext_src *ext;
int err;
- bp = kzalloc(sizeof(*bp), GFP_KERNEL);
- if (!bp)
+ ext = kzalloc(sizeof(*ext), GFP_KERNEL);
+ if (!ext)
return -ENOMEM;
+
+ err = -EINVAL;
+ ext->mem = ptp_ocp_get_mem(bp, r);
+ if (!ext->mem)
+ goto out;
+
+ ext->bp = bp;
+ ext->info = r->extra;
+ ext->irq_vec = r->irq_vec;
+
+ err = pci_request_irq(pdev, r->irq_vec, ext->info->irq_fcn, NULL,
+ ext, "ocp%d.%s", bp->id, ext->info->name);
+ if (err) {
+ dev_err(&pdev->dev, "Could not get irq %d\n", r->irq_vec);
+ goto out;
+ }
+
+ bp_assign_entry(bp, r, ext);
+
+ return 0;
+
+out:
+ kfree(ext);
+ return err;
+}
+
+static int
+ptp_ocp_serial_line(struct ptp_ocp *bp, struct ocp_resource *r)
+{
+ struct pci_dev *pdev = bp->pdev;
+ struct uart_8250_port uart;
+
+ /* Setting UPF_IOREMAP and leaving port.membase unspecified lets
+ * the serial port device claim and release the pci resource.
+ */
+ memset(&uart, 0, sizeof(uart));
+ uart.port.dev = &pdev->dev;
+ uart.port.iotype = UPIO_MEM;
+ uart.port.regshift = 2;
+ uart.port.mapbase = pci_resource_start(pdev, 0) + r->offset;
+ uart.port.irq = pci_irq_vector(pdev, r->irq_vec);
+ uart.port.uartclk = 50000000;
+ uart.port.flags = UPF_FIXED_TYPE | UPF_IOREMAP;
+ uart.port.type = PORT_16550A;
+
+ return serial8250_register_8250_port(&uart);
+}
+
+static int
+ptp_ocp_register_serial(struct ptp_ocp *bp, struct ocp_resource *r)
+{
+ int port;
+
+ if (r->irq_vec > bp->n_irqs) {
+ dev_err(&bp->pdev->dev, "serial device irq %d out of range\n",
+ r->irq_vec);
+ return 0;
+ }
+
+ port = ptp_ocp_serial_line(bp, r);
+ if (port < 0)
+ return port;
+
+ bp_assign_entry(bp, r, port);
+
+ return 0;
+}
+
+static int
+ptp_ocp_register_mem(struct ptp_ocp *bp, struct ocp_resource *r)
+{
+ void __iomem *mem;
+
+ mem = ptp_ocp_get_mem(bp, r);
+ if (!mem)
+ return -EINVAL;
+
+ bp_assign_entry(bp, r, mem);
+
+ return 0;
+}
+
+/* FB specific board initializers; last "resource" registered. */
+static int
+ptp_ocp_fb_board_init(struct ptp_ocp *bp, struct ocp_resource *r)
+{
+ bp->flash_start = 1024 * 4096;
+
+ return ptp_ocp_init_clock(bp);
+}
+
+static int
+ptp_ocp_register_resources(struct ptp_ocp *bp, kernel_ulong_t driver_data)
+{
+ struct ocp_resource *r, *table;
+ int err = 0;
+
+ table = (struct ocp_resource *)driver_data;
+ for (r = table; r->setup; r++) {
+ err = r->setup(bp, r);
+ if (err)
+ break;
+ }
+ return err;
+}
+
+static ssize_t
+serialnum_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct ptp_ocp *bp = dev_get_drvdata(dev);
+
+ if (!bp->has_serial)
+ ptp_ocp_get_serial_number(bp);
+
+ return sysfs_emit(buf, "%pM\n", bp->serial);
+}
+static DEVICE_ATTR_RO(serialnum);
+
+static ssize_t
+gnss_sync_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct ptp_ocp *bp = dev_get_drvdata(dev);
+ ssize_t ret;
+
+ if (bp->gnss_lost)
+ ret = sysfs_emit(buf, "LOST @ %ptT\n", &bp->gnss_lost);
+ else
+ ret = sysfs_emit(buf, "SYNC\n");
+
+ return ret;
+}
+static DEVICE_ATTR_RO(gnss_sync);
+
+static ssize_t
+clock_source_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct ptp_ocp *bp = dev_get_drvdata(dev);
+ const char *p;
+ u32 select;
+
+ select = ioread32(&bp->reg->select);
+ p = ptp_ocp_clock_name_from_val(select >> 16);
+
+ return sysfs_emit(buf, "%s\n", p);
+}
+
+static ssize_t
+clock_source_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ptp_ocp *bp = dev_get_drvdata(dev);
+ unsigned long flags;
+ int val;
+
+ val = ptp_ocp_clock_val_from_name(buf);
+ if (val < 0)
+ return val;
+
+ spin_lock_irqsave(&bp->lock, flags);
+ iowrite32(val, &bp->reg->select);
+ spin_unlock_irqrestore(&bp->lock, flags);
+
+ return count;
+}
+static DEVICE_ATTR_RW(clock_source);
+
+static ssize_t
+available_clock_sources_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ const char *clk;
+ ssize_t count;
+ int i;
+
+ count = 0;
+ for (i = 0; i < ARRAY_SIZE(ptp_ocp_clock); i++) {
+ clk = ptp_ocp_clock[i].name;
+ count += sysfs_emit_at(buf, count, "%s ", clk);
+ }
+ if (count)
+ count--;
+ count += sysfs_emit_at(buf, count, "\n");
+ return count;
+}
+static DEVICE_ATTR_RO(available_clock_sources);
+
+static struct attribute *timecard_attrs[] = {
+ &dev_attr_serialnum.attr,
+ &dev_attr_gnss_sync.attr,
+ &dev_attr_clock_source.attr,
+ &dev_attr_available_clock_sources.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(timecard);
+
+static void
+ptp_ocp_dev_release(struct device *dev)
+{
+ struct ptp_ocp *bp = dev_get_drvdata(dev);
+
+ mutex_lock(&ptp_ocp_lock);
+ idr_remove(&ptp_ocp_idr, bp->id);
+ mutex_unlock(&ptp_ocp_lock);
+}
+
+static int
+ptp_ocp_device_init(struct ptp_ocp *bp, struct pci_dev *pdev)
+{
+ int err;
+
+ mutex_lock(&ptp_ocp_lock);
+ err = idr_alloc(&ptp_ocp_idr, bp, 0, 0, GFP_KERNEL);
+ mutex_unlock(&ptp_ocp_lock);
+ if (err < 0) {
+ dev_err(&pdev->dev, "idr_alloc failed: %d\n", err);
+ return err;
+ }
+ bp->id = err;
+
+ bp->ptp_info = ptp_ocp_clock_info;
+ spin_lock_init(&bp->lock);
+ bp->gnss_port = -1;
+ bp->mac_port = -1;
bp->pdev = pdev;
+
+ device_initialize(&bp->dev);
+ dev_set_name(&bp->dev, "ocp%d", bp->id);
+ bp->dev.class = &timecard_class;
+ bp->dev.parent = &pdev->dev;
+ bp->dev.release = ptp_ocp_dev_release;
+ dev_set_drvdata(&bp->dev, bp);
+
+ err = device_add(&bp->dev);
+ if (err) {
+ dev_err(&bp->dev, "device add failed: %d\n", err);
+ goto out;
+ }
+
pci_set_drvdata(pdev, bp);
+ return 0;
+
+out:
+ ptp_ocp_dev_release(&bp->dev);
+ put_device(&bp->dev);
+ return err;
+}
+
+static void
+ptp_ocp_symlink(struct ptp_ocp *bp, struct device *child, const char *link)
+{
+ struct device *dev = &bp->dev;
+
+ if (sysfs_create_link(&dev->kobj, &child->kobj, link))
+ dev_err(dev, "%s symlink failed\n", link);
+}
+
+static void
+ptp_ocp_link_child(struct ptp_ocp *bp, const char *name, const char *link)
+{
+ struct device *dev, *child;
+
+ dev = &bp->pdev->dev;
+
+ child = device_find_child_by_name(dev, name);
+ if (!child) {
+ dev_err(dev, "Could not find device %s\n", name);
+ return;
+ }
+
+ ptp_ocp_symlink(bp, child, link);
+ put_device(child);
+}
+
+static int
+ptp_ocp_complete(struct ptp_ocp *bp)
+{
+ struct pps_device *pps;
+ char buf[32];
+
+ if (bp->gnss_port != -1) {
+ sprintf(buf, "ttyS%d", bp->gnss_port);
+ ptp_ocp_link_child(bp, buf, "ttyGNSS");
+ }
+ if (bp->mac_port != -1) {
+ sprintf(buf, "ttyS%d", bp->mac_port);
+ ptp_ocp_link_child(bp, buf, "ttyMAC");
+ }
+ sprintf(buf, "ptp%d", ptp_clock_index(bp->ptp));
+ ptp_ocp_link_child(bp, buf, "ptp");
+
+ pps = pps_lookup_dev(bp->ptp);
+ if (pps)
+ ptp_ocp_symlink(bp, pps->dev, "pps");
+
+ if (device_add_groups(&bp->dev, timecard_groups))
+ pr_err("device add groups failed\n");
+
+ return 0;
+}
+
+static void
+ptp_ocp_resource_summary(struct ptp_ocp *bp)
+{
+ struct device *dev = &bp->pdev->dev;
+
+ if (bp->image) {
+ u32 ver = ioread32(&bp->image->version);
+
+ dev_info(dev, "version %x\n", ver);
+ if (ver & 0xffff)
+ dev_info(dev, "regular image, version %d\n",
+ ver & 0xffff);
+ else
+ dev_info(dev, "golden image, version %d\n",
+ ver >> 16);
+ }
+ if (bp->gnss_port != -1)
+ dev_info(dev, "GNSS @ /dev/ttyS%d 115200\n", bp->gnss_port);
+ if (bp->mac_port != -1)
+ dev_info(dev, "MAC @ /dev/ttyS%d 57600\n", bp->mac_port);
+}
+
+static void
+ptp_ocp_detach_sysfs(struct ptp_ocp *bp)
+{
+ struct device *dev = &bp->dev;
+
+ sysfs_remove_link(&dev->kobj, "ttyGNSS");
+ sysfs_remove_link(&dev->kobj, "ttyMAC");
+ sysfs_remove_link(&dev->kobj, "ptp");
+ sysfs_remove_link(&dev->kobj, "pps");
+ device_remove_groups(dev, timecard_groups);
+}
+
+static void
+ptp_ocp_detach(struct ptp_ocp *bp)
+{
+ ptp_ocp_detach_sysfs(bp);
+ if (timer_pending(&bp->watchdog))
+ del_timer_sync(&bp->watchdog);
+ if (bp->ts0)
+ ptp_ocp_unregister_ext(bp->ts0);
+ if (bp->ts1)
+ ptp_ocp_unregister_ext(bp->ts1);
+ if (bp->pps)
+ ptp_ocp_unregister_ext(bp->pps);
+ if (bp->gnss_port != -1)
+ serial8250_unregister_port(bp->gnss_port);
+ if (bp->mac_port != -1)
+ serial8250_unregister_port(bp->mac_port);
+ if (bp->spi_flash)
+ platform_device_unregister(bp->spi_flash);
+ if (bp->i2c_ctrl)
+ platform_device_unregister(bp->i2c_ctrl);
+ if (bp->i2c_clk)
+ clk_hw_unregister_fixed_rate(bp->i2c_clk);
+ if (bp->n_irqs)
+ pci_free_irq_vectors(bp->pdev);
+ if (bp->ptp)
+ ptp_clock_unregister(bp->ptp);
+ device_unregister(&bp->dev);
+}
+
+static int
+ptp_ocp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct devlink *devlink;
+ struct ptp_ocp *bp;
+ int err;
+
+ devlink = devlink_alloc(&ptp_ocp_devlink_ops, sizeof(*bp), &pdev->dev);
+ if (!devlink) {
+ dev_err(&pdev->dev, "devlink_alloc failed\n");
+ return -ENOMEM;
+ }
+
+ err = devlink_register(devlink);
+ if (err)
+ goto out_free;
+
err = pci_enable_device(pdev);
if (err) {
dev_err(&pdev->dev, "pci_enable_device\n");
- goto out_free;
+ goto out_unregister;
}
- err = pci_request_regions(pdev, KBUILD_MODNAME);
- if (err) {
- dev_err(&pdev->dev, "pci_request_region\n");
+ bp = devlink_priv(devlink);
+ err = ptp_ocp_device_init(bp, pdev);
+ if (err)
goto out_disable;
- }
- bp->base = pci_ioremap_bar(pdev, 0);
- if (!bp->base) {
- dev_err(&pdev->dev, "io_remap bar0\n");
- err = -ENOMEM;
- goto out_release_regions;
+ /* Compat mode.
+ * Older FPGA firmware only returns 2 IRQs.
+ * Allow this - if not all of the IRQs are returned, skip the
+ * extra devices and just register the clock.
+ */
+ err = pci_alloc_irq_vectors(pdev, 1, 10, PCI_IRQ_MSI | PCI_IRQ_MSIX);
+ if (err < 0) {
+ dev_err(&pdev->dev, "alloc_irq_vectors err: %d\n", err);
+ goto out;
}
- bp->reg = bp->base + OCP_REGISTER_OFFSET;
- bp->tod = bp->base + TOD_REGISTER_OFFSET;
- bp->ptp_info = ptp_ocp_clock_info;
- spin_lock_init(&bp->lock);
+ bp->n_irqs = err;
+ pci_set_master(pdev);
- err = ptp_ocp_check_clock(bp);
+ err = ptp_ocp_register_resources(bp, id->driver_data);
if (err)
goto out;
bp->ptp = ptp_clock_register(&bp->ptp_info, &pdev->dev);
if (IS_ERR(bp->ptp)) {
- dev_err(&pdev->dev, "ptp_clock_register\n");
err = PTR_ERR(bp->ptp);
+ dev_err(&pdev->dev, "ptp_clock_register: %d\n", err);
+ bp->ptp = NULL;
goto out;
}
+ err = ptp_ocp_complete(bp);
+ if (err)
+ goto out;
+
ptp_ocp_info(bp);
+ ptp_ocp_resource_summary(bp);
return 0;
out:
- pci_iounmap(pdev, bp->base);
-out_release_regions:
- pci_release_regions(pdev);
+ ptp_ocp_detach(bp);
+ pci_set_drvdata(pdev, NULL);
out_disable:
pci_disable_device(pdev);
+out_unregister:
+ devlink_unregister(devlink);
out_free:
- kfree(bp);
+ devlink_free(devlink);
return err;
}
@@ -362,13 +1491,14 @@ static void
ptp_ocp_remove(struct pci_dev *pdev)
{
struct ptp_ocp *bp = pci_get_drvdata(pdev);
+ struct devlink *devlink = priv_to_devlink(bp);
- ptp_clock_unregister(bp->ptp);
- pci_iounmap(pdev, bp->base);
- pci_release_regions(pdev);
- pci_disable_device(pdev);
+ ptp_ocp_detach(bp);
pci_set_drvdata(pdev, NULL);
- kfree(bp);
+ pci_disable_device(pdev);
+
+ devlink_unregister(devlink);
+ devlink_free(devlink);
}
static struct pci_driver ptp_ocp_driver = {
@@ -378,19 +1508,84 @@ static struct pci_driver ptp_ocp_driver = {
.remove = ptp_ocp_remove,
};
+static int
+ptp_ocp_i2c_notifier_call(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct device *dev, *child = data;
+ struct ptp_ocp *bp;
+ bool add;
+
+ switch (action) {
+ case BUS_NOTIFY_ADD_DEVICE:
+ case BUS_NOTIFY_DEL_DEVICE:
+ add = action == BUS_NOTIFY_ADD_DEVICE;
+ break;
+ default:
+ return 0;
+ }
+
+ if (!i2c_verify_adapter(child))
+ return 0;
+
+ dev = child;
+ while ((dev = dev->parent))
+ if (dev->driver && !strcmp(dev->driver->name, KBUILD_MODNAME))
+ goto found;
+ return 0;
+
+found:
+ bp = dev_get_drvdata(dev);
+ if (add)
+ ptp_ocp_symlink(bp, child, "i2c");
+ else
+ sysfs_remove_link(&bp->dev.kobj, "i2c");
+
+ return 0;
+}
+
+static struct notifier_block ptp_ocp_i2c_notifier = {
+ .notifier_call = ptp_ocp_i2c_notifier_call,
+};
+
static int __init
ptp_ocp_init(void)
{
+ const char *what;
int err;
+ what = "timecard class";
+ err = class_register(&timecard_class);
+ if (err)
+ goto out;
+
+ what = "i2c notifier";
+ err = bus_register_notifier(&i2c_bus_type, &ptp_ocp_i2c_notifier);
+ if (err)
+ goto out_notifier;
+
+ what = "ptp_ocp driver";
err = pci_register_driver(&ptp_ocp_driver);
+ if (err)
+ goto out_register;
+
+ return 0;
+
+out_register:
+ bus_unregister_notifier(&i2c_bus_type, &ptp_ocp_i2c_notifier);
+out_notifier:
+ class_unregister(&timecard_class);
+out:
+ pr_err(KBUILD_MODNAME ": failed to register %s: %d\n", what, err);
return err;
}
static void __exit
ptp_ocp_fini(void)
{
+ bus_unregister_notifier(&i2c_bus_type, &ptp_ocp_i2c_notifier);
pci_unregister_driver(&ptp_ocp_driver);
+ class_unregister(&timecard_class);
}
module_init(ptp_ocp_init);
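
The init path above records a label before each registration step, so a single pr_err() can report whichever step failed while the goto chain unwinds in reverse order. A minimal sketch of the same pattern, with hypothetical foo_* names:

static struct class foo_class = {
	.name = "foo",
};
static struct pci_driver foo_driver;	/* hypothetical, fields elided */

static int __init foo_init(void)
{
	const char *what;
	int err;

	what = "foo class";
	err = class_register(&foo_class);
	if (err)
		goto out;

	what = "foo driver";
	err = pci_register_driver(&foo_driver);
	if (err)
		goto out_class;

	return 0;

out_class:
	class_unregister(&foo_class);
out:
	pr_err("failed to register %s: %d\n", what, err);
	return err;
}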
diff --git a/drivers/ptp/ptp_vclock.c b/drivers/ptp/ptp_vclock.c
index e0f87c57749a..baee0379482b 100644
--- a/drivers/ptp/ptp_vclock.c
+++ b/drivers/ptp/ptp_vclock.c
@@ -149,6 +149,7 @@ void ptp_vclock_unregister(struct ptp_vclock *vclock)
kfree(vclock);
}
+#if IS_BUILTIN(CONFIG_PTP_1588_CLOCK)
int ptp_get_vclocks_index(int pclock_index, int **vclock_index)
{
char name[PTP_CLOCK_NAME_LEN] = "";
@@ -217,3 +218,4 @@ void ptp_convert_timestamp(struct skb_shared_hwtstamps *hwtstamps,
hwtstamps->hwtstamp = ns_to_ktime(ns);
}
EXPORT_SYMBOL(ptp_convert_timestamp);
+#endif
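
The new IS_BUILTIN() guard compiles the vclock helpers only when the PTP core is built into the kernel, since their callers in the networking core cannot reach into a module. A hedged sketch of the usual companion in the header (the stub name mirrors the function above; this is an assumption, not quoted from the patch):

#if IS_BUILTIN(CONFIG_PTP_1588_CLOCK)
int ptp_get_vclocks_index(int pclock_index, int **vclock_index);
#else
/* modular or disabled PTP core: built-in callers get a benign stub */
static inline int ptp_get_vclocks_index(int pclock_index, int **vclock_index)
{
	return 0;
}
#endif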
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index 9748165e08e9..acbe76a76fb2 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -504,28 +504,6 @@ void ccwgroup_driver_unregister(struct ccwgroup_driver *cdriver)
EXPORT_SYMBOL(ccwgroup_driver_unregister);
/**
- * get_ccwgroupdev_by_busid() - obtain device from a bus id
- * @gdrv: driver the device is owned by
- * @bus_id: bus id of the device to be searched
- *
- * This function searches all devices owned by @gdrv for a device with a bus
- * id matching @bus_id.
- * Returns:
- * If a match is found, its reference count of the found device is increased
- * and it is returned; else %NULL is returned.
- */
-struct ccwgroup_device *get_ccwgroupdev_by_busid(struct ccwgroup_driver *gdrv,
- char *bus_id)
-{
- struct device *dev;
-
- dev = driver_find_device_by_name(&gdrv->driver, bus_id);
-
- return dev ? to_ccwgroupdev(dev) : NULL;
-}
-EXPORT_SYMBOL_GPL(get_ccwgroupdev_by_busid);
-
-/**
* ccwgroup_probe_ccwdev() - probe function for slave devices
* @cdev: ccw device to be probed
*
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
index bf236d474538..9c67b97faba2 100644
--- a/drivers/s390/net/Kconfig
+++ b/drivers/s390/net/Kconfig
@@ -74,6 +74,7 @@ config QETH_L2
def_tristate y
prompt "qeth layer 2 device support"
depends on QETH
+ depends on BRIDGE || BRIDGE=n
help
Select this option to be able to run qeth devices in layer 2 mode.
To compile as a module, choose M. The module name is qeth_l2.
@@ -88,15 +89,6 @@ config QETH_L3
To compile as a module choose M. The module name is qeth_l3.
If unsure, choose Y.
-config QETH_OSN
- def_bool !HAVE_MARCH_Z14_FEATURES
- prompt "qeth OSN device support"
- depends on QETH
- help
- This enables the qeth driver to support devices in OSN mode.
- This feature will be removed in 2021.
- If unsure, choose N.
-
config QETH_OSX
def_bool !HAVE_MARCH_Z15_FEATURES
prompt "qeth OSX device support"
diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c
index 377e3689d1d4..06281a0a0552 100644
--- a/drivers/s390/net/ctcm_fsms.c
+++ b/drivers/s390/net/ctcm_fsms.c
@@ -1444,7 +1444,7 @@ again:
if (do_debug_ccw)
ctcmpc_dumpit((char *)&ch->ccw[0],
sizeof(struct ccw1) * 3);
- dolock = !in_irq();
+ dolock = !in_hardirq();
if (dolock)
spin_lock_irqsave(
get_ccwdev_lock(ch->cdev), saveflags);
diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c
index 19ee91acb89d..f0436f555c62 100644
--- a/drivers/s390/net/ctcm_mpc.c
+++ b/drivers/s390/net/ctcm_mpc.c
@@ -1773,7 +1773,7 @@ static void mpc_action_side_xid(fsm_instance *fsm, void *arg, int side)
CTCM_D3_DUMP((char *)ch->xid, XID2_LENGTH);
CTCM_D3_DUMP((char *)ch->xid_id, 4);
- if (!in_irq()) {
+ if (!in_hardirq()) {
/* Such conditional locking is a known problem for

	 * sparse because it is statically nondeterministic.
* Warnings should be ignored here. */
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index f4d554ea0c93..535a60b3946d 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -259,22 +259,10 @@ struct qeth_hdr_layer2 {
__u8 reserved2[16];
} __attribute__ ((packed));
-struct qeth_hdr_osn {
- __u8 id;
- __u8 reserved;
- __u16 seq_no;
- __u16 reserved2;
- __u16 control_flags;
- __u16 pdu_length;
- __u8 reserved3[18];
- __u32 ccid;
-} __attribute__ ((packed));
-
struct qeth_hdr {
union {
struct qeth_hdr_layer2 l2;
struct qeth_hdr_layer3 l3;
- struct qeth_hdr_osn osn;
} hdr;
} __attribute__ ((packed));
@@ -341,7 +329,6 @@ enum qeth_header_ids {
QETH_HEADER_TYPE_LAYER3 = 0x01,
QETH_HEADER_TYPE_LAYER2 = 0x02,
QETH_HEADER_TYPE_L3_TSO = 0x03,
- QETH_HEADER_TYPE_OSN = 0x04,
QETH_HEADER_TYPE_L2_TSO = 0x06,
QETH_HEADER_MASK_INVAL = 0x80,
};
@@ -779,18 +766,13 @@ enum qeth_threads {
QETH_RECOVER_THREAD = 1,
};
-struct qeth_osn_info {
- int (*assist_cb)(struct net_device *dev, void *data);
- int (*data_cb)(struct sk_buff *skb);
-};
-
struct qeth_discipline {
- const struct device_type *devtype;
int (*setup) (struct ccwgroup_device *);
void (*remove) (struct ccwgroup_device *);
int (*set_online)(struct qeth_card *card, bool carrier_ok);
void (*set_offline)(struct qeth_card *card);
- int (*do_ioctl)(struct net_device *dev, struct ifreq *rq, int cmd);
+ int (*do_ioctl)(struct net_device *dev, struct ifreq *rq,
+ void __user *data, int cmd);
int (*control_event_handler)(struct qeth_card *card,
struct qeth_ipa_cmd *cmd);
};
@@ -865,7 +847,6 @@ struct qeth_card {
/* QDIO buffer handling */
struct qeth_qdio_info qdio;
int read_or_write_problem;
- struct qeth_osn_info osn_info;
const struct qeth_discipline *discipline;
atomic_t force_alloc_skb;
struct service_level qeth_service_level;
@@ -1058,10 +1039,7 @@ int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb);
extern const struct qeth_discipline qeth_l2_discipline;
extern const struct qeth_discipline qeth_l3_discipline;
extern const struct ethtool_ops qeth_ethtool_ops;
-extern const struct ethtool_ops qeth_osn_ethtool_ops;
extern const struct attribute_group *qeth_dev_groups[];
-extern const struct attribute_group *qeth_osn_dev_groups[];
-extern const struct device_type qeth_generic_devtype;
const char *qeth_get_cardname_short(struct qeth_card *);
int qeth_resize_buffer_pool(struct qeth_card *card, unsigned int count);
@@ -1069,11 +1047,9 @@ int qeth_setup_discipline(struct qeth_card *card, enum qeth_discipline_id disc);
void qeth_remove_discipline(struct qeth_card *card);
/* exports for qeth discipline device drivers */
-extern struct kmem_cache *qeth_core_header_cache;
extern struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS];
struct net_device *qeth_clone_netdev(struct net_device *orig);
-struct qeth_card *qeth_get_card_by_busid(char *bus_id);
void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
int clear_start_mask);
int qeth_threads_running(struct qeth_card *, unsigned long);
@@ -1088,9 +1064,6 @@ struct qeth_cmd_buffer *qeth_ipa_alloc_cmd(struct qeth_card *card,
enum qeth_ipa_cmds cmd_code,
enum qeth_prot_versions prot,
unsigned int data_length);
-struct qeth_cmd_buffer *qeth_alloc_cmd(struct qeth_channel *channel,
- unsigned int length, unsigned int ccws,
- long timeout);
struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card,
enum qeth_ipa_funcs ipa_func,
u16 cmd_code,
@@ -1099,18 +1072,12 @@ struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card,
struct qeth_cmd_buffer *qeth_get_diag_cmd(struct qeth_card *card,
enum qeth_diags_cmds sub_cmd,
unsigned int data_length);
-void qeth_notify_cmd(struct qeth_cmd_buffer *iob, int reason);
-void qeth_put_cmd(struct qeth_cmd_buffer *iob);
int qeth_schedule_recovery(struct qeth_card *card);
int qeth_poll(struct napi_struct *napi, int budget);
void qeth_setadp_promisc_mode(struct qeth_card *card, bool enable);
int qeth_setadpparms_change_macaddr(struct qeth_card *);
void qeth_tx_timeout(struct net_device *, unsigned int txqueue);
-void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
- u16 cmd_length,
- bool (*match)(struct qeth_cmd_buffer *iob,
- struct qeth_cmd_buffer *reply));
int qeth_query_switch_attributes(struct qeth_card *card,
struct qeth_switch_info *sw_info);
int qeth_query_card_info(struct qeth_card *card,
@@ -1118,12 +1085,9 @@ int qeth_query_card_info(struct qeth_card *card,
int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
enum qeth_ipa_isolation_modes mode);
-unsigned int qeth_count_elements(struct sk_buff *skb, unsigned int data_offset);
-int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
- struct sk_buff *skb, struct qeth_hdr *hdr,
- unsigned int offset, unsigned int hd_len,
- int elements_needed);
int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+int qeth_siocdevprivate(struct net_device *dev, struct ifreq *rq,
+ void __user *data, int cmd);
void qeth_dbf_longtext(debug_info_t *id, int level, char *text, ...);
int qeth_configure_cq(struct qeth_card *, enum qeth_cq);
int qeth_hw_trap(struct qeth_card *, enum qeth_diags_trap_action);
@@ -1148,11 +1112,4 @@ int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
struct qeth_hdr *hdr, struct sk_buff *skb,
__be16 proto, unsigned int data_len));
-/* exports for OSN */
-int qeth_osn_assist(struct net_device *, void *, int);
-int qeth_osn_register(unsigned char *read_dev_no, struct net_device **,
- int (*assist_cb)(struct net_device *, void *),
- int (*data_cb)(struct sk_buff *));
-void qeth_osn_deregister(struct net_device *);
-
#endif /* __QETH_CORE_H__ */
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 62f88ccbd03f..5b973f377504 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -57,8 +57,7 @@ struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = {
};
EXPORT_SYMBOL_GPL(qeth_dbf);
-struct kmem_cache *qeth_core_header_cache;
-EXPORT_SYMBOL_GPL(qeth_core_header_cache);
+static struct kmem_cache *qeth_core_header_cache;
static struct kmem_cache *qeth_qdio_outbuf_cache;
static struct device *qeth_core_root_dev;
@@ -101,8 +100,6 @@ static const char *qeth_get_cardname(struct qeth_card *card)
return " OSD Express";
case QETH_CARD_TYPE_IQD:
return " HiperSockets";
- case QETH_CARD_TYPE_OSN:
- return " OSN QDIO";
case QETH_CARD_TYPE_OSM:
return " OSM QDIO";
case QETH_CARD_TYPE_OSX:
@@ -157,8 +154,6 @@ const char *qeth_get_cardname_short(struct qeth_card *card)
}
case QETH_CARD_TYPE_IQD:
return "HiperSockets";
- case QETH_CARD_TYPE_OSN:
- return "OSN";
case QETH_CARD_TYPE_OSM:
return "OSM_1000";
case QETH_CARD_TYPE_OSX:
@@ -431,6 +426,13 @@ static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
return n;
}
+static void qeth_put_cmd(struct qeth_cmd_buffer *iob)
+{
+ if (refcount_dec_and_test(&iob->ref_count)) {
+ kfree(iob->data);
+ kfree(iob);
+ }
+}
static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u8 flags, u32 len,
void *data)
{
@@ -499,12 +501,11 @@ static void qeth_dequeue_cmd(struct qeth_card *card,
spin_unlock_irq(&card->lock);
}
-void qeth_notify_cmd(struct qeth_cmd_buffer *iob, int reason)
+static void qeth_notify_cmd(struct qeth_cmd_buffer *iob, int reason)
{
iob->rc = reason;
complete(&iob->done);
}
-EXPORT_SYMBOL_GPL(qeth_notify_cmd);
static void qeth_flush_local_addrs4(struct qeth_card *card)
{
@@ -781,10 +782,7 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
QETH_CARD_TEXT(card, 5, "chkipad");
if (IS_IPA_REPLY(cmd)) {
- if (cmd->hdr.command != IPA_CMD_SETCCID &&
- cmd->hdr.command != IPA_CMD_DELCCID &&
- cmd->hdr.command != IPA_CMD_MODCCID &&
- cmd->hdr.command != IPA_CMD_SET_DIAG_ASS)
+ if (cmd->hdr.command != IPA_CMD_SET_DIAG_ASS)
qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
return cmd;
}
@@ -819,8 +817,6 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
if (card->discipline->control_event_handler(card, cmd))
return cmd;
return NULL;
- case IPA_CMD_MODCCID:
- return cmd;
case IPA_CMD_REGISTER_LOCAL_ADDR:
if (cmd->hdr.prot_version == QETH_PROT_IPV4)
qeth_add_local_addrs4(card, &cmd->data.local_addrs4);
@@ -877,15 +873,6 @@ static int qeth_check_idx_response(struct qeth_card *card,
return 0;
}
-void qeth_put_cmd(struct qeth_cmd_buffer *iob)
-{
- if (refcount_dec_and_test(&iob->ref_count)) {
- kfree(iob->data);
- kfree(iob);
- }
-}
-EXPORT_SYMBOL_GPL(qeth_put_cmd);
-
static void qeth_release_buffer_cb(struct qeth_card *card,
struct qeth_cmd_buffer *iob,
unsigned int data_length)
@@ -899,9 +886,9 @@ static void qeth_cancel_cmd(struct qeth_cmd_buffer *iob, int rc)
qeth_put_cmd(iob);
}
-struct qeth_cmd_buffer *qeth_alloc_cmd(struct qeth_channel *channel,
- unsigned int length, unsigned int ccws,
- long timeout)
+static struct qeth_cmd_buffer *qeth_alloc_cmd(struct qeth_channel *channel,
+ unsigned int length,
+ unsigned int ccws, long timeout)
{
struct qeth_cmd_buffer *iob;
@@ -927,7 +914,6 @@ struct qeth_cmd_buffer *qeth_alloc_cmd(struct qeth_channel *channel,
iob->length = length;
return iob;
}
-EXPORT_SYMBOL_GPL(qeth_alloc_cmd);
static void qeth_issue_next_read_cb(struct qeth_card *card,
struct qeth_cmd_buffer *iob,
@@ -958,11 +944,6 @@ static void qeth_issue_next_read_cb(struct qeth_card *card,
cmd = qeth_check_ipa_data(card, cmd);
if (!cmd)
goto out;
- if (IS_OSN(card) && card->osn_info.assist_cb &&
- cmd->hdr.command != IPA_CMD_STARTLAN) {
- card->osn_info.assist_cb(card->dev, cmd);
- goto out;
- }
}
/* match against pending cmd requests */
@@ -1835,7 +1816,7 @@ static enum qeth_discipline_id qeth_enforce_discipline(struct qeth_card *card)
{
enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
- if (IS_OSM(card) || IS_OSN(card))
+ if (IS_OSM(card))
disc = QETH_DISCIPLINE_LAYER2;
else if (IS_VM_NIC(card))
disc = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
@@ -1885,7 +1866,6 @@ static void qeth_idx_init(struct qeth_card *card)
card->info.func_level = QETH_IDX_FUNC_LEVEL_IQD;
break;
case QETH_CARD_TYPE_OSD:
- case QETH_CARD_TYPE_OSN:
card->info.func_level = QETH_IDX_FUNC_LEVEL_OSD;
break;
default:
@@ -2442,9 +2422,7 @@ static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
static u8 qeth_mpc_select_prot_type(struct qeth_card *card)
{
- if (IS_OSN(card))
- return QETH_PROT_OSN2;
- return IS_LAYER2(card) ? QETH_PROT_LAYER2 : QETH_PROT_TCPIP;
+ return IS_LAYER2(card) ? QETH_MPC_PROT_L2 : QETH_MPC_PROT_L3;
}
static int qeth_ulp_enable(struct qeth_card *card)
@@ -3000,10 +2978,8 @@ static void qeth_ipa_finalize_cmd(struct qeth_card *card,
__ipa_cmd(iob)->hdr.seqno = card->seqno.ipa++;
}
-void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
- u16 cmd_length,
- bool (*match)(struct qeth_cmd_buffer *iob,
- struct qeth_cmd_buffer *reply))
+static void qeth_prepare_ipa_cmd(struct qeth_card *card,
+ struct qeth_cmd_buffer *iob, u16 cmd_length)
{
u8 prot_type = qeth_mpc_select_prot_type(card);
u16 total_length = iob->length;
@@ -3011,7 +2987,6 @@ void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, total_length,
iob->data);
iob->finalize = qeth_ipa_finalize_cmd;
- iob->match = match;
memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &total_length, 2);
@@ -3022,7 +2997,6 @@ void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
&card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &cmd_length, 2);
}
-EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd);
static bool qeth_ipa_match_reply(struct qeth_cmd_buffer *iob,
struct qeth_cmd_buffer *reply)
@@ -3046,7 +3020,8 @@ struct qeth_cmd_buffer *qeth_ipa_alloc_cmd(struct qeth_card *card,
if (!iob)
return NULL;
- qeth_prepare_ipa_cmd(card, iob, data_length, qeth_ipa_match_reply);
+ qeth_prepare_ipa_cmd(card, iob, data_length);
+ iob->match = qeth_ipa_match_reply;
hdr = &__ipa_cmd(iob)->hdr;
hdr->command = cmd_code;
@@ -3894,7 +3869,8 @@ static int qeth_get_elements_for_frags(struct sk_buff *skb)
* Returns the number of pages, and thus QDIO buffer elements, needed to map the
* skb's data (both its linear part and paged fragments).
*/
-unsigned int qeth_count_elements(struct sk_buff *skb, unsigned int data_offset)
+static unsigned int qeth_count_elements(struct sk_buff *skb,
+ unsigned int data_offset)
{
unsigned int elements = qeth_get_elements_for_frags(skb);
addr_t end = (addr_t)skb->data + skb_headlen(skb);
@@ -3904,7 +3880,6 @@ unsigned int qeth_count_elements(struct sk_buff *skb, unsigned int data_offset)
elements += qeth_get_elements_for_range(start, end);
return elements;
}
-EXPORT_SYMBOL_GPL(qeth_count_elements);
#define QETH_HDR_CACHE_OBJ_SIZE (sizeof(struct qeth_hdr_tso) + \
MAX_TCP_HEADER)
@@ -4192,10 +4167,11 @@ static int __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue,
return 0;
}
-int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
- struct sk_buff *skb, struct qeth_hdr *hdr,
- unsigned int offset, unsigned int hd_len,
- int elements_needed)
+static int qeth_do_send_packet(struct qeth_card *card,
+ struct qeth_qdio_out_q *queue,
+ struct sk_buff *skb, struct qeth_hdr *hdr,
+ unsigned int offset, unsigned int hd_len,
+ unsigned int elements_needed)
{
unsigned int start_index = queue->next_buf_to_fill;
struct qeth_qdio_out_buffer *buffer;
@@ -4275,7 +4251,6 @@ out:
netif_tx_start_queue(txq);
return rc;
}
-EXPORT_SYMBOL_GPL(qeth_do_send_packet);
static void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr,
unsigned int payload_len, struct sk_buff *skb,
@@ -4554,7 +4529,6 @@ static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
case MII_BMCR: /* Basic mode control register */
rc = BMCR_FULLDPLX;
if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) &&
- (card->info.link_type != QETH_LINK_TYPE_OSN) &&
(card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH) &&
(card->info.link_type != QETH_LINK_TYPE_25GBIT_ETH))
rc |= BMCR_SPEED100;
@@ -5266,10 +5240,6 @@ static struct ccw_device_id qeth_ids[] = {
.driver_info = QETH_CARD_TYPE_OSD},
{CCW_DEVICE_DEVTYPE(0x1731, 0x05, 0x1732, 0x05),
.driver_info = QETH_CARD_TYPE_IQD},
-#ifdef CONFIG_QETH_OSN
- {CCW_DEVICE_DEVTYPE(0x1731, 0x06, 0x1732, 0x06),
- .driver_info = QETH_CARD_TYPE_OSN},
-#endif
{CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x03),
.driver_info = QETH_CARD_TYPE_OSM},
#ifdef CONFIG_QETH_OSX
@@ -5628,14 +5598,6 @@ static void qeth_receive_skb(struct qeth_card *card, struct sk_buff *skb,
bool is_cso;
switch (hdr->hdr.l2.id) {
- case QETH_HEADER_TYPE_OSN:
- skb_push(skb, sizeof(*hdr));
- skb_copy_to_linear_data(skb, hdr, sizeof(*hdr));
- QETH_CARD_STAT_ADD(card, rx_bytes, skb->len);
- QETH_CARD_STAT_INC(card, rx_packets);
-
- card->osn_info.data_cb(skb);
- return;
#if IS_ENABLED(CONFIG_QETH_L3)
case QETH_HEADER_TYPE_LAYER3:
qeth_l3_rebuild_skb(card, skb, hdr);
@@ -5750,16 +5712,6 @@ next_packet:
linear_len = sizeof(struct iphdr);
headroom = ETH_HLEN;
break;
- case QETH_HEADER_TYPE_OSN:
- skb_len = hdr->hdr.osn.pdu_length;
- if (!IS_OSN(card)) {
- QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
- goto walk_packet;
- }
-
- linear_len = skb_len;
- headroom = sizeof(struct qeth_hdr);
- break;
default:
if (hdr->hdr.l2.id & QETH_HEADER_MASK_INVAL)
QETH_CARD_STAT_INC(card, rx_frame_errors);
@@ -5777,8 +5729,7 @@ next_packet:
use_rx_sg = (card->options.cq == QETH_CQ_ENABLED) ||
(skb_len > READ_ONCE(priv->rx_copybreak) &&
- !atomic_read(&card->force_alloc_skb) &&
- !IS_OSN(card));
+ !atomic_read(&card->force_alloc_skb));
if (use_rx_sg) {
/* QETH_CQ_ENABLED only: */
@@ -6335,14 +6286,9 @@ void qeth_remove_discipline(struct qeth_card *card)
card->discipline = NULL;
}
-const struct device_type qeth_generic_devtype = {
+static const struct device_type qeth_generic_devtype = {
.name = "qeth_generic",
};
-EXPORT_SYMBOL_GPL(qeth_generic_devtype);
-
-static const struct device_type qeth_osn_devtype = {
- .name = "qeth_osn",
-};
#define DBF_NAME_LEN 20
@@ -6425,10 +6371,6 @@ static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
case QETH_CARD_TYPE_OSM:
dev = alloc_etherdev(sizeof(*priv));
break;
- case QETH_CARD_TYPE_OSN:
- dev = alloc_netdev(sizeof(*priv), "osn%d", NET_NAME_UNKNOWN,
- ether_setup);
- break;
default:
dev = alloc_etherdev_mqs(sizeof(*priv), QETH_MAX_OUT_QUEUES, 1);
}
@@ -6442,23 +6384,19 @@ static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
dev->ml_priv = card;
dev->watchdog_timeo = QETH_TX_TIMEOUT;
- dev->min_mtu = IS_OSN(card) ? 64 : 576;
+ dev->min_mtu = 576;
/* initialized when device first goes online: */
dev->max_mtu = 0;
dev->mtu = 0;
SET_NETDEV_DEV(dev, &card->gdev->dev);
netif_carrier_off(dev);
- if (IS_OSN(card)) {
- dev->ethtool_ops = &qeth_osn_ethtool_ops;
- } else {
- dev->ethtool_ops = &qeth_ethtool_ops;
- dev->priv_flags &= ~IFF_TX_SKB_SHARING;
- dev->hw_features |= NETIF_F_SG;
- dev->vlan_features |= NETIF_F_SG;
- if (IS_IQD(card))
- dev->features |= NETIF_F_SG;
- }
+ dev->ethtool_ops = &qeth_ethtool_ops;
+ dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+ dev->hw_features |= NETIF_F_SG;
+ dev->vlan_features |= NETIF_F_SG;
+ if (IS_IQD(card))
+ dev->features |= NETIF_F_SG;
return dev;
}
@@ -6521,10 +6459,7 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
if (rc)
goto err_chp_desc;
- if (IS_OSN(card))
- gdev->dev.groups = qeth_osn_dev_groups;
- else
- gdev->dev.groups = qeth_dev_groups;
+ gdev->dev.groups = qeth_dev_groups;
enforced_disc = qeth_enforce_discipline(card);
switch (enforced_disc) {
@@ -6538,8 +6473,6 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
if (rc)
goto err_setup_disc;
- gdev->dev.type = IS_OSN(card) ? &qeth_osn_devtype :
- card->discipline->devtype;
break;
}
@@ -6657,36 +6590,42 @@ static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
.shutdown = qeth_core_shutdown,
};
-struct qeth_card *qeth_get_card_by_busid(char *bus_id)
-{
- struct ccwgroup_device *gdev;
- struct qeth_card *card;
-
- gdev = get_ccwgroupdev_by_busid(&qeth_core_ccwgroup_driver, bus_id);
- if (!gdev)
- return NULL;
-
- card = dev_get_drvdata(&gdev->dev);
- put_device(&gdev->dev);
- return card;
-}
-EXPORT_SYMBOL_GPL(qeth_get_card_by_busid);
-
-int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+int qeth_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user *data, int cmd)
{
struct qeth_card *card = dev->ml_priv;
- struct mii_ioctl_data *mii_data;
int rc = 0;
switch (cmd) {
case SIOC_QETH_ADP_SET_SNMP_CONTROL:
- rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
+ rc = qeth_snmp_command(card, data);
break;
case SIOC_QETH_GET_CARD_TYPE:
if ((IS_OSD(card) || IS_OSM(card) || IS_OSX(card)) &&
!IS_VM_NIC(card))
return 1;
return 0;
+ case SIOC_QETH_QUERY_OAT:
+ rc = qeth_query_oat_command(card, data);
+ break;
+ default:
+ if (card->discipline->do_ioctl)
+ rc = card->discipline->do_ioctl(dev, rq, data, cmd);
+ else
+ rc = -EOPNOTSUPP;
+ }
+ if (rc)
+ QETH_CARD_TEXT_(card, 2, "ioce%x", rc);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(qeth_siocdevprivate);
+
+int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct qeth_card *card = dev->ml_priv;
+ struct mii_ioctl_data *mii_data;
+ int rc = 0;
+
+ switch (cmd) {
case SIOCGMIIPHY:
mii_data = if_mii(rq);
mii_data->phy_id = 0;
@@ -6699,14 +6638,8 @@ int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
mii_data->val_out = qeth_mdio_read(dev,
mii_data->phy_id, mii_data->reg_num);
break;
- case SIOC_QETH_QUERY_OAT:
- rc = qeth_query_oat_command(card, rq->ifr_ifru.ifru_data);
- break;
default:
- if (card->discipline->do_ioctl)
- rc = card->discipline->do_ioctl(dev, rq, cmd);
- else
- rc = -EOPNOTSUPP;
+ return -EOPNOTSUPP;
}
if (rc)
QETH_CARD_TEXT_(card, 2, "ioce%x", rc);
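
The split above is the core of the ioctl rework: device-private commands (SIOCDEVPRIVATE and the SIOC_QETH_* range) now arrive at ndo_siocdevprivate together with the user pointer, while qeth_do_ioctl shrinks to the standard MII commands served via ndo_eth_ioctl. A minimal sketch of a driver wiring both callbacks, with hypothetical foo_* names:

static int foo_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
		/* standard MII handling only */
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int foo_siocdevprivate(struct net_device *dev, struct ifreq *rq,
			      void __user *data, int cmd)
{
	if (cmd < SIOCDEVPRIVATE || cmd > SIOCDEVPRIVATE + 15)
		return -EOPNOTSUPP;
	/* interpret 'data' per the private command; no more digging
	 * through rq->ifr_ifru.ifru_data
	 */
	return 0;
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_eth_ioctl		= foo_eth_ioctl,
	.ndo_siocdevprivate	= foo_siocdevprivate,
};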
diff --git a/drivers/s390/net/qeth_core_mpc.c b/drivers/s390/net/qeth_core_mpc.c
index 68c2588b9dcc..d9266f7d8187 100644
--- a/drivers/s390/net/qeth_core_mpc.c
+++ b/drivers/s390/net/qeth_core_mpc.c
@@ -232,9 +232,6 @@ static const struct ipa_cmd_names qeth_ipa_cmd_names[] = {
{IPA_CMD_DELVLAN, "delvlan"},
{IPA_CMD_VNICC, "vnic_characteristics"},
{IPA_CMD_SETBRIDGEPORT_OSA, "set_bridge_port(osa)"},
- {IPA_CMD_SETCCID, "setccid"},
- {IPA_CMD_DELCCID, "delccid"},
- {IPA_CMD_MODCCID, "modccid"},
{IPA_CMD_SETIP, "setip"},
{IPA_CMD_QIPASSIST, "qipassist"},
{IPA_CMD_SETASSPARMS, "setassparms"},
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index e4bde7daf083..6257f00786b3 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -34,8 +34,6 @@ extern const unsigned char IPA_PDU_HEADER[];
/*****************************************************************************/
#define IPA_CMD_INITIATOR_HOST 0x00
#define IPA_CMD_INITIATOR_OSA 0x01
-#define IPA_CMD_INITIATOR_HOST_REPLY 0x80
-#define IPA_CMD_INITIATOR_OSA_REPLY 0x81
#define IPA_CMD_PRIM_VERSION_NO 0x01
struct qeth_ipa_caps {
@@ -66,7 +64,6 @@ static inline bool qeth_ipa_caps_enabled(struct qeth_ipa_caps *caps, u32 mask)
enum qeth_card_types {
QETH_CARD_TYPE_OSD = 1,
QETH_CARD_TYPE_IQD = 5,
- QETH_CARD_TYPE_OSN = 6,
QETH_CARD_TYPE_OSM = 3,
QETH_CARD_TYPE_OSX = 2,
};
@@ -75,12 +72,6 @@ enum qeth_card_types {
#define IS_OSD(card) ((card)->info.type == QETH_CARD_TYPE_OSD)
#define IS_OSM(card) ((card)->info.type == QETH_CARD_TYPE_OSM)
-#ifdef CONFIG_QETH_OSN
-#define IS_OSN(card) ((card)->info.type == QETH_CARD_TYPE_OSN)
-#else
-#define IS_OSN(card) false
-#endif
-
#ifdef CONFIG_QETH_OSX
#define IS_OSX(card) ((card)->info.type == QETH_CARD_TYPE_OSX)
#else
@@ -95,7 +86,6 @@ enum qeth_link_types {
QETH_LINK_TYPE_FAST_ETH = 0x01,
QETH_LINK_TYPE_HSTR = 0x02,
QETH_LINK_TYPE_GBIT_ETH = 0x03,
- QETH_LINK_TYPE_OSN = 0x04,
QETH_LINK_TYPE_10GBIT_ETH = 0x10,
QETH_LINK_TYPE_25GBIT_ETH = 0x12,
QETH_LINK_TYPE_LANE_ETH100 = 0x81,
@@ -126,9 +116,6 @@ enum qeth_ipa_cmds {
IPA_CMD_DELVLAN = 0x26,
IPA_CMD_VNICC = 0x2a,
IPA_CMD_SETBRIDGEPORT_OSA = 0x2b,
- IPA_CMD_SETCCID = 0x41,
- IPA_CMD_DELCCID = 0x42,
- IPA_CMD_MODCCID = 0x43,
IPA_CMD_SETIP = 0xb1,
IPA_CMD_QIPASSIST = 0xb2,
IPA_CMD_SETASSPARMS = 0xb3,
@@ -879,8 +866,7 @@ extern const char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc);
extern const char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd);
/* Helper functions */
-#define IS_IPA_REPLY(cmd) ((cmd->hdr.initiator == IPA_CMD_INITIATOR_HOST) || \
- (cmd->hdr.initiator == IPA_CMD_INITIATOR_OSA_REPLY))
+#define IS_IPA_REPLY(cmd) ((cmd)->hdr.initiator == IPA_CMD_INITIATOR_HOST)
/*****************************************************************************/
/* END OF IP Assist related definitions */
@@ -919,10 +905,9 @@ extern const unsigned char ULP_ENABLE[];
(PDU_ENCAPSULATION(buffer) + 0x17)
#define QETH_ULP_ENABLE_RESP_LINK_TYPE(buffer) \
(PDU_ENCAPSULATION(buffer) + 0x2b)
-/* Layer 2 definitions */
-#define QETH_PROT_LAYER2 0x08
-#define QETH_PROT_TCPIP 0x03
-#define QETH_PROT_OSN2 0x0a
+
+#define QETH_MPC_PROT_L2 0x08
+#define QETH_MPC_PROT_L3 0x03
#define QETH_ULP_ENABLE_PROT_TYPE(buffer) (buffer + 0x50)
#define QETH_IPA_CMD_PROT_TYPE(buffer) (buffer + 0x19)
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index 5815114da468..406be169173c 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -671,11 +671,6 @@ static const struct attribute_group qeth_dev_group = {
.attrs = qeth_dev_attrs,
};
-const struct attribute_group *qeth_osn_dev_groups[] = {
- &qeth_dev_group,
- NULL,
-};
-
const struct attribute_group *qeth_dev_groups[] = {
&qeth_dev_group,
&qeth_dev_extended_group,
diff --git a/drivers/s390/net/qeth_ethtool.c b/drivers/s390/net/qeth_ethtool.c
index 2c4cb300a8fc..3937986f159a 100644
--- a/drivers/s390/net/qeth_ethtool.c
+++ b/drivers/s390/net/qeth_ethtool.c
@@ -469,10 +469,3 @@ const struct ethtool_ops qeth_ethtool_ops = {
.set_per_queue_coalesce = qeth_set_per_queue_coalesce,
.get_link_ksettings = qeth_get_link_ksettings,
};
-
-const struct ethtool_ops qeth_osn_ethtool_ops = {
- .get_strings = qeth_get_strings,
- .get_ethtool_stats = qeth_get_ethtool_stats,
- .get_sset_count = qeth_get_sset_count,
- .get_drvinfo = qeth_get_drvinfo,
-};
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index d7cdd9cfe485..72e84ff9fea5 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -309,17 +309,16 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card)
/* fall back to alternative mechanism: */
}
- if (!IS_OSN(card)) {
- rc = qeth_setadpparms_change_macaddr(card);
- if (!rc)
- goto out;
- QETH_DBF_MESSAGE(2, "READ_MAC Assist failed on device %x: %#x\n",
- CARD_DEVID(card), rc);
- QETH_CARD_TEXT_(card, 2, "1err%04x", rc);
- /* fall back once more: */
- }
+ rc = qeth_setadpparms_change_macaddr(card);
+ if (!rc)
+ goto out;
+ QETH_DBF_MESSAGE(2, "READ_MAC Assist failed on device %x: %#x\n",
+ CARD_DEVID(card), rc);
+ QETH_CARD_TEXT_(card, 2, "1err%04x", rc);
- /* some devices don't support a custom MAC address: */
+ /* Fall back once more, but some devices don't support a custom MAC
+ * address:
+ */
if (IS_OSM(card) || IS_OSX(card))
return (rc) ? rc : -EADDRNOTAVAIL;
eth_hw_addr_random(card->dev);
@@ -334,7 +333,7 @@ static void qeth_l2_register_dev_addr(struct qeth_card *card)
if (!is_valid_ether_addr(card->dev->dev_addr))
qeth_l2_request_initial_mac(card);
- if (!IS_OSN(card) && !qeth_l2_send_setmac(card, card->dev->dev_addr))
+ if (!qeth_l2_send_setmac(card, card->dev->dev_addr))
card->info.dev_addr_is_registered = 1;
else
card->info.dev_addr_is_registered = 0;
@@ -496,44 +495,6 @@ static void qeth_l2_rx_mode_work(struct work_struct *work)
qeth_l2_set_promisc_mode(card);
}
-static int qeth_l2_xmit_osn(struct qeth_card *card, struct sk_buff *skb,
- struct qeth_qdio_out_q *queue)
-{
- gfp_t gfp = GFP_ATOMIC | (skb_pfmemalloc(skb) ? __GFP_MEMALLOC : 0);
- struct qeth_hdr *hdr = (struct qeth_hdr *)skb->data;
- addr_t end = (addr_t)(skb->data + sizeof(*hdr));
- addr_t start = (addr_t)skb->data;
- unsigned int elements = 0;
- unsigned int hd_len = 0;
- int rc;
-
- if (skb->protocol == htons(ETH_P_IPV6))
- return -EPROTONOSUPPORT;
-
- if (qeth_get_elements_for_range(start, end) > 1) {
- /* Misaligned HW header, move it to its own buffer element. */
- hdr = kmem_cache_alloc(qeth_core_header_cache, gfp);
- if (!hdr)
- return -ENOMEM;
- hd_len = sizeof(*hdr);
- skb_copy_from_linear_data(skb, (char *)hdr, hd_len);
- elements++;
- }
-
- elements += qeth_count_elements(skb, hd_len);
- if (elements > queue->max_elements) {
- rc = -E2BIG;
- goto out;
- }
-
- rc = qeth_do_send_packet(card, queue, skb, hdr, hd_len, hd_len,
- elements);
-out:
- if (rc && hd_len)
- kmem_cache_free(qeth_core_header_cache, hdr);
- return rc;
-}
-
static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
@@ -548,12 +509,8 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
txq = qeth_iqd_translate_txq(dev, txq);
queue = card->qdio.out_qs[txq];
- if (IS_OSN(card))
- rc = qeth_l2_xmit_osn(card, skb, queue);
- else
- rc = qeth_xmit(card, skb, queue, vlan_get_protocol(skb),
- qeth_l2_fill_header);
-
+ rc = qeth_xmit(card, skb, queue, vlan_get_protocol(skb),
+ qeth_l2_fill_header);
if (!rc)
return NETDEV_TX_OK;
@@ -760,6 +717,227 @@ static int qeth_l2_dev2br_an_set(struct qeth_card *card, bool enable)
return rc;
}
+struct qeth_l2_br2dev_event_work {
+ struct work_struct work;
+ struct net_device *br_dev;
+ struct net_device *lsync_dev;
+ struct net_device *dst_dev;
+ unsigned long event;
+ unsigned char addr[ETH_ALEN];
+};
+
+static const struct net_device_ops qeth_l2_netdev_ops;
+
+static bool qeth_l2_must_learn(struct net_device *netdev,
+ struct net_device *dstdev)
+{
+ struct qeth_priv *priv;
+
+ priv = netdev_priv(netdev);
+ return (netdev != dstdev &&
+ (priv->brport_features & BR_LEARNING_SYNC) &&
+ !(br_port_flag_is_set(netdev, BR_ISOLATED) &&
+ br_port_flag_is_set(dstdev, BR_ISOLATED)) &&
+ netdev->netdev_ops == &qeth_l2_netdev_ops);
+}
+
+/**
+ * qeth_l2_br2dev_worker() - update local MACs
+ * @work: bridge to device FDB update
+ *
+ * Update local MACs of a learning_sync bridgeport so it can receive
+ * messages for a destination port.
+ * In case of an isolated learning_sync port, also update its isolated
+ * siblings.
+ */
+static void qeth_l2_br2dev_worker(struct work_struct *work)
+{
+ struct qeth_l2_br2dev_event_work *br2dev_event_work =
+ container_of(work, struct qeth_l2_br2dev_event_work, work);
+ struct net_device *lsyncdev = br2dev_event_work->lsync_dev;
+ struct net_device *dstdev = br2dev_event_work->dst_dev;
+ struct net_device *brdev = br2dev_event_work->br_dev;
+ unsigned long event = br2dev_event_work->event;
+ unsigned char *addr = br2dev_event_work->addr;
+ struct qeth_card *card = lsyncdev->ml_priv;
+ struct net_device *lowerdev;
+ struct list_head *iter;
+ int err = 0;
+
+ kfree(br2dev_event_work);
+ QETH_CARD_TEXT_(card, 4, "b2dw%04x", event);
+ QETH_CARD_TEXT_(card, 4, "ma%012lx", ether_addr_to_u64(addr));
+
+ rcu_read_lock();
+ /* Verify preconditions are still valid: */
+ if (!netif_is_bridge_port(lsyncdev) ||
+ brdev != netdev_master_upper_dev_get_rcu(lsyncdev))
+ goto unlock;
+ if (!qeth_l2_must_learn(lsyncdev, dstdev))
+ goto unlock;
+
+ if (br_port_flag_is_set(lsyncdev, BR_ISOLATED)) {
+ /* Update lsyncdev and its isolated sibling(s): */
+ iter = &brdev->adj_list.lower;
+ lowerdev = netdev_next_lower_dev_rcu(brdev, &iter);
+ while (lowerdev) {
+ if (br_port_flag_is_set(lowerdev, BR_ISOLATED)) {
+ switch (event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ err = dev_uc_add(lowerdev, addr);
+ break;
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ err = dev_uc_del(lowerdev, addr);
+ break;
+ default:
+ break;
+ }
+ if (err) {
+ QETH_CARD_TEXT(card, 2, "b2derris");
+ QETH_CARD_TEXT_(card, 2,
+ "err%02x%03d", event,
+ lowerdev->ifindex);
+ }
+ }
+ lowerdev = netdev_next_lower_dev_rcu(brdev, &iter);
+ }
+ } else {
+ switch (event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ err = dev_uc_add(lsyncdev, addr);
+ break;
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ err = dev_uc_del(lsyncdev, addr);
+ break;
+ default:
+ break;
+ }
+ if (err)
+ QETH_CARD_TEXT_(card, 2, "b2derr%02x", event);
+ }
+
+unlock:
+ rcu_read_unlock();
+ dev_put(brdev);
+ dev_put(lsyncdev);
+ dev_put(dstdev);
+}
+
+static int qeth_l2_br2dev_queue_work(struct net_device *brdev,
+ struct net_device *lsyncdev,
+ struct net_device *dstdev,
+ unsigned long event,
+ const unsigned char *addr)
+{
+ struct qeth_l2_br2dev_event_work *worker_data;
+ struct qeth_card *card;
+
+ worker_data = kzalloc(sizeof(*worker_data), GFP_ATOMIC);
+ if (!worker_data)
+ return -ENOMEM;
+ INIT_WORK(&worker_data->work, qeth_l2_br2dev_worker);
+ worker_data->br_dev = brdev;
+ worker_data->lsync_dev = lsyncdev;
+ worker_data->dst_dev = dstdev;
+ worker_data->event = event;
+ ether_addr_copy(worker_data->addr, addr);
+
+ card = lsyncdev->ml_priv;
+	/* Take a reference on the switch port devices and the bridge */
+ dev_hold(brdev);
+ dev_hold(lsyncdev);
+ dev_hold(dstdev);
+ queue_work(card->event_wq, &worker_data->work);
+ return 0;
+}
+
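
The hold-before-queue / put-in-worker pairing above is what keeps the three net_device pointers alive while the work item is pending. Stripped to its skeleton, with hypothetical foo_* names:

struct foo_work {
	struct work_struct work;
	struct net_device *dev;
};

static void foo_worker(struct work_struct *work)
{
	struct foo_work *fw = container_of(work, struct foo_work, work);
	struct net_device *dev = fw->dev;

	kfree(fw);
	/* ... use dev ... */
	dev_put(dev);		/* pairs with dev_hold() at queue time */
}

static int foo_queue(struct net_device *dev, struct workqueue_struct *wq)
{
	struct foo_work *fw = kzalloc(sizeof(*fw), GFP_ATOMIC);

	if (!fw)
		return -ENOMEM;
	INIT_WORK(&fw->work, foo_worker);
	dev_hold(dev);		/* device must outlive the pending work */
	fw->dev = dev;
	queue_work(wq, &fw->work);
	return 0;
}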
+/* Called under rtnl_lock */
+static int qeth_l2_switchdev_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dstdev, *brdev, *lowerdev;
+ struct switchdev_notifier_fdb_info *fdb_info;
+ struct switchdev_notifier_info *info = ptr;
+ struct list_head *iter;
+ struct qeth_card *card;
+ int rc;
+
+ if (!(event == SWITCHDEV_FDB_ADD_TO_DEVICE ||
+ event == SWITCHDEV_FDB_DEL_TO_DEVICE))
+ return NOTIFY_DONE;
+
+ dstdev = switchdev_notifier_info_to_dev(info);
+ brdev = netdev_master_upper_dev_get_rcu(dstdev);
+ if (!brdev || !netif_is_bridge_master(brdev))
+ return NOTIFY_DONE;
+ fdb_info = container_of(info,
+ struct switchdev_notifier_fdb_info,
+ info);
+ iter = &brdev->adj_list.lower;
+ lowerdev = netdev_next_lower_dev_rcu(brdev, &iter);
+ while (lowerdev) {
+ if (qeth_l2_must_learn(lowerdev, dstdev)) {
+ card = lowerdev->ml_priv;
+ QETH_CARD_TEXT_(card, 4, "b2dqw%03x", event);
+ rc = qeth_l2_br2dev_queue_work(brdev, lowerdev,
+ dstdev, event,
+ fdb_info->addr);
+ if (rc) {
+ QETH_CARD_TEXT(card, 2, "b2dqwerr");
+ return NOTIFY_BAD;
+ }
+ }
+ lowerdev = netdev_next_lower_dev_rcu(brdev, &iter);
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block qeth_l2_sw_notifier = {
+ .notifier_call = qeth_l2_switchdev_event,
+};
+
+static refcount_t qeth_l2_switchdev_notify_refcnt;
+
+/* Called under rtnl_lock */
+static void qeth_l2_br2dev_get(void)
+{
+ int rc;
+
+ if (!refcount_inc_not_zero(&qeth_l2_switchdev_notify_refcnt)) {
+ rc = register_switchdev_notifier(&qeth_l2_sw_notifier);
+ if (rc) {
+ QETH_DBF_MESSAGE(2,
+ "failed to register qeth_l2_sw_notifier: %d\n",
+ rc);
+ } else {
+ refcount_set(&qeth_l2_switchdev_notify_refcnt, 1);
+ QETH_DBF_MESSAGE(2, "qeth_l2_sw_notifier registered\n");
+ }
+ }
+ QETH_DBF_TEXT_(SETUP, 2, "b2d+%04d",
+ qeth_l2_switchdev_notify_refcnt.refs.counter);
+}
+
+/* Called under rtnl_lock */
+static void qeth_l2_br2dev_put(void)
+{
+ int rc;
+
+ if (refcount_dec_and_test(&qeth_l2_switchdev_notify_refcnt)) {
+ rc = unregister_switchdev_notifier(&qeth_l2_sw_notifier);
+ if (rc) {
+ QETH_DBF_MESSAGE(2,
+ "failed to unregister qeth_l2_sw_notifier: %d\n",
+ rc);
+ } else {
+ QETH_DBF_MESSAGE(2,
+ "qeth_l2_sw_notifier unregistered\n");
+ }
+ }
+ QETH_DBF_TEXT_(SETUP, 2, "b2d-%04d",
+ qeth_l2_switchdev_notify_refcnt.refs.counter);
+}
+
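
qeth_l2_br2dev_get/put give the global switchdev notifier first-user-registers, last-user-unregisters semantics; since both run under rtnl_lock, the zero-to-one and one-to-zero transitions cannot race. The bare pattern, with hypothetical names:

static struct notifier_block foo_notifier;	/* .notifier_call elided */
static refcount_t foo_refcnt;

static void foo_get(void)	/* first user registers */
{
	if (!refcount_inc_not_zero(&foo_refcnt)) {
		if (!register_switchdev_notifier(&foo_notifier))
			refcount_set(&foo_refcnt, 1);
	}
}

static void foo_put(void)	/* last user unregisters */
{
	if (refcount_dec_and_test(&foo_refcnt))
		unregister_switchdev_notifier(&foo_notifier);
}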
static int qeth_l2_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
struct net_device *dev, u32 filter_mask,
int nlflags)
@@ -853,16 +1031,19 @@ static int qeth_l2_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
} else if (enable) {
qeth_l2_set_pnso_mode(card, QETH_PNSO_ADDR_INFO);
rc = qeth_l2_dev2br_an_set(card, true);
- if (rc)
+ if (rc) {
qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
- else
+ } else {
priv->brport_features |= BR_LEARNING_SYNC;
+ qeth_l2_br2dev_get();
+ }
} else {
rc = qeth_l2_dev2br_an_set(card, false);
if (!rc) {
qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
priv->brport_features ^= BR_LEARNING_SYNC;
qeth_l2_dev2br_fdb_flush(card);
+ qeth_l2_br2dev_put();
}
}
mutex_unlock(&card->sbp_lock);
@@ -879,7 +1060,8 @@ static const struct net_device_ops qeth_l2_netdev_ops = {
.ndo_select_queue = qeth_l2_select_queue,
.ndo_validate_addr = qeth_l2_validate_addr,
.ndo_set_rx_mode = qeth_l2_set_rx_mode,
- .ndo_do_ioctl = qeth_do_ioctl,
+ .ndo_eth_ioctl = qeth_do_ioctl,
+ .ndo_siocdevprivate = qeth_siocdevprivate,
.ndo_set_mac_address = qeth_l2_set_mac_address,
.ndo_vlan_rx_add_vid = qeth_l2_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = qeth_l2_vlan_rx_kill_vid,
@@ -890,23 +1072,8 @@ static const struct net_device_ops qeth_l2_netdev_ops = {
.ndo_bridge_setlink = qeth_l2_bridge_setlink,
};
-static const struct net_device_ops qeth_osn_netdev_ops = {
- .ndo_open = qeth_open,
- .ndo_stop = qeth_stop,
- .ndo_get_stats64 = qeth_get_stats64,
- .ndo_start_xmit = qeth_l2_hard_start_xmit,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_tx_timeout = qeth_tx_timeout,
-};
-
static int qeth_l2_setup_netdev(struct qeth_card *card)
{
- if (IS_OSN(card)) {
- card->dev->netdev_ops = &qeth_osn_netdev_ops;
- card->dev->flags |= IFF_NOARP;
- goto add_napi;
- }
-
card->dev->needed_headroom = sizeof(struct qeth_hdr);
card->dev->netdev_ops = &qeth_l2_netdev_ops;
card->dev->priv_flags |= IFF_UNICAST_FLT;
@@ -952,7 +1119,6 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
PAGE_SIZE * (QDIO_MAX_ELEMENTS_PER_BUFFER - 1));
}
-add_napi:
netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT);
return register_netdev(card->dev);
}
@@ -1044,84 +1210,6 @@ static void qeth_l2_enable_brport_features(struct qeth_card *card)
}
}
-#ifdef CONFIG_QETH_OSN
-static void qeth_osn_assist_cb(struct qeth_card *card,
- struct qeth_cmd_buffer *iob,
- unsigned int data_length)
-{
- qeth_notify_cmd(iob, 0);
- qeth_put_cmd(iob);
-}
-
-int qeth_osn_assist(struct net_device *dev, void *data, int data_len)
-{
- struct qeth_cmd_buffer *iob;
- struct qeth_card *card;
-
- if (data_len < 0)
- return -EINVAL;
- if (!dev)
- return -ENODEV;
- card = dev->ml_priv;
- if (!card)
- return -ENODEV;
- QETH_CARD_TEXT(card, 2, "osnsdmc");
- if (!qeth_card_hw_is_reachable(card))
- return -ENODEV;
-
- iob = qeth_alloc_cmd(&card->write, IPA_PDU_HEADER_SIZE + data_len, 1,
- QETH_IPA_TIMEOUT);
- if (!iob)
- return -ENOMEM;
-
- qeth_prepare_ipa_cmd(card, iob, (u16) data_len, NULL);
-
- memcpy(__ipa_cmd(iob), data, data_len);
- iob->callback = qeth_osn_assist_cb;
- return qeth_send_ipa_cmd(card, iob, NULL, NULL);
-}
-EXPORT_SYMBOL(qeth_osn_assist);
-
-int qeth_osn_register(unsigned char *read_dev_no, struct net_device **dev,
- int (*assist_cb)(struct net_device *, void *),
- int (*data_cb)(struct sk_buff *))
-{
- struct qeth_card *card;
- char bus_id[16];
- u16 devno;
-
- memcpy(&devno, read_dev_no, 2);
- sprintf(bus_id, "0.0.%04x", devno);
- card = qeth_get_card_by_busid(bus_id);
- if (!card || !IS_OSN(card))
- return -ENODEV;
- *dev = card->dev;
-
- QETH_CARD_TEXT(card, 2, "osnreg");
- if ((assist_cb == NULL) || (data_cb == NULL))
- return -EINVAL;
- card->osn_info.assist_cb = assist_cb;
- card->osn_info.data_cb = data_cb;
- return 0;
-}
-EXPORT_SYMBOL(qeth_osn_register);
-
-void qeth_osn_deregister(struct net_device *dev)
-{
- struct qeth_card *card;
-
- if (!dev)
- return;
- card = dev->ml_priv;
- if (!card)
- return;
- QETH_CARD_TEXT(card, 2, "osndereg");
- card->osn_info.assist_cb = NULL;
- card->osn_info.data_cb = NULL;
-}
-EXPORT_SYMBOL(qeth_osn_deregister);
-#endif
-
/* SETBRIDGEPORT support, async notifications */
enum qeth_an_event_type {anev_reg_unreg, anev_abort, anev_reset};
@@ -2190,16 +2278,15 @@ static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
int rc;
- if (IS_OSN(card))
- dev_notice(&gdev->dev, "OSN support will be dropped in 2021\n");
-
qeth_l2_vnicc_set_defaults(card);
mutex_init(&card->sbp_lock);
- if (gdev->dev.type == &qeth_generic_devtype) {
+ if (gdev->dev.type) {
rc = device_add_groups(&gdev->dev, qeth_l2_attr_groups);
if (rc)
return rc;
+ } else {
+ gdev->dev.type = &qeth_l2_devtype;
}
INIT_WORK(&card->rx_mode_work, qeth_l2_rx_mode_work);
@@ -2209,9 +2296,11 @@ static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
static void qeth_l2_remove_device(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+ struct qeth_priv *priv;
- if (gdev->dev.type == &qeth_generic_devtype)
+ if (gdev->dev.type != &qeth_l2_devtype)
device_remove_groups(&gdev->dev, qeth_l2_attr_groups);
+
qeth_set_allowed_threads(card, 0, 1);
wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
@@ -2219,8 +2308,15 @@ static void qeth_l2_remove_device(struct ccwgroup_device *gdev)
qeth_set_offline(card, card->discipline, false);
cancel_work_sync(&card->close_dev_work);
- if (card->dev->reg_state == NETREG_REGISTERED)
+ if (card->dev->reg_state == NETREG_REGISTERED) {
+ priv = netdev_priv(card->dev);
+ if (priv->brport_features & BR_LEARNING_SYNC) {
+ rtnl_lock();
+ qeth_l2_br2dev_put();
+ rtnl_unlock();
+ }
unregister_netdev(card->dev);
+ }
}
static int qeth_l2_set_online(struct qeth_card *card, bool carrier_ok)
@@ -2331,7 +2427,6 @@ static int qeth_l2_control_event(struct qeth_card *card,
}
const struct qeth_discipline qeth_l2_discipline = {
- .devtype = &qeth_l2_devtype,
.setup = qeth_l2_probe_device,
.remove = qeth_l2_remove_device,
.set_online = qeth_l2_set_online,
@@ -2344,6 +2439,7 @@ EXPORT_SYMBOL_GPL(qeth_l2_discipline);
static int __init qeth_l2_init(void)
{
pr_info("register layer 2 discipline\n");
+ refcount_set(&qeth_l2_switchdev_notify_refcnt, 0);
return 0;
}
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index f0d6f205c53c..3a523e700a5a 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1512,7 +1512,7 @@ static int qeth_l3_arp_flush_cache(struct qeth_card *card)
return rc;
}
-static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, void __user *data, int cmd)
{
struct qeth_card *card = dev->ml_priv;
struct qeth_arp_cache_entry arp_entry;
@@ -1532,13 +1532,13 @@ static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rc = -EPERM;
break;
}
- rc = qeth_l3_arp_query(card, rq->ifr_ifru.ifru_data);
+ rc = qeth_l3_arp_query(card, data);
break;
case SIOC_QETH_ARP_ADD_ENTRY:
case SIOC_QETH_ARP_REMOVE_ENTRY:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
- if (copy_from_user(&arp_entry, rq->ifr_data, sizeof(arp_entry)))
+ if (copy_from_user(&arp_entry, data, sizeof(arp_entry)))
return -EFAULT;
arp_cmd = (cmd == SIOC_QETH_ARP_ADD_ENTRY) ?
@@ -1841,7 +1841,8 @@ static const struct net_device_ops qeth_l3_netdev_ops = {
.ndo_select_queue = qeth_l3_iqd_select_queue,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = qeth_l3_set_rx_mode,
- .ndo_do_ioctl = qeth_do_ioctl,
+ .ndo_eth_ioctl = qeth_do_ioctl,
+ .ndo_siocdevprivate = qeth_siocdevprivate,
.ndo_fix_features = qeth_fix_features,
.ndo_set_features = qeth_set_features,
.ndo_tx_timeout = qeth_tx_timeout,
@@ -1856,7 +1857,8 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = {
.ndo_select_queue = qeth_l3_osa_select_queue,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = qeth_l3_set_rx_mode,
- .ndo_do_ioctl = qeth_do_ioctl,
+ .ndo_eth_ioctl = qeth_do_ioctl,
+ .ndo_siocdevprivate = qeth_siocdevprivate,
.ndo_fix_features = qeth_fix_features,
.ndo_set_features = qeth_set_features,
.ndo_tx_timeout = qeth_tx_timeout,
@@ -1940,12 +1942,14 @@ static int qeth_l3_probe_device(struct ccwgroup_device *gdev)
if (!card->cmd_wq)
return -ENOMEM;
- if (gdev->dev.type == &qeth_generic_devtype) {
+ if (gdev->dev.type) {
rc = device_add_groups(&gdev->dev, qeth_l3_attr_groups);
if (rc) {
destroy_workqueue(card->cmd_wq);
return rc;
}
+ } else {
+ gdev->dev.type = &qeth_l3_devtype;
}
INIT_WORK(&card->rx_mode_work, qeth_l3_rx_mode_work);
@@ -1956,7 +1960,7 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
{
struct qeth_card *card = dev_get_drvdata(&cgdev->dev);
- if (cgdev->dev.type == &qeth_generic_devtype)
+ if (cgdev->dev.type != &qeth_l3_devtype)
device_remove_groups(&cgdev->dev, qeth_l3_attr_groups);
qeth_set_allowed_threads(card, 0, 1);
@@ -2065,7 +2069,6 @@ static int qeth_l3_control_event(struct qeth_card *card,
}
const struct qeth_discipline qeth_l3_discipline = {
- .devtype = &qeth_l3_devtype,
.setup = qeth_l3_probe_device,
.remove = qeth_l3_remove_device,
.set_online = qeth_l3_set_online,
diff --git a/drivers/scsi/cxgbi/cxgb4i/Kconfig b/drivers/scsi/cxgbi/cxgb4i/Kconfig
index 8b0deece9758..63c8a0f3cd0c 100644
--- a/drivers/scsi/cxgbi/cxgb4i/Kconfig
+++ b/drivers/scsi/cxgbi/cxgb4i/Kconfig
@@ -2,6 +2,7 @@
config SCSI_CXGB4_ISCSI
tristate "Chelsio T4 iSCSI support"
depends on PCI && INET && (IPV6 || IPV6=n)
+ depends on PTP_1588_CLOCK_OPTIONAL
depends on THERMAL || !THERMAL
depends on ETHERNET
depends on TLS || TLS=n
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index dcbba9621b21..5d24c1b6663b 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -524,7 +524,7 @@ static const struct net_device_ops cvm_oct_npi_netdev_ops = {
.ndo_start_xmit = cvm_oct_xmit,
.ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
.ndo_set_mac_address = cvm_oct_common_set_mac_address,
- .ndo_do_ioctl = cvm_oct_ioctl,
+ .ndo_eth_ioctl = cvm_oct_ioctl,
.ndo_change_mtu = cvm_oct_common_change_mtu,
.ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -540,7 +540,7 @@ static const struct net_device_ops cvm_oct_xaui_netdev_ops = {
.ndo_start_xmit = cvm_oct_xmit,
.ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
.ndo_set_mac_address = cvm_oct_common_set_mac_address,
- .ndo_do_ioctl = cvm_oct_ioctl,
+ .ndo_eth_ioctl = cvm_oct_ioctl,
.ndo_change_mtu = cvm_oct_common_change_mtu,
.ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -556,7 +556,7 @@ static const struct net_device_ops cvm_oct_sgmii_netdev_ops = {
.ndo_start_xmit = cvm_oct_xmit,
.ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
.ndo_set_mac_address = cvm_oct_common_set_mac_address,
- .ndo_do_ioctl = cvm_oct_ioctl,
+ .ndo_eth_ioctl = cvm_oct_ioctl,
.ndo_change_mtu = cvm_oct_common_change_mtu,
.ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -570,7 +570,7 @@ static const struct net_device_ops cvm_oct_spi_netdev_ops = {
.ndo_start_xmit = cvm_oct_xmit,
.ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
.ndo_set_mac_address = cvm_oct_common_set_mac_address,
- .ndo_do_ioctl = cvm_oct_ioctl,
+ .ndo_eth_ioctl = cvm_oct_ioctl,
.ndo_change_mtu = cvm_oct_common_change_mtu,
.ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -586,7 +586,7 @@ static const struct net_device_ops cvm_oct_rgmii_netdev_ops = {
.ndo_start_xmit = cvm_oct_xmit,
.ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
.ndo_set_mac_address = cvm_oct_common_set_mac_address,
- .ndo_do_ioctl = cvm_oct_ioctl,
+ .ndo_eth_ioctl = cvm_oct_ioctl,
.ndo_change_mtu = cvm_oct_common_change_mtu,
.ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -599,7 +599,7 @@ static const struct net_device_ops cvm_oct_pow_netdev_ops = {
.ndo_start_xmit = cvm_oct_xmit_pow,
.ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
.ndo_set_mac_address = cvm_oct_common_set_mac_address,
- .ndo_do_ioctl = cvm_oct_ioctl,
+ .ndo_eth_ioctl = cvm_oct_ioctl,
.ndo_change_mtu = cvm_oct_common_change_mtu,
.ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/staging/qlge/qlge_main.c b/drivers/staging/qlge/qlge_main.c
index 19a02e958865..8fcdf89da8aa 100644
--- a/drivers/staging/qlge/qlge_main.c
+++ b/drivers/staging/qlge/qlge_main.c
@@ -4547,7 +4547,8 @@ static int qlge_probe(struct pci_dev *pdev,
static int cards_found;
int err;
- devlink = devlink_alloc(&qlge_devlink_ops, sizeof(struct qlge_adapter));
+ devlink = devlink_alloc(&qlge_devlink_ops, sizeof(struct qlge_adapter),
+ &pdev->dev);
if (!devlink)
return -ENOMEM;
@@ -4613,7 +4614,7 @@ static int qlge_probe(struct pci_dev *pdev,
goto netdev_free;
}
- err = devlink_register(devlink, &pdev->dev);
+ err = devlink_register(devlink);
if (err)
goto netdev_free;
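
Both the qlge hunk here and the ptp_ocp probe earlier track the same devlink API change: devlink_alloc() now takes the owning device up front, and devlink_register() loses its dev argument. A minimal probe-order sketch, assuming hypothetical foo_* names:

static const struct devlink_ops foo_devlink_ops;	/* hypothetical */
struct foo_priv { int dummy; };

static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct devlink *devlink;
	int err;

	/* the owning device is bound at allocation time ... */
	devlink = devlink_alloc(&foo_devlink_ops, sizeof(struct foo_priv),
				&pdev->dev);
	if (!devlink)
		return -ENOMEM;

	/* ... and registration no longer takes a dev argument */
	err = devlink_register(devlink);
	if (err) {
		devlink_free(devlink);
		return err;
	}
	return 0;
}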
diff --git a/drivers/staging/rtl8188eu/include/osdep_intf.h b/drivers/staging/rtl8188eu/include/osdep_intf.h
index 5012b9176526..34decb03e92f 100644
--- a/drivers/staging/rtl8188eu/include/osdep_intf.h
+++ b/drivers/staging/rtl8188eu/include/osdep_intf.h
@@ -22,6 +22,8 @@ void rtw_stop_drv_threads(struct adapter *padapter);
void rtw_cancel_all_timer(struct adapter *padapter);
int rtw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+int rtw_android_priv_cmd(struct net_device *dev, struct ifreq *rq,
+ void __user *data, int cmd);
struct net_device *rtw_init_netdev(void);
u16 rtw_recv_select_queue(struct sk_buff *skb);
diff --git a/drivers/staging/rtl8188eu/include/rtw_android.h b/drivers/staging/rtl8188eu/include/rtw_android.h
index 2c26993b8205..3018fc1e8de8 100644
--- a/drivers/staging/rtl8188eu/include/rtw_android.h
+++ b/drivers/staging/rtl8188eu/include/rtw_android.h
@@ -45,6 +45,7 @@ enum ANDROID_WIFI_CMD {
ANDROID_WIFI_CMD_MAX
};
-int rtw_android_priv_cmd(struct net_device *net, struct ifreq *ifr, int cmd);
+int rtw_android_priv_cmd(struct net_device *net, struct ifreq *ifr,
+ void __user *data, int cmd);
#endif /* __RTW_ANDROID_H__ */
diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
index b958a8d882b0..193a3dde462c 100644
--- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
@@ -2769,9 +2769,6 @@ int rtw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
ret = rtw_hostapd_ioctl(dev, &wrq->u.data);
break;
#endif /* CONFIG_88EU_AP_MODE */
- case (SIOCDEVPRIVATE + 1):
- ret = rtw_android_priv_cmd(dev, rq, cmd);
- break;
default:
ret = -EOPNOTSUPP;
break;
diff --git a/drivers/staging/rtl8188eu/os_dep/os_intfs.c b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
index 423c382e3d20..596e03e7b286 100644
--- a/drivers/staging/rtl8188eu/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
@@ -288,6 +288,7 @@ static const struct net_device_ops rtw_netdev_ops = {
.ndo_set_mac_address = rtw_net_set_mac_address,
.ndo_get_stats = rtw_net_get_stats,
.ndo_do_ioctl = rtw_ioctl,
+ .ndo_siocdevprivate = rtw_android_priv_cmd,
};
static const struct device_type wlan_type = {
diff --git a/drivers/staging/rtl8188eu/os_dep/rtw_android.c b/drivers/staging/rtl8188eu/os_dep/rtw_android.c
index 3c5446999686..a13df3880378 100644
--- a/drivers/staging/rtl8188eu/os_dep/rtw_android.c
+++ b/drivers/staging/rtl8188eu/os_dep/rtw_android.c
@@ -5,6 +5,7 @@
*
******************************************************************************/
+#include <linux/compat.h>
#include <linux/module.h>
#include <linux/netdevice.h>
@@ -116,7 +117,8 @@ static int android_get_p2p_addr(struct net_device *net, char *command,
return ETH_ALEN;
}
-int rtw_android_priv_cmd(struct net_device *net, struct ifreq *ifr, int cmd)
+int rtw_android_priv_cmd(struct net_device *net, struct ifreq *ifr,
+ void __user *data, int cmd)
{
int ret = 0;
char *command;
@@ -124,9 +126,15 @@ int rtw_android_priv_cmd(struct net_device *net, struct ifreq *ifr, int cmd)
int bytes_written = 0;
struct android_wifi_priv_cmd priv_cmd;
- if (!ifr->ifr_data)
+ if (cmd != SIOCDEVPRIVATE)
+ return -EOPNOTSUPP;
+
+ if (in_compat_syscall()) /* to be implemented */
+ return -EOPNOTSUPP;
+
+ if (!data)
return -EINVAL;
- if (copy_from_user(&priv_cmd, ifr->ifr_data, sizeof(priv_cmd)))
+ if (copy_from_user(&priv_cmd, data, sizeof(priv_cmd)))
return -EFAULT;
if (priv_cmd.total_len < 1)
return -EINVAL;
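
The in_compat_syscall() bail-out exists because the private command block copied from user space embeds a user pointer, so its size and layout differ for 32-bit tasks on a 64-bit kernel; copying the native layout from a compat caller would misparse both fields. A sketch of the hazard, with a hypothetical struct:

struct foo_priv_cmd {
	char __user *buf;	/* 8 bytes native, 4 bytes for compat tasks */
	int total_len;
};

static int foo_siocdevprivate(struct net_device *dev, struct ifreq *rq,
			      void __user *data, int cmd)
{
	struct foo_priv_cmd priv_cmd;

	if (in_compat_syscall())	/* compat layout not handled yet */
		return -EOPNOTSUPP;

	/* safe only for native callers: sizeof() matches their struct */
	if (copy_from_user(&priv_cmd, data, sizeof(priv_cmd)))
		return -EFAULT;
	return 0;
}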
diff --git a/drivers/staging/rtl8723bs/include/osdep_intf.h b/drivers/staging/rtl8723bs/include/osdep_intf.h
index 111e0179712a..5badd441c14b 100644
--- a/drivers/staging/rtl8723bs/include/osdep_intf.h
+++ b/drivers/staging/rtl8723bs/include/osdep_intf.h
@@ -48,6 +48,8 @@ void rtw_stop_drv_threads(struct adapter *padapter);
void rtw_cancel_all_timer(struct adapter *padapter);
int rtw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+int rtw_siocdevprivate(struct net_device *dev, struct ifreq *rq,
+ void __user *data, int cmd);
int rtw_init_netdev_name(struct net_device *pnetdev, const char *ifname);
struct net_device *rtw_init_netdev(struct adapter *padapter);
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
index f95000df8942..aa7bd76bb5f1 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
@@ -4485,6 +4485,21 @@ exit:
return err;
}
+int rtw_siocdevprivate(struct net_device *dev, struct ifreq *rq,
+ void __user *data, int cmd)
+{
+ struct iwreq *wrq = (struct iwreq *)rq;
+
+	/* little hope of fixing this; better to remove the whole function */
+ if (in_compat_syscall())
+ return -EOPNOTSUPP;
+
+ if (cmd != SIOCDEVPRIVATE)
+ return -EOPNOTSUPP;
+
+ return rtw_ioctl_wext_private(dev, &wrq->u);
+}
+
int rtw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct iwreq *wrq = (struct iwreq *)rq;
@@ -4497,9 +4512,6 @@ int rtw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
case RTL_IOCTL_HOSTAPD:
ret = rtw_hostapd_ioctl(dev, &wrq->u.data);
break;
- case SIOCDEVPRIVATE:
- ret = rtw_ioctl_wext_private(dev, &wrq->u);
- break;
default:
ret = -EOPNOTSUPP;
break;
diff --git a/drivers/staging/rtl8723bs/os_dep/os_intfs.c b/drivers/staging/rtl8723bs/os_dep/os_intfs.c
index 648456b992bb..9e38b53d3b4a 100644
--- a/drivers/staging/rtl8723bs/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8723bs/os_dep/os_intfs.c
@@ -459,6 +459,7 @@ static const struct net_device_ops rtw_netdev_ops = {
.ndo_set_mac_address = rtw_net_set_mac_address,
.ndo_get_stats = rtw_net_get_stats,
.ndo_do_ioctl = rtw_ioctl,
+ .ndo_siocdevprivate = rtw_siocdevprivate,
};
int rtw_init_netdev_name(struct net_device *pnetdev, const char *ifname)
diff --git a/drivers/staging/wlan-ng/p80211netdev.c b/drivers/staging/wlan-ng/p80211netdev.c
index 6f470e7ba647..1c62130a5eee 100644
--- a/drivers/staging/wlan-ng/p80211netdev.c
+++ b/drivers/staging/wlan-ng/p80211netdev.c
@@ -98,8 +98,8 @@ static int p80211knetdev_stop(struct net_device *netdev);
static netdev_tx_t p80211knetdev_hard_start_xmit(struct sk_buff *skb,
struct net_device *netdev);
static void p80211knetdev_set_multicast_list(struct net_device *dev);
-static int p80211knetdev_do_ioctl(struct net_device *dev, struct ifreq *ifr,
- int cmd);
+static int p80211knetdev_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
+ void __user *data, int cmd);
static int p80211knetdev_set_mac_address(struct net_device *dev, void *addr);
static void p80211knetdev_tx_timeout(struct net_device *netdev, unsigned int txqueue);
static int p80211_rx_typedrop(struct wlandevice *wlandev, u16 fc);
@@ -461,56 +461,8 @@ static void p80211knetdev_set_multicast_list(struct net_device *dev)
wlandev->set_multicast_list(wlandev, dev);
}
-#ifdef SIOCETHTOOL
-
-static int p80211netdev_ethtool(struct wlandevice *wlandev,
- void __user *useraddr)
-{
- u32 ethcmd;
- struct ethtool_drvinfo info;
- struct ethtool_value edata;
-
- memset(&info, 0, sizeof(info));
- memset(&edata, 0, sizeof(edata));
-
- if (copy_from_user(&ethcmd, useraddr, sizeof(ethcmd)))
- return -EFAULT;
-
- switch (ethcmd) {
- case ETHTOOL_GDRVINFO:
- info.cmd = ethcmd;
- snprintf(info.driver, sizeof(info.driver), "p80211_%s",
- wlandev->nsdname);
- snprintf(info.version, sizeof(info.version), "%s",
- WLAN_RELEASE);
-
- if (copy_to_user(useraddr, &info, sizeof(info)))
- return -EFAULT;
- return 0;
-#ifdef ETHTOOL_GLINK
- case ETHTOOL_GLINK:
- edata.cmd = ethcmd;
-
- if (wlandev->linkstatus &&
- (wlandev->macmode != WLAN_MACMODE_NONE)) {
- edata.data = 1;
- } else {
- edata.data = 0;
- }
-
- if (copy_to_user(useraddr, &edata, sizeof(edata)))
- return -EFAULT;
- return 0;
-#endif
- }
-
- return -EOPNOTSUPP;
-}
-
-#endif
-
/*----------------------------------------------------------------
- * p80211knetdev_do_ioctl
+ * p80211knetdev_siocdevprivate
*
* Handle an ioctl call on one of our devices. Everything Linux
* ioctl specific is done here. Then we pass the contents of the
@@ -537,8 +489,9 @@ static int p80211netdev_ethtool(struct wlandevice *wlandev,
* locks.
*----------------------------------------------------------------
*/
-static int p80211knetdev_do_ioctl(struct net_device *dev,
- struct ifreq *ifr, int cmd)
+static int p80211knetdev_siocdevprivate(struct net_device *dev,
+ struct ifreq *ifr,
+ void __user *data, int cmd)
{
int result = 0;
struct p80211ioctl_req *req = (struct p80211ioctl_req *)ifr;
@@ -547,13 +500,8 @@ static int p80211knetdev_do_ioctl(struct net_device *dev,
netdev_dbg(dev, "rx'd ioctl, cmd=%d, len=%d\n", cmd, req->len);
-#ifdef SIOCETHTOOL
- if (cmd == SIOCETHTOOL) {
- result =
- p80211netdev_ethtool(wlandev, (void __user *)ifr->ifr_data);
- goto bail;
- }
-#endif
+ if (in_compat_syscall())
+ return -EOPNOTSUPP;
/* Test the magic, assume ifr is good if it's there */
if (req->magic != P80211_IOCTL_MAGIC) {
@@ -569,7 +517,7 @@ static int p80211knetdev_do_ioctl(struct net_device *dev,
goto bail;
}
- msgbuf = memdup_user(req->data, req->len);
+ msgbuf = memdup_user(data, req->len);
if (IS_ERR(msgbuf)) {
result = PTR_ERR(msgbuf);
goto bail;
@@ -578,10 +526,8 @@ static int p80211knetdev_do_ioctl(struct net_device *dev,
result = p80211req_dorequest(wlandev, msgbuf);
if (result == 0) {
- if (copy_to_user
- (req->data, msgbuf, req->len)) {
+ if (copy_to_user(data, msgbuf, req->len))
result = -EFAULT;
- }
}
kfree(msgbuf);
@@ -682,7 +628,7 @@ static const struct net_device_ops p80211_netdev_ops = {
.ndo_stop = p80211knetdev_stop,
.ndo_start_xmit = p80211knetdev_hard_start_xmit,
.ndo_set_rx_mode = p80211knetdev_set_multicast_list,
- .ndo_do_ioctl = p80211knetdev_do_ioctl,
+ .ndo_siocdevprivate = p80211knetdev_siocdevprivate,
.ndo_set_mac_address = p80211knetdev_set_mac_address,
.ndo_tx_timeout = p80211knetdev_tx_timeout,
.ndo_validate_addr = eth_validate_addr,
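
The in_compat_syscall() bail-out added above exists because struct p80211ioctl_req smuggles a user pointer through the ifreq payload, and pointer size differs between a 32-bit caller and a 64-bit kernel. Sketched with a hypothetical mirror of the layout, not the real struct:

/* Hypothetical mirror of the layout problem. */
struct demo_req {
	char	name[16];
	void	*data;		/* 8 bytes natively, 4 in a 32-bit task */
	u32	magic;
	u16	len;
};
/*
 * A compat caller packs 'magic' and 'len' at different offsets, so a
 * 64-bit kernel reading the native layout would see garbage; returning
 * -EOPNOTSUPP is the honest option short of a translation layer.
 */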
diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
index 5bb928b7873e..3e3b8873fa29 100644
--- a/drivers/tty/synclink_gt.c
+++ b/drivers/tty/synclink_gt.c
@@ -1524,11 +1524,11 @@ static int hdlcdev_close(struct net_device *dev)
*
* Return: 0 if success, otherwise error code
*/
-static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+static int hdlcdev_ioctl(struct net_device *dev, struct if_settings *ifs)
{
const size_t size = sizeof(sync_serial_settings);
sync_serial_settings new_line;
- sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
+ sync_serial_settings __user *line = ifs->ifs_ifsu.sync;
struct slgt_info *info = dev_to_port(dev);
unsigned int flags;
@@ -1538,17 +1538,14 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
if (info->port.count)
return -EBUSY;
- if (cmd != SIOCWANDEV)
- return hdlc_ioctl(dev, ifr, cmd);
-
memset(&new_line, 0, sizeof(new_line));
- switch(ifr->ifr_settings.type) {
+ switch (ifs->type) {
case IF_GET_IFACE: /* return current sync_serial_settings */
- ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
- if (ifr->ifr_settings.size < size) {
- ifr->ifr_settings.size = size; /* data size wanted */
+ ifs->type = IF_IFACE_SYNC_SERIAL;
+ if (ifs->size < size) {
+ ifs->size = size; /* data size wanted */
return -ENOBUFS;
}
@@ -1615,7 +1612,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return 0;
default:
- return hdlc_ioctl(dev, ifr, cmd);
+ return hdlc_ioctl(dev, ifs);
}
}
@@ -1688,7 +1685,7 @@ static const struct net_device_ops hdlcdev_ops = {
.ndo_open = hdlcdev_open,
.ndo_stop = hdlcdev_close,
.ndo_start_xmit = hdlc_start_xmit,
- .ndo_do_ioctl = hdlcdev_ioctl,
+ .ndo_siocwandev = hdlcdev_ioctl,
.ndo_tx_timeout = hdlcdev_tx_timeout,
};
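
The synclink_gt conversion is the SIOCWANDEV analogue: .ndo_siocwandev receives struct if_settings directly, so both the cmd check and the ifr->ifr_settings indirection disappear from the driver. A minimal sketch of the resulting handler shape, assuming a hypothetical foo driver (hdlc_ioctl() with the new two-argument signature is the generic fallback shown in the hunk above):

static int foo_siocwandev(struct net_device *dev, struct if_settings *ifs)
{
	switch (ifs->type) {
	case IF_GET_IFACE:
		/* report the interface type and current settings */
		ifs->type = IF_IFACE_SYNC_SERIAL;
		if (ifs->size < sizeof(sync_serial_settings)) {
			/* tell the caller how big a buffer to retry with */
			ifs->size = sizeof(sync_serial_settings);
			return -ENOBUFS;
		}
		/* ... copy_to_user(ifs->ifs_ifsu.sync, ...) ... */
		return 0;
	default:
		return hdlc_ioctl(dev, ifs);	/* generic HDLC handling */
	}
}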
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index 3cc12fcab08d..5906cada2293 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -572,7 +572,7 @@ static int cq_create(struct mlx5_vdpa_net *ndev, u16 idx, u32 num_ent)
cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
MLX5_SET(cqc, cqc, log_cq_size, ilog2(num_ent));
MLX5_SET(cqc, cqc, uar_page, ndev->mvdev.res.uar->index);
- MLX5_SET(cqc, cqc, c_eqn, eqn);
+ MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
MLX5_SET64(cqc, cqc, dbr_addr, vcq->db.dma);
err = mlx5_core_create_cq(mdev, &vcq->mcq, in, inlen, out, sizeof(out));
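
The mlx5_vnet hunk only chases a field rename in the mlx5_ifc command layout: c_eqn became c_eqn_or_apu_element because the slot can now name either a completion EQ or an APU element. Since MLX5_SET() resolves the field's bit range by name at compile time, the encoded command should be byte-for-byte unchanged; only the identifier moved:

/* MLX5_SET(struct_name, buffer, field, value) writes 'value' into the
 * bit range mlx5_ifc.h defines for 'field'. */
MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);	/* was: c_eqn */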