Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/3com/3c59x.c | 62
-rw-r--r--  drivers/net/ethernet/8390/Kconfig | 17
-rw-r--r--  drivers/net/ethernet/8390/Makefile | 1
-rw-r--r--  drivers/net/ethernet/8390/ax88796.c | 228
-rw-r--r--  drivers/net/ethernet/8390/xsurf100.c | 382
-rw-r--r--  drivers/net/ethernet/Kconfig | 15
-rw-r--r--  drivers/net/ethernet/Makefile | 1
-rw-r--r--  drivers/net/ethernet/amd/amd8111e.c | 16
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 137
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c | 217
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-main.c | 20
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-mdio.c | 167
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-pci.c | 36
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c | 349
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe.h | 31
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.c | 6
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2.c | 44
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 7
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/Makefile | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 164
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h | 19
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c | 166
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c | 124
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.h | 23
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c | 8
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 60
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 22
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h | 17
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c | 6
-rw-r--r--  drivers/net/ethernet/cadence/macb_main.c | 12
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c | 60
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h | 14
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_core.c | 510
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_ethtool.c | 727
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_main.c | 372
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 259
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c | 15
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/liquidio_common.h | 87
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_device.c | 12
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_device.h | 16
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_iq.h | 4
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c | 52
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h | 7
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_network.h | 79
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_main.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/sge.c | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h | 20
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c | 87
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 42
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c | 28
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c | 22
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c | 94
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 125
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c | 18
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/l2t.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sge.c | 123
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/srq.c | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_chip_type.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 196
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_msg.h | 5
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h | 6
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | 5
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | 49
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h | 12
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 21
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/sge.c | 7
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h | 5
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c | 73
-rw-r--r--  drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h | 15
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_ethtool.c | 18
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_main.c | 20
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_res.c | 7
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_dev.c | 18
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_dev.h | 2
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_devcmd.h | 20
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_nic.h | 3
-rw-r--r--  drivers/net/ethernet/cortina/gemini.c | 6
-rw-r--r--  drivers/net/ethernet/ethoc.c | 6
-rw-r--r--  drivers/net/ethernet/freescale/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/freescale/Makefile | 1
-rw-r--r--  drivers/net/ethernet/freescale/fec.h | 2
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 12
-rw-r--r--  drivers/net/ethernet/freescale/fec_ptp.c | 6
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_port.c | 8
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.h | 3
-rw-r--r--  drivers/net/ethernet/freescale/gianfar_ethtool.c | 23
-rw-r--r--  drivers/net/ethernet/freescale/gianfar_ptp.c | 572
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c | 23
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hnae3.c | 88
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hnae3.h | 20
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 565
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | 6
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 25
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c | 29
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h | 22
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 694
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 43
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c | 98
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c | 7
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c | 50
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h | 5
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 190
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h | 6
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c | 23
-rw-r--r--  drivers/net/ethernet/huawei/hinic/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_main.c | 8
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 223
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.h | 2
-rw-r--r--  drivers/net/ethernet/intel/e100.c | 28
-rw-r--r--  drivers/net/ethernet/intel/e1000/Makefile | 26
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000.h | 29
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_ethtool.c | 23
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_hw.c | 28
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_hw.h | 28
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_main.c | 28
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_osdep.h | 29
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_param.c | 28
-rw-r--r--  drivers/net/ethernet/intel/e1000e/80003es2lan.c | 21
-rw-r--r--  drivers/net/ethernet/intel/e1000e/80003es2lan.h | 21
-rw-r--r--  drivers/net/ethernet/intel/e1000e/82571.c | 21
-rw-r--r--  drivers/net/ethernet/intel/e1000e/82571.h | 21
-rw-r--r--  drivers/net/ethernet/intel/e1000e/Makefile | 27
-rw-r--r--  drivers/net/ethernet/intel/e1000e/defines.h | 21
-rw-r--r--  drivers/net/ethernet/intel/e1000e/e1000.h | 21
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ethtool.c | 21
-rw-r--r--  drivers/net/ethernet/intel/e1000e/hw.h | 21
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.c | 21
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.h | 21
-rw-r--r--  drivers/net/ethernet/intel/e1000e/mac.c | 21
-rw-r--r--  drivers/net/ethernet/intel/e1000e/mac.h | 21
-rw-r--r--  drivers/net/ethernet/intel/e1000e/manage.c | 21
-rw-r--r--  drivers/net/ethernet/intel/e1000e/manage.h | 21
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 36
-rw-r--r--  drivers/net/ethernet/intel/e1000e/nvm.c | 21
-rw-r--r--  drivers/net/ethernet/intel/e1000e/nvm.h | 21
-rw-r--r--  drivers/net/ethernet/intel/e1000e/param.c | 21
-rw-r--r--  drivers/net/ethernet/intel/e1000e/phy.c | 21
-rw-r--r--  drivers/net/ethernet/intel/e1000e/phy.h | 21
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ptp.c | 21
-rw-r--r--  drivers/net/ethernet/intel/e1000e/regs.h | 21
-rw-r--r--  drivers/net/ethernet/intel/fm10k/Makefile | 23
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k.h | 20
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_common.c | 20
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_common.h | 20
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c | 20
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c | 20
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c | 136
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_iov.c | 20
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_main.c | 27
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_mbx.c | 20
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_mbx.h | 20
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_netdev.c | 94
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 20
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_pf.c | 20
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_pf.h | 20
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_tlv.c | 20
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_tlv.h | 20
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_type.h | 20
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_vf.c | 20
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_vf.h | 20
-rw-r--r--  drivers/net/ethernet/intel/i40e/Makefile | 26
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h | 33
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq.c | 26
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq.h | 26
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 26
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_alloc.h | 26
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_client.c | 32
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_client.h | 26
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_common.c | 63
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_dcb.c | 117
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_dcb.h | 26
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c | 37
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 34
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_devids.h | 26
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_diag.c | 26
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_diag.h | 26
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 542
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_hmc.c | 27
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_hmc.h | 26
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c | 26
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h | 26
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 197
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_nvm.c | 27
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_osdep.h | 26
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_prototype.h | 26
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ptp.c | 84
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_register.h | 26
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_status.h | 26
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_trace.h | 23
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 147
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.h | 31
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_type.h | 34
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 111
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h | 26
-rw-r--r--  drivers/net/ethernet/intel/i40evf/Makefile | 26
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_adminq.c | 26
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_adminq.h | 26
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h | 26
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_alloc.h | 26
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_common.c | 27
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_devids.h | 26
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_hmc.h | 26
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h | 26
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_osdep.h | 26
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_prototype.h | 26
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_register.h | 26
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_status.h | 26
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_trace.h | 23
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 30
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.h | 26
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_type.h | 36
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf.h | 27
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_client.c | 6
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_client.h | 8
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c | 33
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_main.c | 59
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c | 37
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | 13
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_nvm.c | 7
-rw-r--r--  drivers/net/ethernet/intel/igb/Makefile | 28
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_82575.c | 23
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_82575.h | 23
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_defines.h | 25
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_hw.h | 22
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_i210.c | 23
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_i210.h | 23
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_mac.c | 23
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_mac.h | 23
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_mbx.c | 23
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_mbx.h | 23
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_nvm.c | 22
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_nvm.h | 23
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_phy.c | 23
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_phy.h | 23
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_regs.h | 23
-rw-r--r--  drivers/net/ethernet/intel/igb/igb.h | 36
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ethtool.c | 96
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_hwmon.c | 23
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 404
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ptp.c | 19
-rw-r--r--  drivers/net/ethernet/intel/igbvf/Makefile | 28
-rw-r--r--  drivers/net/ethernet/intel/igbvf/defines.h | 26
-rw-r--r--  drivers/net/ethernet/intel/igbvf/ethtool.c | 26
-rw-r--r--  drivers/net/ethernet/intel/igbvf/igbvf.h | 26
-rw-r--r--  drivers/net/ethernet/intel/igbvf/mbx.c | 26
-rw-r--r--  drivers/net/ethernet/intel/igbvf/mbx.h | 26
-rw-r--r--  drivers/net/ethernet/intel/igbvf/netdev.c | 26
-rw-r--r--  drivers/net/ethernet/intel/igbvf/regs.h | 26
-rw-r--r--  drivers/net/ethernet/intel/igbvf/vf.c | 26
-rw-r--r--  drivers/net/ethernet/intel/igbvf/vf.h | 26
-rw-r--r--  drivers/net/ethernet/intel/ixgb/Makefile | 27
-rw-r--r--  drivers/net/ethernet/intel/ixgb/ixgb.h | 28
-rw-r--r--  drivers/net/ethernet/intel/ixgb/ixgb_ee.c | 29
-rw-r--r--  drivers/net/ethernet/intel/ixgb/ixgb_ee.h | 28
-rw-r--r--  drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c | 29
-rw-r--r--  drivers/net/ethernet/intel/ixgb/ixgb_hw.c | 29
-rw-r--r--  drivers/net/ethernet/intel/ixgb/ixgb_hw.h | 28
-rw-r--r--  drivers/net/ethernet/intel/ixgb/ixgb_ids.h | 28
-rw-r--r--  drivers/net/ethernet/intel/ixgb/ixgb_main.c | 29
-rw-r--r--  drivers/net/ethernet/intel/ixgb/ixgb_osdep.h | 28
-rw-r--r--  drivers/net/ethernet/intel/ixgb/ixgb_param.c | 29
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/Makefile | 29
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe.h | 32
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c | 28
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c | 41
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 30
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.h | 28
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c | 30
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h | 28
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c | 28
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h | 28
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c | 29
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h | 28
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c | 29
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c | 85
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 29
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c | 31
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h | 28
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c | 141
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h | 27
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 29
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 467
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c | 29
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h | 28
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_model.h | 42
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 29
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h | 28
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c | 28
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 39
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h | 28
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c | 29
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 28
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c | 29
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h | 24
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 35
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/Makefile | 28
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/defines.h | 26
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ethtool.c | 27
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 27
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 76
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/mbx.c | 27
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/mbx.h | 26
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/regs.h | 26
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/vf.c | 27
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/vf.h | 26
-rw-r--r--  drivers/net/ethernet/marvell/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/marvell/Makefile | 2
-rw-r--r--  drivers/net/ethernet/marvell/mvmdio.c | 5
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2.c | 8956
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/Makefile | 7
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2.h | 1046
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c | 141
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h | 44
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 5281
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c | 2467
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h | 314
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c | 23
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_tx.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/icm.c | 18
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 56
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/port.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Kconfig | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Makefile | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c | 71
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h | 86
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 28
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h | 125
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/Makefile | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/port.c | 237
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/port.h | 48
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c | 327
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h | 75
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h | 72
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c | 197
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h | 87
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c | 278
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h | 50
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c | 89
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c | 132
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_dim.c | 28
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 144
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 696
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 124
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 538
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 138
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 21
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 458
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.h | 29
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 334
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c | 39
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eq.c | 28
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 24
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 31
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 154
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c | 18
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c | 63
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.h | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c | 562
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h | 68
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h | 26
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mr.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/qp.c | 18
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/vport.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/wq.c | 95
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/wq.h | 96
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/cmd.h | 47
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.c | 71
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.h | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/pci.c | 278
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/pci_hw.h | 74
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/reg.h | 31
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/resources.h | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 86
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 175
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c | 207
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 82
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.h | 43
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/switchx2.c | 11
-rw-r--r--  drivers/net/ethernet/mscc/Kconfig | 30
-rw-r--r--  drivers/net/ethernet/mscc/Makefile | 5
-rw-r--r--  drivers/net/ethernet/mscc/ocelot.c | 1333
-rw-r--r--  drivers/net/ethernet/mscc/ocelot.h | 572
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_ana.h | 625
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_board.c | 316
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_dev.h | 275
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_dev_gmii.h | 154
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_hsio.h | 785
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_io.c | 116
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_qs.h | 78
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_qsys.h | 270
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_regs.c | 497
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_rew.h | 81
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_sys.h | 144
-rw-r--r--  drivers/net/ethernet/neterion/vxge/vxge-config.c | 12
-rw-r--r--  drivers/net/ethernet/neterion/vxge/vxge-config.h | 2
-rw-r--r--  drivers/net/ethernet/neterion/vxge/vxge-ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/neterion/vxge/vxge-main.c | 4
-rw-r--r--  drivers/net/ethernet/netronome/Kconfig | 13
-rw-r--r--  drivers/net/ethernet/netronome/nfp/Makefile | 8
-rw-r--r--  drivers/net/ethernet/netronome/nfp/abm/ctrl.c | 333
-rw-r--r--  drivers/net/ethernet/netronome/nfp/abm/main.c | 765
-rw-r--r--  drivers/net/ethernet/netronome/nfp/abm/main.h | 142
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/cmsg.c | 16
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/fw.h | 21
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/jit.c | 764
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/main.c | 39
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/main.h | 66
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/offload.c | 174
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/verifier.c | 114
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/action.c | 131
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/cmsg.c | 11
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/cmsg.h | 14
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/lag_conf.c | 726
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/main.c | 74
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/main.h | 61
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/metadata.c | 20
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/offload.c | 52
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_abi.h | 143
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_app.c | 27
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_app.h | 26
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_app_nic.c | 5
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_asm.h | 40
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_devlink.c | 45
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_main.c | 145
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_main.h | 27
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net.h | 7
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 23
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c | 4
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c | 10
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_main.c | 72
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_repr.c | 17
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_repr.h | 11
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c | 2
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_port.c | 8
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_port.h | 8
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_shared_buf.c | 180
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h | 2
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c | 95
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h | 7
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c | 45
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h | 2
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c | 59
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c | 43
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c | 6
-rw-r--r--  drivers/net/ethernet/qlogic/qed/Makefile | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed.h | 74
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_cxt.c | 12
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_cxt.h | 1
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dcbx.c | 14
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_debug.c | 610
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dev.c | 117
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_fcoe.c | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_hsi.h | 724
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_hw.c | 20
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_hw.h | 12
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c | 50
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_iwarp.c | 36
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_l2.c | 71
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_l2.h | 1
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_ll2.c | 46
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_main.c | 170
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mcp.c | 99
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mcp.h | 81
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c | 1337
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_rdma.c | 186
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_rdma.h | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_reg_addr.h | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_roce.c | 48
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sp.h | 12
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sp_commands.c | 76
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sriov.c | 247
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_vf.c | 29
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_vf.h | 21
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede.h | 7
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 6
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_filter.c | 663
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_fp.c | 227
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_main.c | 183
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge_main.c | 4
-rw-r--r--  drivers/net/ethernet/qualcomm/emac/emac-mac.c | 11
-rw-r--r--  drivers/net/ethernet/qualcomm/emac/emac-sgmii.c | 140
-rw-r--r--  drivers/net/ethernet/qualcomm/emac/emac-sgmii.h | 32
-rw-r--r--  drivers/net/ethernet/qualcomm/emac/emac.c | 9
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h | 13
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c | 21
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c | 24
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c | 64
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c | 55
-rw-r--r--  drivers/net/ethernet/realtek/8139too.c | 2
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 1144
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c | 118
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.h | 40
-rw-r--r--  drivers/net/ethernet/rocker/rocker_main.c | 10
-rw-r--r--  drivers/net/ethernet/sfc/efx.c | 36
-rw-r--r--  drivers/net/ethernet/sfc/tx.c | 33
-rw-r--r--  drivers/net/ethernet/socionext/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/socionext/netsec.c | 27
-rw-r--r--  drivers/net/ethernet/socionext/sni_ave.c | 252
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/Makefile | 3
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/chain_mode.c | 34
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/common.h | 236
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c | 120
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c | 24
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c | 267
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c | 221
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac1000.h | 1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c | 29
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c | 92
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c | 23
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c | 35
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4.h | 6
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | 43
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c | 38
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c | 12
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h | 1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac5.c | 269
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac5.h | 41
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/enh_desc.c | 24
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/hwif.c | 268
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/hwif.h | 477
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/norm_desc.c | 24
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/ring_mode.c | 39
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac.h | 45
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 94
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c | 34
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 734
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | 5
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 3
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c | 60
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c | 295
-rw-r--r--  drivers/net/ethernet/ti/Kconfig | 10
-rw-r--r--  drivers/net/ethernet/ti/cpsw-phy-sel.c | 8
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 115
-rw-r--r--  drivers/net/ethernet/ti/cpts.c | 5
-rw-r--r--  drivers/net/ethernet/ti/davinci_cpdma.c | 10
-rw-r--r--  drivers/net/ethernet/ti/davinci_emac.c | 4
-rw-r--r--  drivers/net/ethernet/ti/davinci_mdio.c | 27
-rw-r--r--  drivers/net/ethernet/ti/netcp.h | 3
-rw-r--r--  drivers/net/ethernet/ti/netcp_core.c | 32
-rw-r--r--  drivers/net/ethernet/ti/netcp_ethss.c | 182
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 1
565 files changed, 40094 insertions(+), 22150 deletions(-)
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 176861bd2252..5bc168314ea2 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -765,8 +765,9 @@ static netdev_tx_t boomerang_start_xmit(struct sk_buff *skb,
struct net_device *dev);
static int vortex_rx(struct net_device *dev);
static int boomerang_rx(struct net_device *dev);
-static irqreturn_t vortex_interrupt(int irq, void *dev_id);
-static irqreturn_t boomerang_interrupt(int irq, void *dev_id);
+static irqreturn_t vortex_boomerang_interrupt(int irq, void *dev_id);
+static irqreturn_t _vortex_interrupt(int irq, struct net_device *dev);
+static irqreturn_t _boomerang_interrupt(int irq, struct net_device *dev);
static int vortex_close(struct net_device *dev);
static void dump_tx_ring(struct net_device *dev);
static void update_stats(void __iomem *ioaddr, struct net_device *dev);
@@ -838,11 +839,7 @@ MODULE_PARM_DESC(use_mmio, "3c59x: use memory-mapped PCI I/O resource (0-1)");
#ifdef CONFIG_NET_POLL_CONTROLLER
static void poll_vortex(struct net_device *dev)
{
- struct vortex_private *vp = netdev_priv(dev);
- unsigned long flags;
- local_irq_save(flags);
- (vp->full_bus_master_rx ? boomerang_interrupt:vortex_interrupt)(dev->irq,dev);
- local_irq_restore(flags);
+ vortex_boomerang_interrupt(dev->irq, dev);
}
#endif
@@ -1728,8 +1725,7 @@ vortex_open(struct net_device *dev)
dma_addr_t dma;
/* Use the now-standard shared IRQ implementation. */
- if ((retval = request_irq(dev->irq, vp->full_bus_master_rx ?
- boomerang_interrupt : vortex_interrupt, IRQF_SHARED, dev->name, dev))) {
+ if ((retval = request_irq(dev->irq, vortex_boomerang_interrupt, IRQF_SHARED, dev->name, dev))) {
pr_err("%s: Could not reserve IRQ %d\n", dev->name, dev->irq);
goto err;
}
@@ -1904,18 +1900,7 @@ static void vortex_tx_timeout(struct net_device *dev)
pr_err("%s: Interrupt posted but not delivered --"
" IRQ blocked by another device?\n", dev->name);
/* Bad idea here.. but we might as well handle a few events. */
- {
- /*
- * Block interrupts because vortex_interrupt does a bare spin_lock()
- */
- unsigned long flags;
- local_irq_save(flags);
- if (vp->full_bus_master_tx)
- boomerang_interrupt(dev->irq, dev);
- else
- vortex_interrupt(dev->irq, dev);
- local_irq_restore(flags);
- }
+ vortex_boomerang_interrupt(dev->irq, dev);
}
if (vortex_debug > 0)
@@ -2266,9 +2251,8 @@ out_dma_err:
*/
static irqreturn_t
-vortex_interrupt(int irq, void *dev_id)
+_vortex_interrupt(int irq, struct net_device *dev)
{
- struct net_device *dev = dev_id;
struct vortex_private *vp = netdev_priv(dev);
void __iomem *ioaddr;
int status;
@@ -2277,7 +2261,6 @@ vortex_interrupt(int irq, void *dev_id)
unsigned int bytes_compl = 0, pkts_compl = 0;
ioaddr = vp->ioaddr;
- spin_lock(&vp->lock);
status = ioread16(ioaddr + EL3_STATUS);
@@ -2375,7 +2358,6 @@ vortex_interrupt(int irq, void *dev_id)
pr_debug("%s: exiting interrupt, status %4.4x.\n",
dev->name, status);
handler_exit:
- spin_unlock(&vp->lock);
return IRQ_RETVAL(handled);
}
@@ -2385,9 +2367,8 @@ handler_exit:
*/
static irqreturn_t
-boomerang_interrupt(int irq, void *dev_id)
+_boomerang_interrupt(int irq, struct net_device *dev)
{
- struct net_device *dev = dev_id;
struct vortex_private *vp = netdev_priv(dev);
void __iomem *ioaddr;
int status;
@@ -2397,12 +2378,6 @@ boomerang_interrupt(int irq, void *dev_id)
ioaddr = vp->ioaddr;
-
- /*
- * It seems dopey to put the spinlock this early, but we could race against vortex_tx_timeout
- * and boomerang_start_xmit
- */
- spin_lock(&vp->lock);
vp->handling_irq = 1;
status = ioread16(ioaddr + EL3_STATUS);
@@ -2521,10 +2496,29 @@ boomerang_interrupt(int irq, void *dev_id)
dev->name, status);
handler_exit:
vp->handling_irq = 0;
- spin_unlock(&vp->lock);
return IRQ_RETVAL(handled);
}
+static irqreturn_t
+vortex_boomerang_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct vortex_private *vp = netdev_priv(dev);
+ unsigned long flags;
+ irqreturn_t ret;
+
+ spin_lock_irqsave(&vp->lock, flags);
+
+ if (vp->full_bus_master_rx)
+ ret = _boomerang_interrupt(dev->irq, dev);
+ else
+ ret = _vortex_interrupt(dev->irq, dev);
+
+ spin_unlock_irqrestore(&vp->lock, flags);
+
+ return ret;
+}
+
static int vortex_rx(struct net_device *dev)
{
struct vortex_private *vp = netdev_priv(dev);
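The net effect of the 3c59x hunks above: both interrupt handlers now run behind one dispatcher that takes vp->lock with spin_lock_irqsave(), so netpoll (poll_vortex) and the watchdog (vortex_tx_timeout) no longer need a local_irq_save() around a bare spin_lock(). A minimal standalone sketch of the same pattern, with hypothetical names (foo_priv, _foo_fast_irq, _foo_slow_irq are illustrative only, not from the patch):

#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/spinlock.h>

struct foo_priv {
	spinlock_t lock;	/* guards all interrupt-handler state */
	bool fast_path;		/* selects which handler variant runs */
};

static irqreturn_t _foo_fast_irq(int irq, struct net_device *dev)
{
	/* body runs with priv->lock held and local IRQs disabled */
	return IRQ_HANDLED;
}

static irqreturn_t _foo_slow_irq(int irq, struct net_device *dev)
{
	return IRQ_HANDLED;
}

/* Single entry point passed to request_irq(). spin_lock_irqsave() both
 * takes the lock and masks local interrupts, so the same function is
 * safe from hard-IRQ context, timeout handlers and netpoll alike.
 */
static irqreturn_t foo_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct foo_priv *priv = netdev_priv(dev);
	unsigned long flags;
	irqreturn_t ret;

	spin_lock_irqsave(&priv->lock, flags);
	ret = priv->fast_path ? _foo_fast_irq(irq, dev)
			      : _foo_slow_irq(irq, dev);
	spin_unlock_irqrestore(&priv->lock, flags);

	return ret;
}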
diff --git a/drivers/net/ethernet/8390/Kconfig b/drivers/net/ethernet/8390/Kconfig
index 9fee7c83ef9f..f2f0264c58ba 100644
--- a/drivers/net/ethernet/8390/Kconfig
+++ b/drivers/net/ethernet/8390/Kconfig
@@ -29,8 +29,8 @@ config PCMCIA_AXNET
called axnet_cs. If unsure, say N.
config AX88796
- tristate "ASIX AX88796 NE2000 clone support"
- depends on (ARM || MIPS || SUPERH)
+ tristate "ASIX AX88796 NE2000 clone support" if !ZORRO
+ depends on (ARM || MIPS || SUPERH || ZORRO || COMPILE_TEST)
select CRC32
select PHYLIB
select MDIO_BITBANG
@@ -45,6 +45,19 @@ config AX88796_93CX6
---help---
Select this if your platform comes with an external 93CX6 eeprom.
+config XSURF100
+ tristate "Amiga XSurf 100 AX88796/NE2000 clone support"
+ depends on ZORRO
+ select AX88796
+ select ASIX_PHY
+ help
+ This driver is for the Individual Computers X-Surf 100 Ethernet
+ card (based on the Asix AX88796 chip). If you have such a card,
+ say Y. Otherwise, say N.
+
+ To compile this driver as a module, choose M here: the module
+ will be called xsurf100.
+
config HYDRA
tristate "Hydra support"
depends on ZORRO
diff --git a/drivers/net/ethernet/8390/Makefile b/drivers/net/ethernet/8390/Makefile
index 1d650e66cc6e..85c83c566ec6 100644
--- a/drivers/net/ethernet/8390/Makefile
+++ b/drivers/net/ethernet/8390/Makefile
@@ -16,4 +16,5 @@ obj-$(CONFIG_PCMCIA_PCNET) += pcnet_cs.o 8390.o
obj-$(CONFIG_STNIC) += stnic.o 8390.o
obj-$(CONFIG_ULTRA) += smc-ultra.o 8390.o
obj-$(CONFIG_WD80x3) += wd.o 8390.o
+obj-$(CONFIG_XSURF100) += xsurf100.o
obj-$(CONFIG_ZORRO8390) += zorro8390.o
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index da61cf3cb3a9..2a0ddec1dd56 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -163,6 +163,21 @@ static void ax_reset_8390(struct net_device *dev)
ei_outb(ENISR_RESET, addr + EN0_ISR); /* Ack intr. */
}
+/* Wrapper for __ei_interrupt for platforms that have a platform-specific
+ * way to find out whether the interrupt request might be caused by
+ * the ax88796 chip.
+ */
+static irqreturn_t ax_ei_interrupt_filtered(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct ax_device *ax = to_ax_dev(dev);
+ struct platform_device *pdev = to_platform_device(dev->dev.parent);
+
+ if (!ax->plat->check_irq(pdev))
+ return IRQ_NONE;
+
+ return ax_ei_interrupt(irq, dev_id);
+}
static void ax_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
int ring_page)
@@ -387,6 +402,90 @@ static void ax_phy_switch(struct net_device *dev, int on)
ei_outb(reg_gpoc, ei_local->mem + EI_SHIFT(0x17));
}
+static void ax_bb_mdc(struct mdiobb_ctrl *ctrl, int level)
+{
+ struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl);
+
+ if (level)
+ ax->reg_memr |= AX_MEMR_MDC;
+ else
+ ax->reg_memr &= ~AX_MEMR_MDC;
+
+ ei_outb(ax->reg_memr, ax->addr_memr);
+}
+
+static void ax_bb_dir(struct mdiobb_ctrl *ctrl, int output)
+{
+ struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl);
+
+ if (output)
+ ax->reg_memr &= ~AX_MEMR_MDIR;
+ else
+ ax->reg_memr |= AX_MEMR_MDIR;
+
+ ei_outb(ax->reg_memr, ax->addr_memr);
+}
+
+static void ax_bb_set_data(struct mdiobb_ctrl *ctrl, int value)
+{
+ struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl);
+
+ if (value)
+ ax->reg_memr |= AX_MEMR_MDO;
+ else
+ ax->reg_memr &= ~AX_MEMR_MDO;
+
+ ei_outb(ax->reg_memr, ax->addr_memr);
+}
+
+static int ax_bb_get_data(struct mdiobb_ctrl *ctrl)
+{
+ struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl);
+ int reg_memr = ei_inb(ax->addr_memr);
+
+ return reg_memr & AX_MEMR_MDI ? 1 : 0;
+}
+
+static const struct mdiobb_ops bb_ops = {
+ .owner = THIS_MODULE,
+ .set_mdc = ax_bb_mdc,
+ .set_mdio_dir = ax_bb_dir,
+ .set_mdio_data = ax_bb_set_data,
+ .get_mdio_data = ax_bb_get_data,
+};
+
+static int ax_mii_init(struct net_device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev->dev.parent);
+ struct ei_device *ei_local = netdev_priv(dev);
+ struct ax_device *ax = to_ax_dev(dev);
+ int err;
+
+ ax->bb_ctrl.ops = &bb_ops;
+ ax->addr_memr = ei_local->mem + AX_MEMR;
+ ax->mii_bus = alloc_mdio_bitbang(&ax->bb_ctrl);
+ if (!ax->mii_bus) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ ax->mii_bus->name = "ax88796_mii_bus";
+ ax->mii_bus->parent = dev->dev.parent;
+ snprintf(ax->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+ pdev->name, pdev->id);
+
+ err = mdiobus_register(ax->mii_bus);
+ if (err)
+ goto out_free_mdio_bitbang;
+
+ return 0;
+
+ out_free_mdio_bitbang:
+ free_mdio_bitbang(ax->mii_bus);
+ out:
+ return err;
+}
+
static int ax_open(struct net_device *dev)
{
struct ax_device *ax = to_ax_dev(dev);
@@ -394,8 +493,16 @@ static int ax_open(struct net_device *dev)
netdev_dbg(dev, "open\n");
- ret = request_irq(dev->irq, ax_ei_interrupt, ax->irqflags,
- dev->name, dev);
+ ret = ax_mii_init(dev);
+ if (ret)
+ goto failed_mii;
+
+ if (ax->plat->check_irq)
+ ret = request_irq(dev->irq, ax_ei_interrupt_filtered,
+ ax->irqflags, dev->name, dev);
+ else
+ ret = request_irq(dev->irq, ax_ei_interrupt, ax->irqflags,
+ dev->name, dev);
if (ret)
goto failed_request_irq;
@@ -421,6 +528,10 @@ static int ax_open(struct net_device *dev)
ax_phy_switch(dev, 0);
free_irq(dev->irq, dev);
failed_request_irq:
+ /* unregister mdiobus */
+ mdiobus_unregister(ax->mii_bus);
+ free_mdio_bitbang(ax->mii_bus);
+ failed_mii:
return ret;
}
@@ -440,6 +551,9 @@ static int ax_close(struct net_device *dev)
phy_disconnect(dev->phydev);
free_irq(dev->irq, dev);
+
+ mdiobus_unregister(ax->mii_bus);
+ free_mdio_bitbang(ax->mii_bus);
return 0;
}
@@ -539,92 +653,8 @@ static const struct net_device_ops ax_netdev_ops = {
#endif
};
-static void ax_bb_mdc(struct mdiobb_ctrl *ctrl, int level)
-{
- struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl);
-
- if (level)
- ax->reg_memr |= AX_MEMR_MDC;
- else
- ax->reg_memr &= ~AX_MEMR_MDC;
-
- ei_outb(ax->reg_memr, ax->addr_memr);
-}
-
-static void ax_bb_dir(struct mdiobb_ctrl *ctrl, int output)
-{
- struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl);
-
- if (output)
- ax->reg_memr &= ~AX_MEMR_MDIR;
- else
- ax->reg_memr |= AX_MEMR_MDIR;
-
- ei_outb(ax->reg_memr, ax->addr_memr);
-}
-
-static void ax_bb_set_data(struct mdiobb_ctrl *ctrl, int value)
-{
- struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl);
-
- if (value)
- ax->reg_memr |= AX_MEMR_MDO;
- else
- ax->reg_memr &= ~AX_MEMR_MDO;
-
- ei_outb(ax->reg_memr, ax->addr_memr);
-}
-
-static int ax_bb_get_data(struct mdiobb_ctrl *ctrl)
-{
- struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl);
- int reg_memr = ei_inb(ax->addr_memr);
-
- return reg_memr & AX_MEMR_MDI ? 1 : 0;
-}
-
-static const struct mdiobb_ops bb_ops = {
- .owner = THIS_MODULE,
- .set_mdc = ax_bb_mdc,
- .set_mdio_dir = ax_bb_dir,
- .set_mdio_data = ax_bb_set_data,
- .get_mdio_data = ax_bb_get_data,
-};
-
/* setup code */
-static int ax_mii_init(struct net_device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev->dev.parent);
- struct ei_device *ei_local = netdev_priv(dev);
- struct ax_device *ax = to_ax_dev(dev);
- int err;
-
- ax->bb_ctrl.ops = &bb_ops;
- ax->addr_memr = ei_local->mem + AX_MEMR;
- ax->mii_bus = alloc_mdio_bitbang(&ax->bb_ctrl);
- if (!ax->mii_bus) {
- err = -ENOMEM;
- goto out;
- }
-
- ax->mii_bus->name = "ax88796_mii_bus";
- ax->mii_bus->parent = dev->dev.parent;
- snprintf(ax->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
- pdev->name, pdev->id);
-
- err = mdiobus_register(ax->mii_bus);
- if (err)
- goto out_free_mdio_bitbang;
-
- return 0;
-
- out_free_mdio_bitbang:
- free_mdio_bitbang(ax->mii_bus);
- out:
- return err;
-}
-
static void ax_initial_setup(struct net_device *dev, struct ei_device *ei_local)
{
void __iomem *ioaddr = ei_local->mem;
@@ -669,10 +699,16 @@ static int ax_init_dev(struct net_device *dev)
if (ax->plat->flags & AXFLG_HAS_EEPROM) {
unsigned char SA_prom[32];
+ ei_outb(6, ioaddr + EN0_RCNTLO);
+ ei_outb(0, ioaddr + EN0_RCNTHI);
+ ei_outb(0, ioaddr + EN0_RSARLO);
+ ei_outb(0, ioaddr + EN0_RSARHI);
+ ei_outb(E8390_RREAD + E8390_START, ioaddr + NE_CMD);
for (i = 0; i < sizeof(SA_prom); i += 2) {
SA_prom[i] = ei_inb(ioaddr + NE_DATAPORT);
SA_prom[i + 1] = ei_inb(ioaddr + NE_DATAPORT);
}
+ ei_outb(ENISR_RDC, ioaddr + EN0_ISR); /* Ack intr. */
if (ax->plat->wordlength == 2)
for (i = 0; i < 16; i++)
@@ -741,18 +777,20 @@ static int ax_init_dev(struct net_device *dev)
#endif
ei_local->reset_8390 = &ax_reset_8390;
- ei_local->block_input = &ax_block_input;
- ei_local->block_output = &ax_block_output;
+ if (ax->plat->block_input)
+ ei_local->block_input = ax->plat->block_input;
+ else
+ ei_local->block_input = &ax_block_input;
+ if (ax->plat->block_output)
+ ei_local->block_output = ax->plat->block_output;
+ else
+ ei_local->block_output = &ax_block_output;
ei_local->get_8390_hdr = &ax_get_8390_hdr;
ei_local->priv = 0;
dev->netdev_ops = &ax_netdev_ops;
dev->ethtool_ops = &ax_ethtool_ops;
- ret = ax_mii_init(dev);
- if (ret)
- goto err_out;
-
ax_NS8390_init(dev, 0);
ret = register_netdev(dev);
@@ -777,7 +815,6 @@ static int ax_remove(struct platform_device *pdev)
struct resource *mem;
unregister_netdev(dev);
- free_irq(dev->irq, dev);
iounmap(ei_local->mem);
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -789,6 +826,7 @@ static int ax_remove(struct platform_device *pdev)
release_mem_region(mem->start, resource_size(mem));
}
+ platform_set_drvdata(pdev, NULL);
free_netdev(dev);
return 0;
@@ -835,6 +873,9 @@ static int ax_probe(struct platform_device *pdev)
dev->irq = irq->start;
ax->irqflags = irq->flags & IRQF_TRIGGER_MASK;
+ if (irq->flags & IORESOURCE_IRQ_SHAREABLE)
+ ax->irqflags |= IRQF_SHARED;
+
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!mem) {
dev_err(&pdev->dev, "no MEM specified\n");
@@ -919,6 +960,7 @@ static int ax_probe(struct platform_device *pdev)
release_mem_region(mem->start, mem_size);
exit_mem:
+ platform_set_drvdata(pdev, NULL);
free_netdev(dev);
return ret;
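The EEPROM hunk in ax_init_dev() above sets up a complete NE2000-style remote-DMA read before pulling the station-address PROM out of NE_DATAPORT: byte count into EN0_RCNTLO/EN0_RCNTHI, source address into EN0_RSARLO/EN0_RSARHI, an E8390_RREAD + E8390_START command, and an ENISR_RDC ack once done. A sketch of that sequence as a generic helper, assuming the 8390 register definitions are in scope (ne2k_remote_read() itself is hypothetical, not part of the patch):

/* Hypothetical helper illustrating the remote-DMA read sequence the
 * ax88796 EEPROM hunk performs inline; not part of the driver.
 */
static void ne2k_remote_read(struct net_device *dev, u8 *dst,
			     u16 src, u16 len)
{
	struct ei_device *ei_local = netdev_priv(dev);
	void __iomem *base = ei_local->mem;
	int i;

	ei_outb(len & 0xff, base + EN0_RCNTLO);	/* remote byte count, low  */
	ei_outb(len >> 8, base + EN0_RCNTHI);	/* remote byte count, high */
	ei_outb(src & 0xff, base + EN0_RSARLO);	/* remote start addr, low  */
	ei_outb(src >> 8, base + EN0_RSARHI);	/* remote start addr, high */
	ei_outb(E8390_RREAD + E8390_START, base + NE_CMD);

	for (i = 0; i < len; i++)
		dst[i] = ei_inb(base + NE_DATAPORT);

	ei_outb(ENISR_RDC, base + EN0_ISR);	/* ack remote-DMA complete */
}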
diff --git a/drivers/net/ethernet/8390/xsurf100.c b/drivers/net/ethernet/8390/xsurf100.c
new file mode 100644
index 000000000000..e2c963821ffe
--- /dev/null
+++ b/drivers/net/ethernet/8390/xsurf100.c
@@ -0,0 +1,382 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+#include <linux/zorro.h>
+#include <net/ax88796.h>
+#include <asm/amigaints.h>
+
+#define ZORRO_PROD_INDIVIDUAL_COMPUTERS_X_SURF100 \
+ ZORRO_ID(INDIVIDUAL_COMPUTERS, 0x64, 0)
+
+#define XS100_IRQSTATUS_BASE 0x40
+#define XS100_8390_BASE 0x800
+
+/* Longword-access area. Translated to 2 16-bit access cycles by the
+ * X-Surf 100 FPGA
+ */
+#define XS100_8390_DATA32_BASE 0x8000
+#define XS100_8390_DATA32_SIZE 0x2000
+/* Sub-Areas for fast data register access; addresses relative to area begin */
+#define XS100_8390_DATA_READ32_BASE 0x0880
+#define XS100_8390_DATA_WRITE32_BASE 0x0C80
+#define XS100_8390_DATA_AREA_SIZE 0x80
+
+#define __NS8390_init ax_NS8390_init
+
+/* force unsigned long back to 'void __iomem *' */
+#define ax_convert_addr(_a) ((void __force __iomem *)(_a))
+
+#define ei_inb(_a) z_readb(ax_convert_addr(_a))
+#define ei_outb(_v, _a) z_writeb(_v, ax_convert_addr(_a))
+
+#define ei_inw(_a) z_readw(ax_convert_addr(_a))
+#define ei_outw(_v, _a) z_writew(_v, ax_convert_addr(_a))
+
+#define ei_inb_p(_a) ei_inb(_a)
+#define ei_outb_p(_v, _a) ei_outb(_v, _a)
+
+/* define EI_SHIFT() to take into account our register offsets */
+#define EI_SHIFT(x) (ei_local->reg_offset[(x)])
+
+/* Ensure we have our RCR base value */
+#define AX88796_PLATFORM
+
+static unsigned char version[] =
+ "ax88796.c: Copyright 2005,2007 Simtec Electronics\n";
+
+#include "lib8390.c"
+
+/* from ne.c */
+#define NE_CMD EI_SHIFT(0x00)
+#define NE_RESET EI_SHIFT(0x1f)
+#define NE_DATAPORT EI_SHIFT(0x10)
+
+struct xsurf100_ax_plat_data {
+ struct ax_plat_data ax;
+ void __iomem *base_regs;
+ void __iomem *data_area;
+};
+
+static int is_xsurf100_network_irq(struct platform_device *pdev)
+{
+ struct xsurf100_ax_plat_data *xs100 = dev_get_platdata(&pdev->dev);
+
+ return (readw(xs100->base_regs + XS100_IRQSTATUS_BASE) & 0xaaaa) != 0;
+}
+
+/* These functions guarantee that the iomem is accessed with 32 bit
+ * cycles only. z_memcpy_fromio / z_memcpy_toio don't
+ */
+static void z_memcpy_fromio32(void *dst, const void __iomem *src, size_t bytes)
+{
+ while (bytes > 32) {
+ asm __volatile__
+ ("movem.l (%0)+,%%d0-%%d7\n"
+ "movem.l %%d0-%%d7,(%1)\n"
+ "adda.l #32,%1" : "=a"(src), "=a"(dst)
+ : "0"(src), "1"(dst) : "d0", "d1", "d2", "d3", "d4",
+ "d5", "d6", "d7", "memory");
+ bytes -= 32;
+ }
+ while (bytes) {
+ *(uint32_t *)dst = z_readl(src);
+ src += 4;
+ dst += 4;
+ bytes -= 4;
+ }
+}
+
+static void z_memcpy_toio32(void __iomem *dst, const void *src, size_t bytes)
+{
+ while (bytes) {
+ z_writel(*(const uint32_t *)src, dst);
+ src += 4;
+ dst += 4;
+ bytes -= 4;
+ }
+}
+
+static void xs100_write(struct net_device *dev, const void *src,
+ unsigned int count)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+ struct platform_device *pdev = to_platform_device(dev->dev.parent);
+ struct xsurf100_ax_plat_data *xs100 = dev_get_platdata(&pdev->dev);
+
+ /* copy whole blocks */
+ while (count > XS100_8390_DATA_AREA_SIZE) {
+ z_memcpy_toio32(xs100->data_area +
+ XS100_8390_DATA_WRITE32_BASE, src,
+ XS100_8390_DATA_AREA_SIZE);
+ src += XS100_8390_DATA_AREA_SIZE;
+ count -= XS100_8390_DATA_AREA_SIZE;
+ }
+ /* copy whole dwords */
+ z_memcpy_toio32(xs100->data_area + XS100_8390_DATA_WRITE32_BASE,
+ src, count & ~3);
+ src += count & ~3;
+ if (count & 2) {
+ ei_outw(*(uint16_t *)src, ei_local->mem + NE_DATAPORT);
+ src += 2;
+ }
+ if (count & 1)
+ ei_outb(*(uint8_t *)src, ei_local->mem + NE_DATAPORT);
+}
+
+static void xs100_read(struct net_device *dev, void *dst, unsigned int count)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+ struct platform_device *pdev = to_platform_device(dev->dev.parent);
+ struct xsurf100_ax_plat_data *xs100 = dev_get_platdata(&pdev->dev);
+
+ /* copy whole blocks */
+ while (count > XS100_8390_DATA_AREA_SIZE) {
+ z_memcpy_fromio32(dst, xs100->data_area +
+ XS100_8390_DATA_READ32_BASE,
+ XS100_8390_DATA_AREA_SIZE);
+ dst += XS100_8390_DATA_AREA_SIZE;
+ count -= XS100_8390_DATA_AREA_SIZE;
+ }
+ /* copy whole dwords */
+ z_memcpy_fromio32(dst, xs100->data_area + XS100_8390_DATA_READ32_BASE,
+ count & ~3);
+ dst += count & ~3;
+ if (count & 2) {
+ *(uint16_t *)dst = ei_inw(ei_local->mem + NE_DATAPORT);
+ dst += 2;
+ }
+ if (count & 1)
+ *(uint8_t *)dst = ei_inb(ei_local->mem + NE_DATAPORT);
+}
+
+/* Block input and output, similar to the Crynwr packet driver. If
+ * you are porting to a new ethercard, look at the packet driver
+ * source for hints. The NEx000 doesn't share the on-board packet
+ * memory -- you have to put the packet out through the "remote DMA"
+ * dataport using ei_outb.
+ */
+static void xs100_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+ void __iomem *nic_base = ei_local->mem;
+ char *buf = skb->data;
+
+ if (ei_local->dmaing) {
+ netdev_err(dev,
+ "DMAing conflict in %s [DMAstat:%d][irqlock:%d]\n",
+ __func__,
+ ei_local->dmaing, ei_local->irqlock);
+ return;
+ }
+
+ ei_local->dmaing |= 0x01;
+
+ ei_outb(E8390_NODMA + E8390_PAGE0 + E8390_START, nic_base + NE_CMD);
+ ei_outb(count & 0xff, nic_base + EN0_RCNTLO);
+ ei_outb(count >> 8, nic_base + EN0_RCNTHI);
+ ei_outb(ring_offset & 0xff, nic_base + EN0_RSARLO);
+ ei_outb(ring_offset >> 8, nic_base + EN0_RSARHI);
+ ei_outb(E8390_RREAD + E8390_START, nic_base + NE_CMD);
+
+ xs100_read(dev, buf, count);
+
+ ei_local->dmaing &= ~1;
+}
+
+static void xs100_block_output(struct net_device *dev, int count,
+ const unsigned char *buf, const int start_page)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+ void __iomem *nic_base = ei_local->mem;
+ unsigned long dma_start;
+
+ /* Round the count up for word writes. Do we need to do this?
+ * What effect will an odd byte count have on the 8390? I
+ * should check someday.
+ */
+ if (ei_local->word16 && (count & 0x01))
+ count++;
+
+ /* This *shouldn't* happen. If it does, it's the last thing
+ * you'll see
+ */
+ if (ei_local->dmaing) {
+ netdev_err(dev,
+ "DMAing conflict in %s [DMAstat:%d][irqlock:%d]\n",
+ __func__,
+ ei_local->dmaing, ei_local->irqlock);
+ return;
+ }
+
+ ei_local->dmaing |= 0x01;
+ /* We should already be in page 0, but to be safe... */
+ ei_outb(E8390_PAGE0 + E8390_START + E8390_NODMA, nic_base + NE_CMD);
+
+ ei_outb(ENISR_RDC, nic_base + EN0_ISR);
+
+ /* Now the normal output. */
+ ei_outb(count & 0xff, nic_base + EN0_RCNTLO);
+ ei_outb(count >> 8, nic_base + EN0_RCNTHI);
+ ei_outb(0x00, nic_base + EN0_RSARLO);
+ ei_outb(start_page, nic_base + EN0_RSARHI);
+
+ ei_outb(E8390_RWRITE + E8390_START, nic_base + NE_CMD);
+
+ xs100_write(dev, buf, count);
+
+ dma_start = jiffies;
+
+ while ((ei_inb(nic_base + EN0_ISR) & ENISR_RDC) == 0) {
+ if (jiffies - dma_start > 2 * HZ / 100) { /* 20ms */
+ netdev_warn(dev, "timeout waiting for Tx RDC.\n");
+ ei_local->reset_8390(dev);
+ ax_NS8390_init(dev, 1);
+ break;
+ }
+ }
+
+ ei_outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
+ ei_local->dmaing &= ~0x01;
+}
+
+static int xsurf100_probe(struct zorro_dev *zdev,
+ const struct zorro_device_id *ent)
+{
+ struct platform_device *pdev;
+ struct xsurf100_ax_plat_data ax88796_data;
+ struct resource res[2] = {
+ DEFINE_RES_NAMED(IRQ_AMIGA_PORTS, 1, NULL,
+ IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE),
+ DEFINE_RES_MEM(zdev->resource.start + XS100_8390_BASE,
+ 4 * 0x20)
+ };
+ int reg;
+ /* This table is referenced in the device structure, so it must
+ * outlive the scope of xsurf100_probe.
+ */
+ static u32 reg_offsets[32];
+ int ret = 0;
+
+ /* X-Surf 100 control and 32 bit ring buffer data access areas.
+ * These resources are not used by the ax88796 driver, so must
+ * be requested here and passed via platform data.
+ */
+
+ if (!request_mem_region(zdev->resource.start, 0x100, zdev->name)) {
+ dev_err(&zdev->dev, "cannot reserve X-Surf 100 control registers\n");
+ return -ENXIO;
+ }
+
+ if (!request_mem_region(zdev->resource.start +
+ XS100_8390_DATA32_BASE,
+ XS100_8390_DATA32_SIZE,
+ "X-Surf 100 32-bit data access")) {
+ dev_err(&zdev->dev, "cannot reserve 32-bit area\n");
+ ret = -ENXIO;
+ goto exit_req;
+ }
+
+ for (reg = 0; reg < 0x20; reg++)
+ reg_offsets[reg] = 4 * reg;
+
+ memset(&ax88796_data, 0, sizeof(ax88796_data));
+ ax88796_data.ax.flags = AXFLG_HAS_EEPROM;
+ ax88796_data.ax.wordlength = 2;
+ ax88796_data.ax.dcr_val = 0x48;
+ ax88796_data.ax.rcr_val = 0x40;
+ ax88796_data.ax.reg_offsets = reg_offsets;
+ ax88796_data.ax.check_irq = is_xsurf100_network_irq;
+ ax88796_data.base_regs = ioremap(zdev->resource.start, 0x100);
+
+ /* error handling for ioremap regs */
+ if (!ax88796_data.base_regs) {
+ dev_err(&zdev->dev, "Cannot ioremap area %pR (registers)\n",
+ &zdev->resource);
+
+ ret = -ENXIO;
+ goto exit_req2;
+ }
+
+ ax88796_data.data_area = ioremap(zdev->resource.start +
+ XS100_8390_DATA32_BASE, XS100_8390_DATA32_SIZE);
+
+ /* error handling for ioremap data */
+ if (!ax88796_data.data_area) {
+ dev_err(&zdev->dev,
+ "Cannot ioremap area %pR offset %x (32-bit access)\n",
+ &zdev->resource, XS100_8390_DATA32_BASE);
+
+ ret = -ENXIO;
+ goto exit_mem;
+ }
+
+ ax88796_data.ax.block_output = xs100_block_output;
+ ax88796_data.ax.block_input = xs100_block_input;
+
+ pdev = platform_device_register_resndata(&zdev->dev, "ax88796",
+ zdev->slotaddr, res, 2,
+ &ax88796_data,
+ sizeof(ax88796_data));
+
+ if (IS_ERR(pdev)) {
+ dev_err(&zdev->dev, "cannot register platform device\n");
+ ret = -ENXIO;
+ goto exit_mem2;
+ }
+
+ zorro_set_drvdata(zdev, pdev);
+
+	return 0;
+
+ exit_mem2:
+ iounmap(ax88796_data.data_area);
+
+ exit_mem:
+ iounmap(ax88796_data.base_regs);
+
+ exit_req2:
+ release_mem_region(zdev->resource.start + XS100_8390_DATA32_BASE,
+ XS100_8390_DATA32_SIZE);
+
+ exit_req:
+ release_mem_region(zdev->resource.start, 0x100);
+
+ return ret;
+}
+
+static void xsurf100_remove(struct zorro_dev *zdev)
+{
+	struct platform_device *pdev = zorro_get_drvdata(zdev);
+	struct xsurf100_ax_plat_data *xs100 = dev_get_platdata(&pdev->dev);
+	void __iomem *base_regs = xs100->base_regs;
+	void __iomem *data_area = xs100->data_area;
+
+	/* The platform data (and xs100 with it) is freed when the
+	 * platform device is unregistered, so save the mapping pointers
+	 * first.
+	 */
+	platform_device_unregister(pdev);
+
+	iounmap(base_regs);
+	release_mem_region(zdev->resource.start, 0x100);
+	iounmap(data_area);
+	release_mem_region(zdev->resource.start + XS100_8390_DATA32_BASE,
+			   XS100_8390_DATA32_SIZE);
+}
+
+static const struct zorro_device_id xsurf100_zorro_tbl[] = {
+ { ZORRO_PROD_INDIVIDUAL_COMPUTERS_X_SURF100, },
+ { 0 }
+};
+
+MODULE_DEVICE_TABLE(zorro, xsurf100_zorro_tbl);
+
+static struct zorro_driver xsurf100_driver = {
+ .name = "xsurf100",
+ .id_table = xsurf100_zorro_tbl,
+ .probe = xsurf100_probe,
+ .remove = xsurf100_remove,
+};
+
+module_driver(xsurf100_driver, zorro_register_driver, zorro_unregister_driver);
+
+MODULE_DESCRIPTION("X-Surf 100 driver");
+MODULE_AUTHOR("Michael Karcher <kernel@mkarcher.dialup.fu-berlin.de>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 603a5704dab8..af766fd61151 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -33,9 +33,9 @@ source "drivers/net/ethernet/aquantia/Kconfig"
source "drivers/net/ethernet/arc/Kconfig"
source "drivers/net/ethernet/atheros/Kconfig"
source "drivers/net/ethernet/aurora/Kconfig"
-source "drivers/net/ethernet/cadence/Kconfig"
source "drivers/net/ethernet/broadcom/Kconfig"
source "drivers/net/ethernet/brocade/Kconfig"
+source "drivers/net/ethernet/cadence/Kconfig"
source "drivers/net/ethernet/calxeda/Kconfig"
source "drivers/net/ethernet/cavium/Kconfig"
source "drivers/net/ethernet/chelsio/Kconfig"
@@ -72,16 +72,16 @@ source "drivers/net/ethernet/dec/Kconfig"
source "drivers/net/ethernet/dlink/Kconfig"
source "drivers/net/ethernet/emulex/Kconfig"
source "drivers/net/ethernet/ezchip/Kconfig"
-source "drivers/net/ethernet/neterion/Kconfig"
source "drivers/net/ethernet/faraday/Kconfig"
source "drivers/net/ethernet/freescale/Kconfig"
source "drivers/net/ethernet/fujitsu/Kconfig"
source "drivers/net/ethernet/hisilicon/Kconfig"
source "drivers/net/ethernet/hp/Kconfig"
source "drivers/net/ethernet/huawei/Kconfig"
+source "drivers/net/ethernet/i825xx/Kconfig"
source "drivers/net/ethernet/ibm/Kconfig"
source "drivers/net/ethernet/intel/Kconfig"
-source "drivers/net/ethernet/i825xx/Kconfig"
+source "drivers/net/ethernet/neterion/Kconfig"
source "drivers/net/ethernet/xscale/Kconfig"
config JME
@@ -115,6 +115,7 @@ source "drivers/net/ethernet/mellanox/Kconfig"
source "drivers/net/ethernet/micrel/Kconfig"
source "drivers/net/ethernet/microchip/Kconfig"
source "drivers/net/ethernet/moxa/Kconfig"
+source "drivers/net/ethernet/mscc/Kconfig"
source "drivers/net/ethernet/myricom/Kconfig"
config FEALNX
@@ -160,20 +161,21 @@ source "drivers/net/ethernet/packetengines/Kconfig"
source "drivers/net/ethernet/pasemi/Kconfig"
source "drivers/net/ethernet/qlogic/Kconfig"
source "drivers/net/ethernet/qualcomm/Kconfig"
+source "drivers/net/ethernet/rdc/Kconfig"
source "drivers/net/ethernet/realtek/Kconfig"
source "drivers/net/ethernet/renesas/Kconfig"
-source "drivers/net/ethernet/rdc/Kconfig"
source "drivers/net/ethernet/rocker/Kconfig"
source "drivers/net/ethernet/samsung/Kconfig"
source "drivers/net/ethernet/seeq/Kconfig"
-source "drivers/net/ethernet/silan/Kconfig"
-source "drivers/net/ethernet/sis/Kconfig"
source "drivers/net/ethernet/sfc/Kconfig"
source "drivers/net/ethernet/sgi/Kconfig"
+source "drivers/net/ethernet/silan/Kconfig"
+source "drivers/net/ethernet/sis/Kconfig"
source "drivers/net/ethernet/smsc/Kconfig"
source "drivers/net/ethernet/socionext/Kconfig"
source "drivers/net/ethernet/stmicro/Kconfig"
source "drivers/net/ethernet/sun/Kconfig"
+source "drivers/net/ethernet/synopsys/Kconfig"
source "drivers/net/ethernet/tehuti/Kconfig"
source "drivers/net/ethernet/ti/Kconfig"
source "drivers/net/ethernet/toshiba/Kconfig"
@@ -182,6 +184,5 @@ source "drivers/net/ethernet/via/Kconfig"
source "drivers/net/ethernet/wiznet/Kconfig"
source "drivers/net/ethernet/xilinx/Kconfig"
source "drivers/net/ethernet/xircom/Kconfig"
-source "drivers/net/ethernet/synopsys/Kconfig"
endif # ETHERNET
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 2bfd2eea50bf..8fbfe9ce2fa5 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -55,6 +55,7 @@ obj-$(CONFIG_NET_VENDOR_MEDIATEK) += mediatek/
obj-$(CONFIG_NET_VENDOR_MELLANOX) += mellanox/
obj-$(CONFIG_NET_VENDOR_MICREL) += micrel/
obj-$(CONFIG_NET_VENDOR_MICROCHIP) += microchip/
+obj-$(CONFIG_NET_VENDOR_MICROSEMI) += mscc/
obj-$(CONFIG_NET_VENDOR_MOXART) += moxa/
obj-$(CONFIG_NET_VENDOR_MYRI) += myricom/
obj-$(CONFIG_FEALNX) += fealnx.o
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index c99e3e845ac0..a90080f12e67 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -1074,16 +1074,12 @@ static int amd8111e_calc_coalesce(struct net_device *dev)
amd8111e_set_coalesce(dev,TX_INTR_COAL);
coal_conf->tx_coal_type = MEDIUM_COALESCE;
}
-
- }
- else if(tx_pkt_size >= 1024){
- if (tx_pkt_size >= 1024){
- if(coal_conf->tx_coal_type != HIGH_COALESCE){
- coal_conf->tx_timeout = 4;
- coal_conf->tx_event_count = 8;
- amd8111e_set_coalesce(dev,TX_INTR_COAL);
- coal_conf->tx_coal_type = HIGH_COALESCE;
- }
+ } else if (tx_pkt_size >= 1024) {
+ if (coal_conf->tx_coal_type != HIGH_COALESCE) {
+ coal_conf->tx_timeout = 4;
+ coal_conf->tx_event_count = 8;
+ amd8111e_set_coalesce(dev, TX_INTR_COAL);
+ coal_conf->tx_coal_type = HIGH_COALESCE;
}
}
}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 7c204f05b418..24f1053b8785 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1312,14 +1312,83 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
return 0;
}
+static void xgbe_free_memory(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_desc_if *desc_if = &pdata->desc_if;
+
+ /* Free the ring descriptors and buffers */
+ desc_if->free_ring_resources(pdata);
+
+ /* Free the channel and ring structures */
+ xgbe_free_channels(pdata);
+}
+
+static int xgbe_alloc_memory(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_desc_if *desc_if = &pdata->desc_if;
+ struct net_device *netdev = pdata->netdev;
+ int ret;
+
+ if (pdata->new_tx_ring_count) {
+ pdata->tx_ring_count = pdata->new_tx_ring_count;
+ pdata->tx_q_count = pdata->tx_ring_count;
+ pdata->new_tx_ring_count = 0;
+ }
+
+ if (pdata->new_rx_ring_count) {
+ pdata->rx_ring_count = pdata->new_rx_ring_count;
+ pdata->new_rx_ring_count = 0;
+ }
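+	/* Ring-count changes from ethtool are staged in new_*_ring_count
+	 * and only take effect here, when the rings are reallocated.
+	 */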
+
+ /* Calculate the Rx buffer size before allocating rings */
+ pdata->rx_buf_size = xgbe_calc_rx_buf_size(netdev, netdev->mtu);
+
+ /* Allocate the channel and ring structures */
+ ret = xgbe_alloc_channels(pdata);
+ if (ret)
+ return ret;
+
+ /* Allocate the ring descriptors and buffers */
+ ret = desc_if->alloc_ring_resources(pdata);
+ if (ret)
+ goto err_channels;
+
+ /* Initialize the service and Tx timers */
+ xgbe_init_timers(pdata);
+
+ return 0;
+
+err_channels:
+ xgbe_free_memory(pdata);
+
+ return ret;
+}
+
static int xgbe_start(struct xgbe_prv_data *pdata)
{
struct xgbe_hw_if *hw_if = &pdata->hw_if;
struct xgbe_phy_if *phy_if = &pdata->phy_if;
struct net_device *netdev = pdata->netdev;
+ unsigned int i;
int ret;
- DBGPR("-->xgbe_start\n");
+ /* Set the number of queues */
+ ret = netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count);
+ if (ret) {
+ netdev_err(netdev, "error setting real tx queue count\n");
+ return ret;
+ }
+
+ ret = netif_set_real_num_rx_queues(netdev, pdata->rx_ring_count);
+ if (ret) {
+ netdev_err(netdev, "error setting real rx queue count\n");
+ return ret;
+ }
+
+ /* Set RSS lookup table data for programming */
+ for (i = 0; i < XGBE_RSS_MAX_TABLE_SIZE; i++)
+ XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH,
+ i % pdata->rx_ring_count);
ret = hw_if->init(pdata);
if (ret)
@@ -1347,8 +1416,6 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
clear_bit(XGBE_STOPPED, &pdata->dev_state);
- DBGPR("<--xgbe_start\n");
-
return 0;
err_irqs:
@@ -1426,10 +1493,22 @@ static void xgbe_stopdev(struct work_struct *work)
netdev_alert(pdata->netdev, "device stopped\n");
}
-static void xgbe_restart_dev(struct xgbe_prv_data *pdata)
+void xgbe_full_restart_dev(struct xgbe_prv_data *pdata)
{
- DBGPR("-->xgbe_restart_dev\n");
+ /* If not running, "restart" will happen on open */
+ if (!netif_running(pdata->netdev))
+ return;
+
+ xgbe_stop(pdata);
+
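+	/* Free and rebuild the channel and ring memory so that staged
+	 * ring/channel count changes are picked up by the allocation.
+	 */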
+ xgbe_free_memory(pdata);
+ xgbe_alloc_memory(pdata);
+
+ xgbe_start(pdata);
+}
+void xgbe_restart_dev(struct xgbe_prv_data *pdata)
+{
/* If not running, "restart" will happen on open */
if (!netif_running(pdata->netdev))
return;
@@ -1440,8 +1519,6 @@ static void xgbe_restart_dev(struct xgbe_prv_data *pdata)
xgbe_free_rx_data(pdata);
xgbe_start(pdata);
-
- DBGPR("<--xgbe_restart_dev\n");
}
static void xgbe_restart(struct work_struct *work)
@@ -1827,11 +1904,8 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata,
static int xgbe_open(struct net_device *netdev)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
- struct xgbe_desc_if *desc_if = &pdata->desc_if;
int ret;
- DBGPR("-->xgbe_open\n");
-
/* Create the various names based on netdev name */
snprintf(pdata->an_name, sizeof(pdata->an_name) - 1, "%s-pcs",
netdev_name(netdev));
@@ -1876,43 +1950,25 @@ static int xgbe_open(struct net_device *netdev)
goto err_sysclk;
}
- /* Calculate the Rx buffer size before allocating rings */
- ret = xgbe_calc_rx_buf_size(netdev, netdev->mtu);
- if (ret < 0)
- goto err_ptpclk;
- pdata->rx_buf_size = ret;
-
- /* Allocate the channel and ring structures */
- ret = xgbe_alloc_channels(pdata);
- if (ret)
- goto err_ptpclk;
-
- /* Allocate the ring descriptors and buffers */
- ret = desc_if->alloc_ring_resources(pdata);
- if (ret)
- goto err_channels;
-
INIT_WORK(&pdata->service_work, xgbe_service);
INIT_WORK(&pdata->restart_work, xgbe_restart);
INIT_WORK(&pdata->stopdev_work, xgbe_stopdev);
INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);
- xgbe_init_timers(pdata);
+
+ ret = xgbe_alloc_memory(pdata);
+ if (ret)
+ goto err_ptpclk;
ret = xgbe_start(pdata);
if (ret)
- goto err_rings;
+ goto err_mem;
clear_bit(XGBE_DOWN, &pdata->dev_state);
- DBGPR("<--xgbe_open\n");
-
return 0;
-err_rings:
- desc_if->free_ring_resources(pdata);
-
-err_channels:
- xgbe_free_channels(pdata);
+err_mem:
+ xgbe_free_memory(pdata);
err_ptpclk:
clk_disable_unprepare(pdata->ptpclk);
@@ -1932,18 +1988,11 @@ err_dev_wq:
static int xgbe_close(struct net_device *netdev)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
- struct xgbe_desc_if *desc_if = &pdata->desc_if;
-
- DBGPR("-->xgbe_close\n");
/* Stop the device */
xgbe_stop(pdata);
- /* Free the ring descriptors and buffers */
- desc_if->free_ring_resources(pdata);
-
- /* Free the channel and ring structures */
- xgbe_free_channels(pdata);
+ xgbe_free_memory(pdata);
/* Disable the clocks */
clk_disable_unprepare(pdata->ptpclk);
@@ -1957,8 +2006,6 @@ static int xgbe_close(struct net_device *netdev)
set_bit(XGBE_DOWN, &pdata->dev_state);
- DBGPR("<--xgbe_close\n");
-
return 0;
}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
index ff397bb25042..a880f10e3e70 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -626,6 +626,217 @@ static int xgbe_get_ts_info(struct net_device *netdev,
return 0;
}
+static int xgbe_get_module_info(struct net_device *netdev,
+ struct ethtool_modinfo *modinfo)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ return pdata->phy_if.module_info(pdata, modinfo);
+}
+
+static int xgbe_get_module_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ return pdata->phy_if.module_eeprom(pdata, eeprom, data);
+}
+
+static void xgbe_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ringparam)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ ringparam->rx_max_pending = XGBE_RX_DESC_CNT_MAX;
+ ringparam->tx_max_pending = XGBE_TX_DESC_CNT_MAX;
+ ringparam->rx_pending = pdata->rx_desc_count;
+ ringparam->tx_pending = pdata->tx_desc_count;
+}
+
+static int xgbe_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ringparam)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ unsigned int rx, tx;
+
+ if (ringparam->rx_mini_pending || ringparam->rx_jumbo_pending) {
+ netdev_err(netdev, "unsupported ring parameter\n");
+ return -EINVAL;
+ }
+
+ if ((ringparam->rx_pending < XGBE_RX_DESC_CNT_MIN) ||
+ (ringparam->rx_pending > XGBE_RX_DESC_CNT_MAX)) {
+ netdev_err(netdev,
+ "rx ring parameter must be between %u and %u\n",
+ XGBE_RX_DESC_CNT_MIN, XGBE_RX_DESC_CNT_MAX);
+ return -EINVAL;
+ }
+
+ if ((ringparam->tx_pending < XGBE_TX_DESC_CNT_MIN) ||
+ (ringparam->tx_pending > XGBE_TX_DESC_CNT_MAX)) {
+ netdev_err(netdev,
+ "tx ring parameter must be between %u and %u\n",
+ XGBE_TX_DESC_CNT_MIN, XGBE_TX_DESC_CNT_MAX);
+ return -EINVAL;
+ }
+
+ rx = __rounddown_pow_of_two(ringparam->rx_pending);
+ if (rx != ringparam->rx_pending)
+ netdev_notice(netdev,
+ "rx ring parameter rounded to power of two: %u\n",
+ rx);
+
+ tx = __rounddown_pow_of_two(ringparam->tx_pending);
+ if (tx != ringparam->tx_pending)
+ netdev_notice(netdev,
+ "tx ring parameter rounded to power of two: %u\n",
+ tx);
+
+ if ((rx == pdata->rx_desc_count) &&
+ (tx == pdata->tx_desc_count))
+ goto out;
+
+ pdata->rx_desc_count = rx;
+ pdata->tx_desc_count = tx;
+
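+	/* Apply the new descriptor counts by restarting the device */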
+ xgbe_restart_dev(pdata);
+
+out:
+ return 0;
+}
+
+static void xgbe_get_channels(struct net_device *netdev,
+ struct ethtool_channels *channels)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ unsigned int rx, tx, combined;
+
+ /* Calculate maximums allowed:
+ * - Take into account the number of available IRQs
+ * - Do not take into account the number of online CPUs so that
+ * the user can over-subscribe if desired
+ * - Tx is additionally limited by the number of hardware queues
+ */
+ rx = min(pdata->hw_feat.rx_ch_cnt, pdata->rx_max_channel_count);
+ rx = min(rx, pdata->channel_irq_count);
+ tx = min(pdata->hw_feat.tx_ch_cnt, pdata->tx_max_channel_count);
+ tx = min(tx, pdata->channel_irq_count);
+ tx = min(tx, pdata->tx_max_q_count);
+
+ combined = min(rx, tx);
+
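+	/* At least one channel must be combined, so the dedicated
+	 * Rx-only/Tx-only maximums are one below the totals.
+	 */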
+ channels->max_combined = combined;
+ channels->max_rx = rx ? rx - 1 : 0;
+ channels->max_tx = tx ? tx - 1 : 0;
+
+ /* Get current settings based on device state */
+ rx = pdata->new_rx_ring_count ? : pdata->rx_ring_count;
+ tx = pdata->new_tx_ring_count ? : pdata->tx_ring_count;
+
+ combined = min(rx, tx);
+ rx -= combined;
+ tx -= combined;
+
+ channels->combined_count = combined;
+ channels->rx_count = rx;
+ channels->tx_count = tx;
+}
+
+static void xgbe_print_set_channels_input(struct net_device *netdev,
+ struct ethtool_channels *channels)
+{
+ netdev_err(netdev, "channel inputs: combined=%u, rx-only=%u, tx-only=%u\n",
+ channels->combined_count, channels->rx_count,
+ channels->tx_count);
+}
+
+static int xgbe_set_channels(struct net_device *netdev,
+ struct ethtool_channels *channels)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ unsigned int rx, rx_curr, tx, tx_curr, combined;
+
+ /* Calculate maximums allowed:
+ * - Take into account the number of available IRQs
+ * - Do not take into account the number of online CPUs so that
+ * the user can over-subscribe if desired
+ * - Tx is additionally limited by the number of hardware queues
+ */
+ rx = min(pdata->hw_feat.rx_ch_cnt, pdata->rx_max_channel_count);
+ rx = min(rx, pdata->channel_irq_count);
+ tx = min(pdata->hw_feat.tx_ch_cnt, pdata->tx_max_channel_count);
+ tx = min(tx, pdata->tx_max_q_count);
+ tx = min(tx, pdata->channel_irq_count);
+
+ combined = min(rx, tx);
+
+ /* Should not be setting other count */
+ if (channels->other_count) {
+ netdev_err(netdev,
+ "other channel count must be zero\n");
+ return -EINVAL;
+ }
+
+ /* Require at least one Combined (Rx and Tx) channel */
+ if (!channels->combined_count) {
+ netdev_err(netdev,
+ "at least one combined Rx/Tx channel is required\n");
+ xgbe_print_set_channels_input(netdev, channels);
+ return -EINVAL;
+ }
+
+ /* Check combined channels */
+ if (channels->combined_count > combined) {
+ netdev_err(netdev,
+ "combined channel count cannot exceed %u\n",
+ combined);
+ xgbe_print_set_channels_input(netdev, channels);
+ return -EINVAL;
+ }
+
+ /* Can have some Rx-only or Tx-only channels, but not both */
+ if (channels->rx_count && channels->tx_count) {
+ netdev_err(netdev,
+ "cannot specify both Rx-only and Tx-only channels\n");
+ xgbe_print_set_channels_input(netdev, channels);
+ return -EINVAL;
+ }
+
+ /* Check that we don't exceed the maximum number of channels */
+ if ((channels->combined_count + channels->rx_count) > rx) {
+ netdev_err(netdev,
+ "total Rx channels (%u) requested exceeds maximum available (%u)\n",
+ channels->combined_count + channels->rx_count, rx);
+ xgbe_print_set_channels_input(netdev, channels);
+ return -EINVAL;
+ }
+
+ if ((channels->combined_count + channels->tx_count) > tx) {
+ netdev_err(netdev,
+ "total Tx channels (%u) requested exceeds maximum available (%u)\n",
+ channels->combined_count + channels->tx_count, tx);
+ xgbe_print_set_channels_input(netdev, channels);
+ return -EINVAL;
+ }
+
+ rx = channels->combined_count + channels->rx_count;
+ tx = channels->combined_count + channels->tx_count;
+
+ rx_curr = pdata->new_rx_ring_count ? : pdata->rx_ring_count;
+ tx_curr = pdata->new_tx_ring_count ? : pdata->tx_ring_count;
+
+ if ((rx == rx_curr) && (tx == tx_curr))
+ goto out;
+
+ pdata->new_rx_ring_count = rx;
+ pdata->new_tx_ring_count = tx;
+
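+	/* A full restart is required: the channel structures themselves
+	 * are freed and reallocated for the new counts.
+	 */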
+ xgbe_full_restart_dev(pdata);
+
+out:
+ return 0;
+}
+
static const struct ethtool_ops xgbe_ethtool_ops = {
.get_drvinfo = xgbe_get_drvinfo,
.get_msglevel = xgbe_get_msglevel,
@@ -646,6 +857,12 @@ static const struct ethtool_ops xgbe_ethtool_ops = {
.get_ts_info = xgbe_get_ts_info,
.get_link_ksettings = xgbe_get_link_ksettings,
.set_link_ksettings = xgbe_set_link_ksettings,
+ .get_module_info = xgbe_get_module_info,
+ .get_module_eeprom = xgbe_get_module_eeprom,
+ .get_ringparam = xgbe_get_ringparam,
+ .set_ringparam = xgbe_set_ringparam,
+ .get_channels = xgbe_get_channels,
+ .set_channels = xgbe_set_channels,
};
const struct ethtool_ops *xgbe_get_ethtool_ops(void)
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index 441d0973957b..b41f23679a08 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -265,7 +265,6 @@ int xgbe_config_netdev(struct xgbe_prv_data *pdata)
{
struct net_device *netdev = pdata->netdev;
struct device *dev = pdata->dev;
- unsigned int i;
int ret;
netdev->irq = pdata->dev_irq;
@@ -324,26 +323,9 @@ int xgbe_config_netdev(struct xgbe_prv_data *pdata)
pdata->tx_ring_count, pdata->rx_ring_count);
}
- /* Set the number of queues */
- ret = netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count);
- if (ret) {
- dev_err(dev, "error setting real tx queue count\n");
- return ret;
- }
-
- ret = netif_set_real_num_rx_queues(netdev, pdata->rx_ring_count);
- if (ret) {
- dev_err(dev, "error setting real rx queue count\n");
- return ret;
- }
-
- /* Initialize RSS hash key and lookup table */
+ /* Initialize RSS hash key */
netdev_rss_key_fill(pdata->rss_key, sizeof(pdata->rss_key));
- for (i = 0; i < XGBE_RSS_MAX_TABLE_SIZE; i++)
- XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH,
- i % pdata->rx_ring_count);
-
XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
index 1b45cd73a258..4b5d625de8f0 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -126,6 +126,24 @@
#include "xgbe.h"
#include "xgbe-common.h"
+static int xgbe_phy_module_eeprom(struct xgbe_prv_data *pdata,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ if (!pdata->phy_if.phy_impl.module_eeprom)
+ return -ENXIO;
+
+ return pdata->phy_if.phy_impl.module_eeprom(pdata, eeprom, data);
+}
+
+static int xgbe_phy_module_info(struct xgbe_prv_data *pdata,
+ struct ethtool_modinfo *modinfo)
+{
+ if (!pdata->phy_if.phy_impl.module_info)
+ return -ENXIO;
+
+ return pdata->phy_if.phy_impl.module_info(pdata, modinfo);
+}
+
static void xgbe_an37_clear_interrupts(struct xgbe_prv_data *pdata)
{
int reg;
@@ -198,31 +216,8 @@ static void xgbe_an_clear_interrupts_all(struct xgbe_prv_data *pdata)
xgbe_an37_clear_interrupts(pdata);
}
-static void xgbe_an73_enable_kr_training(struct xgbe_prv_data *pdata)
-{
- unsigned int reg;
-
- reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
-
- reg |= XGBE_KR_TRAINING_ENABLE;
- XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg);
-}
-
-static void xgbe_an73_disable_kr_training(struct xgbe_prv_data *pdata)
-{
- unsigned int reg;
-
- reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
-
- reg &= ~XGBE_KR_TRAINING_ENABLE;
- XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg);
-}
-
static void xgbe_kr_mode(struct xgbe_prv_data *pdata)
{
- /* Enable KR training */
- xgbe_an73_enable_kr_training(pdata);
-
/* Set MAC to 10G speed */
pdata->hw_if.set_speed(pdata, SPEED_10000);
@@ -232,9 +227,6 @@ static void xgbe_kr_mode(struct xgbe_prv_data *pdata)
static void xgbe_kx_2500_mode(struct xgbe_prv_data *pdata)
{
- /* Disable KR training */
- xgbe_an73_disable_kr_training(pdata);
-
/* Set MAC to 2.5G speed */
pdata->hw_if.set_speed(pdata, SPEED_2500);
@@ -244,9 +236,6 @@ static void xgbe_kx_2500_mode(struct xgbe_prv_data *pdata)
static void xgbe_kx_1000_mode(struct xgbe_prv_data *pdata)
{
- /* Disable KR training */
- xgbe_an73_disable_kr_training(pdata);
-
/* Set MAC to 1G speed */
pdata->hw_if.set_speed(pdata, SPEED_1000);
@@ -260,9 +249,6 @@ static void xgbe_sfi_mode(struct xgbe_prv_data *pdata)
if (pdata->kr_redrv)
return xgbe_kr_mode(pdata);
- /* Disable KR training */
- xgbe_an73_disable_kr_training(pdata);
-
/* Set MAC to 10G speed */
pdata->hw_if.set_speed(pdata, SPEED_10000);
@@ -272,9 +258,6 @@ static void xgbe_sfi_mode(struct xgbe_prv_data *pdata)
static void xgbe_x_mode(struct xgbe_prv_data *pdata)
{
- /* Disable KR training */
- xgbe_an73_disable_kr_training(pdata);
-
/* Set MAC to 1G speed */
pdata->hw_if.set_speed(pdata, SPEED_1000);
@@ -284,9 +267,6 @@ static void xgbe_x_mode(struct xgbe_prv_data *pdata)
static void xgbe_sgmii_1000_mode(struct xgbe_prv_data *pdata)
{
- /* Disable KR training */
- xgbe_an73_disable_kr_training(pdata);
-
/* Set MAC to 1G speed */
pdata->hw_if.set_speed(pdata, SPEED_1000);
@@ -296,9 +276,6 @@ static void xgbe_sgmii_1000_mode(struct xgbe_prv_data *pdata)
static void xgbe_sgmii_100_mode(struct xgbe_prv_data *pdata)
{
- /* Disable KR training */
- xgbe_an73_disable_kr_training(pdata);
-
/* Set MAC to 1G speed */
pdata->hw_if.set_speed(pdata, SPEED_1000);
@@ -354,13 +331,15 @@ static void xgbe_switch_mode(struct xgbe_prv_data *pdata)
xgbe_change_mode(pdata, pdata->phy_if.phy_impl.switch_mode(pdata));
}
-static void xgbe_set_mode(struct xgbe_prv_data *pdata,
+static bool xgbe_set_mode(struct xgbe_prv_data *pdata,
enum xgbe_mode mode)
{
if (mode == xgbe_cur_mode(pdata))
- return;
+ return false;
xgbe_change_mode(pdata, mode);
+
+ return true;
}
static bool xgbe_use_mode(struct xgbe_prv_data *pdata,
@@ -407,6 +386,12 @@ static void xgbe_an73_set(struct xgbe_prv_data *pdata, bool enable,
{
unsigned int reg;
+ /* Disable KR training for now */
+ reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
+ reg &= ~XGBE_KR_TRAINING_ENABLE;
+ XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg);
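+	/* (It is re-enabled, together with the start bit, when
+	 *  xgbe_an73_tx_training actually begins KR training.)
+	 */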
+
+ /* Update AN settings */
reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1);
reg &= ~MDIO_AN_CTRL1_ENABLE;
@@ -504,21 +489,19 @@ static enum xgbe_an xgbe_an73_tx_training(struct xgbe_prv_data *pdata,
XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FECCTRL, reg);
/* Start KR training */
- reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
- if (reg & XGBE_KR_TRAINING_ENABLE) {
- if (pdata->phy_if.phy_impl.kr_training_pre)
- pdata->phy_if.phy_impl.kr_training_pre(pdata);
+ if (pdata->phy_if.phy_impl.kr_training_pre)
+ pdata->phy_if.phy_impl.kr_training_pre(pdata);
- reg |= XGBE_KR_TRAINING_START;
- XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL,
- reg);
+ reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
+ reg |= XGBE_KR_TRAINING_ENABLE;
+ reg |= XGBE_KR_TRAINING_START;
+ XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg);
- netif_dbg(pdata, link, pdata->netdev,
- "KR training initiated\n");
+ netif_dbg(pdata, link, pdata->netdev,
+ "KR training initiated\n");
- if (pdata->phy_if.phy_impl.kr_training_post)
- pdata->phy_if.phy_impl.kr_training_post(pdata);
- }
+ if (pdata->phy_if.phy_impl.kr_training_post)
+ pdata->phy_if.phy_impl.kr_training_post(pdata);
return XGBE_AN_PAGE_RECEIVED;
}
@@ -1197,21 +1180,23 @@ static int xgbe_phy_config_fixed(struct xgbe_prv_data *pdata)
return 0;
}
-static int __xgbe_phy_config_aneg(struct xgbe_prv_data *pdata)
+static int __xgbe_phy_config_aneg(struct xgbe_prv_data *pdata, bool set_mode)
{
int ret;
+ mutex_lock(&pdata->an_mutex);
+
set_bit(XGBE_LINK_INIT, &pdata->dev_state);
pdata->link_check = jiffies;
ret = pdata->phy_if.phy_impl.an_config(pdata);
if (ret)
- return ret;
+ goto out;
if (pdata->phy.autoneg != AUTONEG_ENABLE) {
ret = xgbe_phy_config_fixed(pdata);
if (ret || !pdata->kr_redrv)
- return ret;
+ goto out;
netif_dbg(pdata, link, pdata->netdev, "AN redriver support\n");
} else {
@@ -1221,24 +1206,27 @@ static int __xgbe_phy_config_aneg(struct xgbe_prv_data *pdata)
/* Disable auto-negotiation interrupt */
disable_irq(pdata->an_irq);
- /* Start auto-negotiation in a supported mode */
- if (xgbe_use_mode(pdata, XGBE_MODE_KR)) {
- xgbe_set_mode(pdata, XGBE_MODE_KR);
- } else if (xgbe_use_mode(pdata, XGBE_MODE_KX_2500)) {
- xgbe_set_mode(pdata, XGBE_MODE_KX_2500);
- } else if (xgbe_use_mode(pdata, XGBE_MODE_KX_1000)) {
- xgbe_set_mode(pdata, XGBE_MODE_KX_1000);
- } else if (xgbe_use_mode(pdata, XGBE_MODE_SFI)) {
- xgbe_set_mode(pdata, XGBE_MODE_SFI);
- } else if (xgbe_use_mode(pdata, XGBE_MODE_X)) {
- xgbe_set_mode(pdata, XGBE_MODE_X);
- } else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_1000)) {
- xgbe_set_mode(pdata, XGBE_MODE_SGMII_1000);
- } else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_100)) {
- xgbe_set_mode(pdata, XGBE_MODE_SGMII_100);
- } else {
- enable_irq(pdata->an_irq);
- return -EINVAL;
+ if (set_mode) {
+ /* Start auto-negotiation in a supported mode */
+ if (xgbe_use_mode(pdata, XGBE_MODE_KR)) {
+ xgbe_set_mode(pdata, XGBE_MODE_KR);
+ } else if (xgbe_use_mode(pdata, XGBE_MODE_KX_2500)) {
+ xgbe_set_mode(pdata, XGBE_MODE_KX_2500);
+ } else if (xgbe_use_mode(pdata, XGBE_MODE_KX_1000)) {
+ xgbe_set_mode(pdata, XGBE_MODE_KX_1000);
+ } else if (xgbe_use_mode(pdata, XGBE_MODE_SFI)) {
+ xgbe_set_mode(pdata, XGBE_MODE_SFI);
+ } else if (xgbe_use_mode(pdata, XGBE_MODE_X)) {
+ xgbe_set_mode(pdata, XGBE_MODE_X);
+ } else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_1000)) {
+ xgbe_set_mode(pdata, XGBE_MODE_SGMII_1000);
+ } else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_100)) {
+ xgbe_set_mode(pdata, XGBE_MODE_SGMII_100);
+ } else {
+ enable_irq(pdata->an_irq);
+ ret = -EINVAL;
+ goto out;
+ }
}
/* Disable and stop any in progress auto-negotiation */
@@ -1258,16 +1246,7 @@ static int __xgbe_phy_config_aneg(struct xgbe_prv_data *pdata)
xgbe_an_init(pdata);
xgbe_an_restart(pdata);
- return 0;
-}
-
-static int xgbe_phy_config_aneg(struct xgbe_prv_data *pdata)
-{
- int ret;
-
- mutex_lock(&pdata->an_mutex);
-
- ret = __xgbe_phy_config_aneg(pdata);
+out:
if (ret)
set_bit(XGBE_LINK_ERR, &pdata->dev_state);
else
@@ -1278,6 +1257,16 @@ static int xgbe_phy_config_aneg(struct xgbe_prv_data *pdata)
return ret;
}
+static int xgbe_phy_config_aneg(struct xgbe_prv_data *pdata)
+{
+ return __xgbe_phy_config_aneg(pdata, true);
+}
+
+static int xgbe_phy_reconfig_aneg(struct xgbe_prv_data *pdata)
+{
+ return __xgbe_phy_config_aneg(pdata, false);
+}
+
static bool xgbe_phy_aneg_done(struct xgbe_prv_data *pdata)
{
return (pdata->an_result == XGBE_AN_COMPLETE);
@@ -1334,7 +1323,8 @@ static void xgbe_phy_status_result(struct xgbe_prv_data *pdata)
pdata->phy.duplex = DUPLEX_FULL;
- xgbe_set_mode(pdata, mode);
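+	/* Some PHYs (flagged via an_again by a quirk) need auto-negotiation
+	 * re-run after a mode change; reconfigure it here without
+	 * reselecting the mode.
+	 */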
+ if (xgbe_set_mode(pdata, mode) && pdata->an_again)
+ xgbe_phy_reconfig_aneg(pdata);
}
static void xgbe_phy_status(struct xgbe_prv_data *pdata)
@@ -1639,4 +1629,7 @@ void xgbe_init_function_ptrs_phy(struct xgbe_phy_if *phy_if)
phy_if->phy_valid_speed = xgbe_phy_valid_speed;
phy_if->an_isr = xgbe_an_combined_isr;
+
+ phy_if->module_info = xgbe_phy_module_info;
+ phy_if->module_eeprom = xgbe_phy_module_eeprom;
}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
index 82d1f416ee2a..7b86240ecd5f 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
@@ -335,16 +335,33 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pdata->awcr = XGBE_DMA_PCI_AWCR;
pdata->awarcr = XGBE_DMA_PCI_AWARCR;
+ /* Read the port property registers */
+ pdata->pp0 = XP_IOREAD(pdata, XP_PROP_0);
+ pdata->pp1 = XP_IOREAD(pdata, XP_PROP_1);
+ pdata->pp2 = XP_IOREAD(pdata, XP_PROP_2);
+ pdata->pp3 = XP_IOREAD(pdata, XP_PROP_3);
+ pdata->pp4 = XP_IOREAD(pdata, XP_PROP_4);
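+	/* The cached copies are used by the rest of the setup code
+	 * (channel counts, fifo sizing, PHY init) instead of re-issuing
+	 * XP_IOREADs.
+	 */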
+ if (netif_msg_probe(pdata)) {
+ dev_dbg(dev, "port property 0 = %#010x\n", pdata->pp0);
+ dev_dbg(dev, "port property 1 = %#010x\n", pdata->pp1);
+ dev_dbg(dev, "port property 2 = %#010x\n", pdata->pp2);
+ dev_dbg(dev, "port property 3 = %#010x\n", pdata->pp3);
+ dev_dbg(dev, "port property 4 = %#010x\n", pdata->pp4);
+ }
+
/* Set the maximum channels and queues */
- reg = XP_IOREAD(pdata, XP_PROP_1);
- pdata->tx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_DMA);
- pdata->rx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_DMA);
- pdata->tx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_QUEUES);
- pdata->rx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_QUEUES);
+ pdata->tx_max_channel_count = XP_GET_BITS(pdata->pp1, XP_PROP_1,
+ MAX_TX_DMA);
+ pdata->rx_max_channel_count = XP_GET_BITS(pdata->pp1, XP_PROP_1,
+ MAX_RX_DMA);
+ pdata->tx_max_q_count = XP_GET_BITS(pdata->pp1, XP_PROP_1,
+ MAX_TX_QUEUES);
+ pdata->rx_max_q_count = XP_GET_BITS(pdata->pp1, XP_PROP_1,
+ MAX_RX_QUEUES);
if (netif_msg_probe(pdata)) {
dev_dbg(dev, "max tx/rx channel count = %u/%u\n",
pdata->tx_max_channel_count,
- pdata->tx_max_channel_count);
+ pdata->rx_max_channel_count);
dev_dbg(dev, "max tx/rx hw queue count = %u/%u\n",
pdata->tx_max_q_count, pdata->rx_max_q_count);
}
@@ -353,12 +370,13 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
xgbe_set_counts(pdata);
/* Set the maximum fifo amounts */
- reg = XP_IOREAD(pdata, XP_PROP_2);
- pdata->tx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, TX_FIFO_SIZE);
+ pdata->tx_max_fifo_size = XP_GET_BITS(pdata->pp2, XP_PROP_2,
+ TX_FIFO_SIZE);
pdata->tx_max_fifo_size *= 16384;
pdata->tx_max_fifo_size = min(pdata->tx_max_fifo_size,
pdata->vdata->tx_max_fifo_size);
- pdata->rx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, RX_FIFO_SIZE);
+ pdata->rx_max_fifo_size = XP_GET_BITS(pdata->pp2, XP_PROP_2,
+ RX_FIFO_SIZE);
pdata->rx_max_fifo_size *= 16384;
pdata->rx_max_fifo_size = min(pdata->rx_max_fifo_size,
pdata->vdata->rx_max_fifo_size);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
index aac884314000..3ceb4f95ca7c 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
@@ -119,6 +119,7 @@
#include <linux/kmod.h>
#include <linux/mdio.h>
#include <linux/phy.h>
+#include <linux/ethtool.h>
#include "xgbe.h"
#include "xgbe-common.h"
@@ -270,6 +271,15 @@ struct xgbe_sfp_eeprom {
u8 vendor[32];
};
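+/* SFF-8472 diagnostics are usable when the module advertises 8472
+ * compliance and does not need an address-change sequence to reach the
+ * diagnostic page.
+ */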
+#define XGBE_SFP_DIAGS_SUPPORTED(_x) \
+ ((_x)->extd[XGBE_SFP_EXTD_SFF_8472] && \
+ !((_x)->extd[XGBE_SFP_EXTD_DIAG] & XGBE_SFP_EXTD_DIAG_ADDR_CHANGE))
+
+#define XGBE_SFP_EEPROM_BASE_LEN 256
+#define XGBE_SFP_EEPROM_DIAG_LEN 256
+#define XGBE_SFP_EEPROM_MAX (XGBE_SFP_EEPROM_BASE_LEN + \
+ XGBE_SFP_EEPROM_DIAG_LEN)
+
#define XGBE_BEL_FUSE_VENDOR "BEL-FUSE "
#define XGBE_BEL_FUSE_PARTNO "1GBT-SFP06 "
@@ -327,8 +337,6 @@ struct xgbe_phy_data {
unsigned int mdio_addr;
- unsigned int comm_owned;
-
/* SFP Support */
enum xgbe_sfp_comm sfp_comm;
unsigned int sfp_mux_address;
@@ -345,7 +353,6 @@ struct xgbe_phy_data {
unsigned int sfp_rx_los;
unsigned int sfp_tx_fault;
unsigned int sfp_mod_absent;
- unsigned int sfp_diags;
unsigned int sfp_changed;
unsigned int sfp_phy_avail;
unsigned int sfp_cable_len;
@@ -382,12 +389,6 @@ static enum xgbe_an_mode xgbe_phy_an_mode(struct xgbe_prv_data *pdata);
static int xgbe_phy_i2c_xfer(struct xgbe_prv_data *pdata,
struct xgbe_i2c_op *i2c_op)
{
- struct xgbe_phy_data *phy_data = pdata->phy_data;
-
- /* Be sure we own the bus */
- if (WARN_ON(!phy_data->comm_owned))
- return -EIO;
-
return pdata->i2c_if.i2c_xfer(pdata, i2c_op);
}
@@ -549,10 +550,6 @@ static int xgbe_phy_sfp_get_mux(struct xgbe_prv_data *pdata)
static void xgbe_phy_put_comm_ownership(struct xgbe_prv_data *pdata)
{
- struct xgbe_phy_data *phy_data = pdata->phy_data;
-
- phy_data->comm_owned = 0;
-
mutex_unlock(&xgbe_phy_comm_lock);
}
@@ -562,9 +559,6 @@ static int xgbe_phy_get_comm_ownership(struct xgbe_prv_data *pdata)
unsigned long timeout;
unsigned int mutex_id;
- if (phy_data->comm_owned)
- return 0;
-
/* The I2C and MDIO/GPIO bus is multiplexed between multiple devices,
* the driver needs to take the software mutex and then the hardware
* mutexes before being able to use the busses.
@@ -593,7 +587,6 @@ static int xgbe_phy_get_comm_ownership(struct xgbe_prv_data *pdata)
XP_IOWRITE(pdata, XP_I2C_MUTEX, mutex_id);
XP_IOWRITE(pdata, XP_MDIO_MUTEX, mutex_id);
- phy_data->comm_owned = 1;
return 0;
}
@@ -867,6 +860,9 @@ static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata)
struct xgbe_phy_data *phy_data = pdata->phy_data;
unsigned int phy_id = phy_data->phydev->phy_id;
+ if (phy_data->port_mode != XGBE_PORT_MODE_SFP)
+ return false;
+
if ((phy_id & 0xfffffff0) != 0x01ff0cc0)
return false;
@@ -892,8 +888,83 @@ static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata)
return true;
}
+static bool xgbe_phy_belfuse_phy_quirks(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ struct xgbe_sfp_eeprom *sfp_eeprom = &phy_data->sfp_eeprom;
+ unsigned int phy_id = phy_data->phydev->phy_id;
+ int reg;
+
+ if (phy_data->port_mode != XGBE_PORT_MODE_SFP)
+ return false;
+
+ if (memcmp(&sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_NAME],
+ XGBE_BEL_FUSE_VENDOR, XGBE_SFP_BASE_VENDOR_NAME_LEN))
+ return false;
+
+	/* Every Bel-Fuse module needs the extra AN flag, not just the
+	 * part handled below
+	 */
+ pdata->an_again = 1;
+
+ if (memcmp(&sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_PN],
+ XGBE_BEL_FUSE_PARTNO, XGBE_SFP_BASE_VENDOR_PN_LEN))
+ return false;
+
+ if ((phy_id & 0xfffffff0) != 0x03625d10)
+ return false;
+
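+	/* The sequences below use the PHY's shadow register banks (0x18
+	 * and 0x1c) to switch the module's internal copper PHY over to
+	 * SGMII-to-copper operation.
+	 */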
+ /* Disable RGMII mode */
+ phy_write(phy_data->phydev, 0x18, 0x7007);
+ reg = phy_read(phy_data->phydev, 0x18);
+ phy_write(phy_data->phydev, 0x18, reg & ~0x0080);
+
+ /* Enable fiber register bank */
+ phy_write(phy_data->phydev, 0x1c, 0x7c00);
+ reg = phy_read(phy_data->phydev, 0x1c);
+ reg &= 0x03ff;
+ reg &= ~0x0001;
+ phy_write(phy_data->phydev, 0x1c, 0x8000 | 0x7c00 | reg | 0x0001);
+
+ /* Power down SerDes */
+ reg = phy_read(phy_data->phydev, 0x00);
+ phy_write(phy_data->phydev, 0x00, reg | 0x00800);
+
+ /* Configure SGMII-to-Copper mode */
+ phy_write(phy_data->phydev, 0x1c, 0x7c00);
+ reg = phy_read(phy_data->phydev, 0x1c);
+ reg &= 0x03ff;
+ reg &= ~0x0006;
+ phy_write(phy_data->phydev, 0x1c, 0x8000 | 0x7c00 | reg | 0x0004);
+
+ /* Power up SerDes */
+ reg = phy_read(phy_data->phydev, 0x00);
+ phy_write(phy_data->phydev, 0x00, reg & ~0x00800);
+
+ /* Enable copper register bank */
+ phy_write(phy_data->phydev, 0x1c, 0x7c00);
+ reg = phy_read(phy_data->phydev, 0x1c);
+ reg &= 0x03ff;
+ reg &= ~0x0001;
+ phy_write(phy_data->phydev, 0x1c, 0x8000 | 0x7c00 | reg);
+
+ /* Power up SerDes */
+ reg = phy_read(phy_data->phydev, 0x00);
+ phy_write(phy_data->phydev, 0x00, reg & ~0x00800);
+
+ phy_data->phydev->supported = PHY_GBIT_FEATURES;
+ phy_data->phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ phy_data->phydev->advertising = phy_data->phydev->supported;
+
+ netif_dbg(pdata, drv, pdata->netdev,
+ "BelFuse PHY quirk in place\n");
+
+ return true;
+}
+
static void xgbe_phy_external_phy_quirks(struct xgbe_prv_data *pdata)
{
+ if (xgbe_phy_belfuse_phy_quirks(pdata))
+ return;
+
if (xgbe_phy_finisar_phy_quirks(pdata))
return;
}
@@ -910,6 +981,9 @@ static int xgbe_phy_find_phy_device(struct xgbe_prv_data *pdata)
if (phy_data->phydev)
return 0;
+ /* Clear the extra AN flag */
+ pdata->an_again = 0;
+
/* Check for the use of an external PHY */
if (phy_data->phydev_mode == XGBE_MDIO_MODE_NONE)
return 0;
@@ -1034,37 +1108,6 @@ static bool xgbe_phy_check_sfp_mod_absent(struct xgbe_phy_data *phy_data)
return false;
}
-static bool xgbe_phy_belfuse_parse_quirks(struct xgbe_prv_data *pdata)
-{
- struct xgbe_phy_data *phy_data = pdata->phy_data;
- struct xgbe_sfp_eeprom *sfp_eeprom = &phy_data->sfp_eeprom;
-
- if (memcmp(&sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_NAME],
- XGBE_BEL_FUSE_VENDOR, XGBE_SFP_BASE_VENDOR_NAME_LEN))
- return false;
-
- if (!memcmp(&sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_PN],
- XGBE_BEL_FUSE_PARTNO, XGBE_SFP_BASE_VENDOR_PN_LEN)) {
- phy_data->sfp_base = XGBE_SFP_BASE_1000_SX;
- phy_data->sfp_cable = XGBE_SFP_CABLE_ACTIVE;
- phy_data->sfp_speed = XGBE_SFP_SPEED_1000;
- if (phy_data->sfp_changed)
- netif_dbg(pdata, drv, pdata->netdev,
- "Bel-Fuse SFP quirk in place\n");
- return true;
- }
-
- return false;
-}
-
-static bool xgbe_phy_sfp_parse_quirks(struct xgbe_prv_data *pdata)
-{
- if (xgbe_phy_belfuse_parse_quirks(pdata))
- return true;
-
- return false;
-}
-
static void xgbe_phy_sfp_parse_eeprom(struct xgbe_prv_data *pdata)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
@@ -1083,9 +1126,6 @@ static void xgbe_phy_sfp_parse_eeprom(struct xgbe_prv_data *pdata)
phy_data->sfp_tx_fault = xgbe_phy_check_sfp_tx_fault(phy_data);
phy_data->sfp_rx_los = xgbe_phy_check_sfp_rx_los(phy_data);
- if (xgbe_phy_sfp_parse_quirks(pdata))
- return;
-
/* Assume ACTIVE cable unless told it is PASSIVE */
if (sfp_base[XGBE_SFP_BASE_CABLE] & XGBE_SFP_BASE_CABLE_PASSIVE) {
phy_data->sfp_cable = XGBE_SFP_CABLE_PASSIVE;
@@ -1227,13 +1267,6 @@ static int xgbe_phy_sfp_read_eeprom(struct xgbe_prv_data *pdata)
memcpy(&phy_data->sfp_eeprom, &sfp_eeprom, sizeof(sfp_eeprom));
- if (sfp_eeprom.extd[XGBE_SFP_EXTD_SFF_8472]) {
- u8 diag_type = sfp_eeprom.extd[XGBE_SFP_EXTD_DIAG];
-
- if (!(diag_type & XGBE_SFP_EXTD_DIAG_ADDR_CHANGE))
- phy_data->sfp_diags = 1;
- }
-
xgbe_phy_free_phy_device(pdata);
} else {
phy_data->sfp_changed = 0;
@@ -1283,7 +1316,6 @@ static void xgbe_phy_sfp_reset(struct xgbe_phy_data *phy_data)
phy_data->sfp_rx_los = 0;
phy_data->sfp_tx_fault = 0;
phy_data->sfp_mod_absent = 1;
- phy_data->sfp_diags = 0;
phy_data->sfp_base = XGBE_SFP_BASE_UNKNOWN;
phy_data->sfp_cable = XGBE_SFP_CABLE_UNKNOWN;
phy_data->sfp_speed = XGBE_SFP_SPEED_UNKNOWN;
@@ -1326,6 +1358,130 @@ put:
xgbe_phy_put_comm_ownership(pdata);
}
+static int xgbe_phy_module_eeprom(struct xgbe_prv_data *pdata,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ u8 eeprom_addr, eeprom_data[XGBE_SFP_EEPROM_MAX];
+ struct xgbe_sfp_eeprom *sfp_eeprom;
+ unsigned int i, j, rem;
+ int ret;
+
+ rem = eeprom->len;
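+	/* rem tracks bytes not yet copied; eeprom->len is trimmed by it
+	 * on exit so the caller sees how much data was returned.
+	 */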
+
+ if (!eeprom->len) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ if ((eeprom->offset + eeprom->len) > XGBE_SFP_EEPROM_MAX) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ if (phy_data->port_mode != XGBE_PORT_MODE_SFP) {
+ ret = -ENXIO;
+ goto done;
+ }
+
+ if (!netif_running(pdata->netdev)) {
+ ret = -EIO;
+ goto done;
+ }
+
+ if (phy_data->sfp_mod_absent) {
+ ret = -EIO;
+ goto done;
+ }
+
+ ret = xgbe_phy_get_comm_ownership(pdata);
+ if (ret) {
+ ret = -EIO;
+ goto done;
+ }
+
+ ret = xgbe_phy_sfp_get_mux(pdata);
+ if (ret) {
+ netdev_err(pdata->netdev, "I2C error setting SFP MUX\n");
+ ret = -EIO;
+ goto put_own;
+ }
+
+ /* Read the SFP serial ID eeprom */
+ eeprom_addr = 0;
+ ret = xgbe_phy_i2c_read(pdata, XGBE_SFP_SERIAL_ID_ADDRESS,
+ &eeprom_addr, sizeof(eeprom_addr),
+ eeprom_data, XGBE_SFP_EEPROM_BASE_LEN);
+ if (ret) {
+ netdev_err(pdata->netdev,
+ "I2C error reading SFP EEPROM\n");
+ ret = -EIO;
+ goto put_mux;
+ }
+
+ sfp_eeprom = (struct xgbe_sfp_eeprom *)eeprom_data;
+
+ if (XGBE_SFP_DIAGS_SUPPORTED(sfp_eeprom)) {
+ /* Read the SFP diagnostic eeprom */
+ eeprom_addr = 0;
+ ret = xgbe_phy_i2c_read(pdata, XGBE_SFP_DIAG_INFO_ADDRESS,
+ &eeprom_addr, sizeof(eeprom_addr),
+ eeprom_data + XGBE_SFP_EEPROM_BASE_LEN,
+ XGBE_SFP_EEPROM_DIAG_LEN);
+ if (ret) {
+ netdev_err(pdata->netdev,
+ "I2C error reading SFP DIAGS\n");
+ ret = -EIO;
+ goto put_mux;
+ }
+ }
+
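+	/* Copy out the requested window, stopping at the base/diagnostic
+	 * boundary if the module has no diagnostic page.
+	 */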
+ for (i = 0, j = eeprom->offset; i < eeprom->len; i++, j++) {
+ if ((j >= XGBE_SFP_EEPROM_BASE_LEN) &&
+ !XGBE_SFP_DIAGS_SUPPORTED(sfp_eeprom))
+ break;
+
+ data[i] = eeprom_data[j];
+ rem--;
+ }
+
+put_mux:
+ xgbe_phy_sfp_put_mux(pdata);
+
+put_own:
+ xgbe_phy_put_comm_ownership(pdata);
+
+done:
+ eeprom->len -= rem;
+
+ return ret;
+}
+
+static int xgbe_phy_module_info(struct xgbe_prv_data *pdata,
+ struct ethtool_modinfo *modinfo)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ if (phy_data->port_mode != XGBE_PORT_MODE_SFP)
+ return -ENXIO;
+
+ if (!netif_running(pdata->netdev))
+ return -EIO;
+
+ if (phy_data->sfp_mod_absent)
+ return -EIO;
+
+ if (XGBE_SFP_DIAGS_SUPPORTED(&phy_data->sfp_eeprom)) {
+ modinfo->type = ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+ } else {
+ modinfo->type = ETH_MODULE_SFF_8079;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
+ }
+
+ return 0;
+}
+
static void xgbe_phy_phydev_flowctrl(struct xgbe_prv_data *pdata)
{
struct ethtool_link_ksettings *lks = &pdata->phy.lks;
@@ -1611,6 +1767,10 @@ static void xgbe_phy_an_advertising(struct xgbe_prv_data *pdata,
XGBE_CLR_ADV(dlks, 1000baseKX_Full);
XGBE_CLR_ADV(dlks, 10000baseKR_Full);
+	/* Advertise FEC support when the hardware is able to provide it */
+ if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE)
+ XGBE_SET_ADV(dlks, 10000baseR_FEC);
+
switch (phy_data->port_mode) {
case XGBE_PORT_MODE_BACKPLANE:
XGBE_SET_ADV(dlks, 10000baseKR_Full);
@@ -2421,22 +2581,21 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
static void xgbe_phy_sfp_gpio_setup(struct xgbe_prv_data *pdata)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
- unsigned int reg;
-
- reg = XP_IOREAD(pdata, XP_PROP_3);
phy_data->sfp_gpio_address = XGBE_GPIO_ADDRESS_PCA9555 +
- XP_GET_BITS(reg, XP_PROP_3, GPIO_ADDR);
+ XP_GET_BITS(pdata->pp3, XP_PROP_3,
+ GPIO_ADDR);
- phy_data->sfp_gpio_mask = XP_GET_BITS(reg, XP_PROP_3, GPIO_MASK);
+ phy_data->sfp_gpio_mask = XP_GET_BITS(pdata->pp3, XP_PROP_3,
+ GPIO_MASK);
- phy_data->sfp_gpio_rx_los = XP_GET_BITS(reg, XP_PROP_3,
+ phy_data->sfp_gpio_rx_los = XP_GET_BITS(pdata->pp3, XP_PROP_3,
GPIO_RX_LOS);
- phy_data->sfp_gpio_tx_fault = XP_GET_BITS(reg, XP_PROP_3,
+ phy_data->sfp_gpio_tx_fault = XP_GET_BITS(pdata->pp3, XP_PROP_3,
GPIO_TX_FAULT);
- phy_data->sfp_gpio_mod_absent = XP_GET_BITS(reg, XP_PROP_3,
+ phy_data->sfp_gpio_mod_absent = XP_GET_BITS(pdata->pp3, XP_PROP_3,
GPIO_MOD_ABS);
- phy_data->sfp_gpio_rate_select = XP_GET_BITS(reg, XP_PROP_3,
+ phy_data->sfp_gpio_rate_select = XP_GET_BITS(pdata->pp3, XP_PROP_3,
GPIO_RATE_SELECT);
if (netif_msg_probe(pdata)) {
@@ -2458,18 +2617,17 @@ static void xgbe_phy_sfp_gpio_setup(struct xgbe_prv_data *pdata)
static void xgbe_phy_sfp_comm_setup(struct xgbe_prv_data *pdata)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
- unsigned int reg, mux_addr_hi, mux_addr_lo;
+ unsigned int mux_addr_hi, mux_addr_lo;
- reg = XP_IOREAD(pdata, XP_PROP_4);
-
- mux_addr_hi = XP_GET_BITS(reg, XP_PROP_4, MUX_ADDR_HI);
- mux_addr_lo = XP_GET_BITS(reg, XP_PROP_4, MUX_ADDR_LO);
+ mux_addr_hi = XP_GET_BITS(pdata->pp4, XP_PROP_4, MUX_ADDR_HI);
+ mux_addr_lo = XP_GET_BITS(pdata->pp4, XP_PROP_4, MUX_ADDR_LO);
if (mux_addr_lo == XGBE_SFP_DIRECT)
return;
phy_data->sfp_comm = XGBE_SFP_COMM_PCA9545;
phy_data->sfp_mux_address = (mux_addr_hi << 2) + mux_addr_lo;
- phy_data->sfp_mux_channel = XP_GET_BITS(reg, XP_PROP_4, MUX_CHAN);
+ phy_data->sfp_mux_channel = XP_GET_BITS(pdata->pp4, XP_PROP_4,
+ MUX_CHAN);
if (netif_msg_probe(pdata)) {
dev_dbg(pdata->dev, "SFP: mux_address=%#x\n",
@@ -2592,13 +2750,11 @@ static bool xgbe_phy_redrv_error(struct xgbe_phy_data *phy_data)
static int xgbe_phy_mdio_reset_setup(struct xgbe_prv_data *pdata)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
- unsigned int reg;
if (phy_data->conn_type != XGBE_CONN_TYPE_MDIO)
return 0;
- reg = XP_IOREAD(pdata, XP_PROP_3);
- phy_data->mdio_reset = XP_GET_BITS(reg, XP_PROP_3, MDIO_RESET);
+ phy_data->mdio_reset = XP_GET_BITS(pdata->pp3, XP_PROP_3, MDIO_RESET);
switch (phy_data->mdio_reset) {
case XGBE_MDIO_RESET_NONE:
case XGBE_MDIO_RESET_I2C_GPIO:
@@ -2612,12 +2768,12 @@ static int xgbe_phy_mdio_reset_setup(struct xgbe_prv_data *pdata)
if (phy_data->mdio_reset == XGBE_MDIO_RESET_I2C_GPIO) {
phy_data->mdio_reset_addr = XGBE_GPIO_ADDRESS_PCA9555 +
- XP_GET_BITS(reg, XP_PROP_3,
+ XP_GET_BITS(pdata->pp3, XP_PROP_3,
MDIO_RESET_I2C_ADDR);
- phy_data->mdio_reset_gpio = XP_GET_BITS(reg, XP_PROP_3,
+ phy_data->mdio_reset_gpio = XP_GET_BITS(pdata->pp3, XP_PROP_3,
MDIO_RESET_I2C_GPIO);
} else if (phy_data->mdio_reset == XGBE_MDIO_RESET_INT_GPIO) {
- phy_data->mdio_reset_gpio = XP_GET_BITS(reg, XP_PROP_3,
+ phy_data->mdio_reset_gpio = XP_GET_BITS(pdata->pp3, XP_PROP_3,
MDIO_RESET_INT_GPIO);
}
@@ -2707,12 +2863,9 @@ static bool xgbe_phy_conn_type_mismatch(struct xgbe_prv_data *pdata)
static bool xgbe_phy_port_enabled(struct xgbe_prv_data *pdata)
{
- unsigned int reg;
-
- reg = XP_IOREAD(pdata, XP_PROP_0);
- if (!XP_GET_BITS(reg, XP_PROP_0, PORT_SPEEDS))
+ if (!XP_GET_BITS(pdata->pp0, XP_PROP_0, PORT_SPEEDS))
return false;
- if (!XP_GET_BITS(reg, XP_PROP_0, CONN_TYPE))
+ if (!XP_GET_BITS(pdata->pp0, XP_PROP_0, CONN_TYPE))
return false;
return true;
@@ -2921,7 +3074,6 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
struct ethtool_link_ksettings *lks = &pdata->phy.lks;
struct xgbe_phy_data *phy_data;
struct mii_bus *mii;
- unsigned int reg;
int ret;
/* Check if enabled */
@@ -2940,12 +3092,11 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
return -ENOMEM;
pdata->phy_data = phy_data;
- reg = XP_IOREAD(pdata, XP_PROP_0);
- phy_data->port_mode = XP_GET_BITS(reg, XP_PROP_0, PORT_MODE);
- phy_data->port_id = XP_GET_BITS(reg, XP_PROP_0, PORT_ID);
- phy_data->port_speeds = XP_GET_BITS(reg, XP_PROP_0, PORT_SPEEDS);
- phy_data->conn_type = XP_GET_BITS(reg, XP_PROP_0, CONN_TYPE);
- phy_data->mdio_addr = XP_GET_BITS(reg, XP_PROP_0, MDIO_ADDR);
+ phy_data->port_mode = XP_GET_BITS(pdata->pp0, XP_PROP_0, PORT_MODE);
+ phy_data->port_id = XP_GET_BITS(pdata->pp0, XP_PROP_0, PORT_ID);
+ phy_data->port_speeds = XP_GET_BITS(pdata->pp0, XP_PROP_0, PORT_SPEEDS);
+ phy_data->conn_type = XP_GET_BITS(pdata->pp0, XP_PROP_0, CONN_TYPE);
+ phy_data->mdio_addr = XP_GET_BITS(pdata->pp0, XP_PROP_0, MDIO_ADDR);
if (netif_msg_probe(pdata)) {
dev_dbg(pdata->dev, "port mode=%u\n", phy_data->port_mode);
dev_dbg(pdata->dev, "port id=%u\n", phy_data->port_id);
@@ -2954,12 +3105,11 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
dev_dbg(pdata->dev, "mdio addr=%u\n", phy_data->mdio_addr);
}
- reg = XP_IOREAD(pdata, XP_PROP_4);
- phy_data->redrv = XP_GET_BITS(reg, XP_PROP_4, REDRV_PRESENT);
- phy_data->redrv_if = XP_GET_BITS(reg, XP_PROP_4, REDRV_IF);
- phy_data->redrv_addr = XP_GET_BITS(reg, XP_PROP_4, REDRV_ADDR);
- phy_data->redrv_lane = XP_GET_BITS(reg, XP_PROP_4, REDRV_LANE);
- phy_data->redrv_model = XP_GET_BITS(reg, XP_PROP_4, REDRV_MODEL);
+ phy_data->redrv = XP_GET_BITS(pdata->pp4, XP_PROP_4, REDRV_PRESENT);
+ phy_data->redrv_if = XP_GET_BITS(pdata->pp4, XP_PROP_4, REDRV_IF);
+ phy_data->redrv_addr = XP_GET_BITS(pdata->pp4, XP_PROP_4, REDRV_ADDR);
+ phy_data->redrv_lane = XP_GET_BITS(pdata->pp4, XP_PROP_4, REDRV_LANE);
+ phy_data->redrv_model = XP_GET_BITS(pdata->pp4, XP_PROP_4, REDRV_MODEL);
if (phy_data->redrv && netif_msg_probe(pdata)) {
dev_dbg(pdata->dev, "redrv present\n");
dev_dbg(pdata->dev, "redrv i/f=%u\n", phy_data->redrv_if);
@@ -3231,4 +3381,7 @@ void xgbe_init_function_ptrs_phy_v2(struct xgbe_phy_if *phy_if)
phy_impl->kr_training_pre = xgbe_phy_kr_training_pre;
phy_impl->kr_training_post = xgbe_phy_kr_training_post;
+
+ phy_impl->module_info = xgbe_phy_module_info;
+ phy_impl->module_eeprom = xgbe_phy_module_eeprom;
}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index 95d4b56448c6..47bcbcf58048 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -144,6 +144,11 @@
#define XGBE_TX_DESC_MAX_PROC (XGBE_TX_DESC_CNT >> 1)
#define XGBE_RX_DESC_CNT 512
+#define XGBE_TX_DESC_CNT_MIN 64
+#define XGBE_TX_DESC_CNT_MAX 4096
+#define XGBE_RX_DESC_CNT_MIN 64
+#define XGBE_RX_DESC_CNT_MAX 4096
+
#define XGBE_TX_MAX_BUF_SIZE (0x3fff & ~(64 - 1))
/* Descriptors required for maximum contiguous TSO/GSO packet */
@@ -835,6 +840,7 @@ struct xgbe_hw_if {
* Optional routines:
* an_pre, an_post
* kr_training_pre, kr_training_post
+ * module_info, module_eeprom
*/
struct xgbe_phy_impl_if {
/* Perform Setup/teardown actions */
@@ -883,6 +889,12 @@ struct xgbe_phy_impl_if {
/* Pre/Post KR training enablement support */
void (*kr_training_pre)(struct xgbe_prv_data *);
void (*kr_training_post)(struct xgbe_prv_data *);
+
+ /* SFP module related info */
+ int (*module_info)(struct xgbe_prv_data *pdata,
+ struct ethtool_modinfo *modinfo);
+ int (*module_eeprom)(struct xgbe_prv_data *pdata,
+ struct ethtool_eeprom *eeprom, u8 *data);
};
struct xgbe_phy_if {
@@ -905,6 +917,12 @@ struct xgbe_phy_if {
/* For single interrupt support */
irqreturn_t (*an_isr)(struct xgbe_prv_data *);
+ /* For ethtool PHY support */
+ int (*module_info)(struct xgbe_prv_data *pdata,
+ struct ethtool_modinfo *modinfo);
+ int (*module_eeprom)(struct xgbe_prv_data *pdata,
+ struct ethtool_eeprom *eeprom, u8 *data);
+
/* PHY implementation specific services */
struct xgbe_phy_impl_if phy_impl;
};
@@ -1027,6 +1045,13 @@ struct xgbe_prv_data {
void __iomem *xprop_regs; /* XGBE property registers */
void __iomem *xi2c_regs; /* XGBE I2C CSRs */
+ /* Port property registers */
+ unsigned int pp0;
+ unsigned int pp1;
+ unsigned int pp2;
+ unsigned int pp3;
+ unsigned int pp4;
+
/* Overall device lock */
spinlock_t lock;
@@ -1097,6 +1122,9 @@ struct xgbe_prv_data {
unsigned int rx_ring_count;
unsigned int rx_desc_count;
+ unsigned int new_tx_ring_count;
+ unsigned int new_rx_ring_count;
+
unsigned int tx_max_q_count;
unsigned int rx_max_q_count;
unsigned int tx_q_count;
@@ -1233,6 +1261,7 @@ struct xgbe_prv_data {
enum xgbe_rx kr_state;
enum xgbe_rx kx_state;
struct work_struct an_work;
+ unsigned int an_again;
unsigned int an_supported;
unsigned int parallel_detect;
unsigned int fec_ability;
@@ -1310,6 +1339,8 @@ int xgbe_powerup(struct net_device *, unsigned int);
int xgbe_powerdown(struct net_device *, unsigned int);
void xgbe_init_rx_coalesce(struct xgbe_prv_data *);
void xgbe_init_tx_coalesce(struct xgbe_prv_data *);
+void xgbe_restart_dev(struct xgbe_prv_data *pdata);
+void xgbe_full_restart_dev(struct xgbe_prv_data *pdata);
#ifdef CONFIG_DEBUG_FS
void xgbe_debugfs_init(struct xgbe_prv_data *);
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
index 8cfce95c82fc..39cd3a27fe77 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
@@ -107,7 +107,7 @@ static int aq_fw2x_update_link_status(struct aq_hw_s *self)
return 0;
}
-int aq_fw2x_get_mac_permanent(struct aq_hw_s *self, u8 *mac)
+static int aq_fw2x_get_mac_permanent(struct aq_hw_s *self, u8 *mac)
{
int err = 0;
u32 h = 0U;
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index f33b25fbca63..d5fca2e5a9bc 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -654,7 +654,7 @@ static int bcm_sysport_set_coalesce(struct net_device *dev,
pkts = priv->rx_max_coalesced_frames;
if (ec->use_adaptive_rx_coalesce && !priv->dim.use_dim) {
- moder = net_dim_get_def_profile(priv->dim.dim.mode);
+ moder = net_dim_get_def_rx_moderation(priv->dim.dim.mode);
usecs = moder.usec;
pkts = moder.pkts;
}
@@ -1064,7 +1064,7 @@ static void bcm_sysport_dim_work(struct work_struct *work)
struct bcm_sysport_priv *priv =
container_of(ndim, struct bcm_sysport_priv, dim);
struct net_dim_cq_moder cur_profile =
- net_dim_get_profile(dim->mode, dim->profile_ix);
+ net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
bcm_sysport_set_rx_coalesce(priv, cur_profile.usec, cur_profile.pkts);
dim->state = NET_DIM_START_MEASURE;
@@ -1437,7 +1437,7 @@ static void bcm_sysport_init_rx_coalesce(struct bcm_sysport_priv *priv)
/* If DIM was enabled, re-apply default parameters */
if (dim->use_dim) {
- moder = net_dim_get_def_profile(dim->dim.mode);
+ moder = net_dim_get_def_rx_moderation(dim->dim.mode);
usecs = moder.usec;
pkts = moder.pkts;
}
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 9ffc4a8c5fc7..3853296d78c1 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -384,7 +384,7 @@ static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
struct bnx2 *bp = netdev_priv(dev);
struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
- if (ops == NULL)
+ if (!ops)
return -EINVAL;
if (cp->drv_state & CNIC_DRV_STATE_REGD)
@@ -755,13 +755,13 @@ bnx2_alloc_tx_mem(struct bnx2 *bp)
struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
- if (txr->tx_buf_ring == NULL)
+ if (!txr->tx_buf_ring)
return -ENOMEM;
txr->tx_desc_ring =
dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
&txr->tx_desc_mapping, GFP_KERNEL);
- if (txr->tx_desc_ring == NULL)
+ if (!txr->tx_desc_ring)
return -ENOMEM;
}
return 0;
@@ -779,7 +779,7 @@ bnx2_alloc_rx_mem(struct bnx2 *bp)
rxr->rx_buf_ring =
vzalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
- if (rxr->rx_buf_ring == NULL)
+ if (!rxr->rx_buf_ring)
return -ENOMEM;
for (j = 0; j < bp->rx_max_ring; j++) {
@@ -788,7 +788,7 @@ bnx2_alloc_rx_mem(struct bnx2 *bp)
RXBD_RING_SIZE,
&rxr->rx_desc_mapping[j],
GFP_KERNEL);
- if (rxr->rx_desc_ring[j] == NULL)
+ if (!rxr->rx_desc_ring[j])
return -ENOMEM;
}
@@ -796,7 +796,7 @@ bnx2_alloc_rx_mem(struct bnx2 *bp)
if (bp->rx_pg_ring_size) {
rxr->rx_pg_ring = vzalloc(SW_RXPG_RING_SIZE *
bp->rx_max_pg_ring);
- if (rxr->rx_pg_ring == NULL)
+ if (!rxr->rx_pg_ring)
return -ENOMEM;
}
@@ -807,7 +807,7 @@ bnx2_alloc_rx_mem(struct bnx2 *bp)
RXBD_RING_SIZE,
&rxr->rx_pg_desc_mapping[j],
GFP_KERNEL);
- if (rxr->rx_pg_desc_ring[j] == NULL)
+ if (!rxr->rx_pg_desc_ring[j])
return -ENOMEM;
}
@@ -845,7 +845,7 @@ bnx2_alloc_stats_blk(struct net_device *dev)
sizeof(struct statistics_block);
status_blk = dma_zalloc_coherent(&bp->pdev->dev, bp->status_stats_size,
&bp->status_blk_mapping, GFP_KERNEL);
- if (status_blk == NULL)
+ if (!status_blk)
return -ENOMEM;
bp->status_blk = status_blk;
@@ -914,7 +914,7 @@ bnx2_alloc_mem(struct bnx2 *bp)
BNX2_PAGE_SIZE,
&bp->ctx_blk_mapping[i],
GFP_KERNEL);
- if (bp->ctx_blk[i] == NULL)
+ if (!bp->ctx_blk[i])
goto alloc_mem_err;
}
}
@@ -2667,7 +2667,7 @@ bnx2_alloc_bad_rbuf(struct bnx2 *bp)
u32 val;
good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
- if (good_mbuf == NULL)
+ if (!good_mbuf)
return -ENOMEM;
BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
@@ -3225,7 +3225,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
if (len <= bp->rx_copy_thresh) {
skb = netdev_alloc_skb(bp->dev, len + 6);
- if (skb == NULL) {
+ if (!skb) {
bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
sw_ring_prod);
goto next_rx;
@@ -3285,7 +3285,7 @@ next_rx:
sw_cons = BNX2_NEXT_RX_BD(sw_cons);
sw_prod = BNX2_NEXT_RX_BD(sw_prod);
- if ((rx_pkt == budget))
+ if (rx_pkt == budget)
break;
/* Refresh hw_cons to see if there is new work */
@@ -4561,7 +4561,7 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
if (align_start || align_end) {
align_buf = kmalloc(len32, GFP_KERNEL);
- if (align_buf == NULL)
+ if (!align_buf)
return -ENOMEM;
if (align_start) {
memcpy(align_buf, start, 4);
@@ -4575,7 +4575,7 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
flash_buffer = kmalloc(264, GFP_KERNEL);
- if (flash_buffer == NULL) {
+ if (!flash_buffer) {
rc = -ENOMEM;
goto nvram_write_end;
}
@@ -5440,7 +5440,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
int j;
- if (txr->tx_buf_ring == NULL)
+ if (!txr->tx_buf_ring)
continue;
for (j = 0; j < BNX2_TX_DESC_CNT; ) {
@@ -5448,7 +5448,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
struct sk_buff *skb = tx_buf->skb;
int k, last;
- if (skb == NULL) {
+ if (!skb) {
j = BNX2_NEXT_TX_BD(j);
continue;
}
@@ -5485,14 +5485,14 @@ bnx2_free_rx_skbs(struct bnx2 *bp)
struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
int j;
- if (rxr->rx_buf_ring == NULL)
+ if (!rxr->rx_buf_ring)
return;
for (j = 0; j < bp->rx_max_ring_idx; j++) {
struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[j];
u8 *data = rx_buf->data;
- if (data == NULL)
+ if (!data)
continue;
dma_unmap_single(&bp->pdev->dev,
@@ -6826,7 +6826,7 @@ bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
{
struct bnx2 *bp = netdev_priv(dev);
- if (bp->stats_blk == NULL)
+ if (!bp->stats_blk)
return;
net_stats->rx_packets =
@@ -7217,7 +7217,7 @@ bnx2_get_eeprom_len(struct net_device *dev)
{
struct bnx2 *bp = netdev_priv(dev);
- if (bp->flash_info == NULL)
+ if (!bp->flash_info)
return 0;
return (int) bp->flash_size;
@@ -7678,7 +7678,7 @@ bnx2_get_ethtool_stats(struct net_device *dev,
u32 *temp_stats = (u32 *) bp->temp_stats_blk;
u8 *stats_len_arr = NULL;
- if (hw_stats == NULL) {
+ if (!hw_stats) {
memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
return;
}
@@ -8121,7 +8121,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
bp->temp_stats_blk =
kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
- if (bp->temp_stats_blk == NULL) {
+ if (!bp->temp_stats_blk) {
rc = -ENOMEM;
goto err_out;
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 95871576ab92..8cd73ff5debc 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -4962,8 +4962,13 @@ void bnx2x_tx_timeout(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
-#ifdef BNX2X_STOP_ON_ERROR
+ /* We want the information of the dump logged,
+ * but calling bnx2x_panic() would kill all chances of recovery.
+ */
if (!bp->panic)
+#ifndef BNX2X_STOP_ON_ERROR
+ bnx2x_panic_dump(bp, false);
+#else
bnx2x_panic();
#endif
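For readers untangling the preprocessor flow above: after cpp runs, the tx-timeout handler reduces to one of two bodies, matching the intent stated in the new comment (a sketch of the two build configurations, not additional driver code):

    /* BNX2X_STOP_ON_ERROR not defined (production): log the dump,
     * keep the recovery path alive.
     */
    if (!bp->panic)
            bnx2x_panic_dump(bp, false);

    /* BNX2X_STOP_ON_ERROR defined (debug): halt for post-mortem. */
    if (!bp->panic)
            bnx2x_panic();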
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 7dd83d0ef0a0..22243c480a05 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -588,7 +588,7 @@ static void bnx2x_ets_e3b0_nig_disabled(const struct link_params *params,
* slots for the highest priority.
*/
REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS :
- NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
+ NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
/* Mapping between the CREDIT_WEIGHT registers and actual client
* numbers
*/
diff --git a/drivers/net/ethernet/broadcom/bnxt/Makefile b/drivers/net/ethernet/broadcom/bnxt/Makefile
index 7c560d545c03..5a779b19d149 100644
--- a/drivers/net/ethernet/broadcom/bnxt/Makefile
+++ b/drivers/net/ethernet/broadcom/bnxt/Makefile
@@ -2,3 +2,4 @@ obj-$(CONFIG_BNXT) += bnxt_en.o
bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_vfr.o bnxt_devlink.o bnxt_dim.o
bnxt_en-$(CONFIG_BNXT_FLOWER_OFFLOAD) += bnxt_tc.o
+bnxt_en-$(CONFIG_DEBUG_FS) += bnxt_debugfs.o
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index f83769d8047b..dfa0839f6656 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -62,6 +62,7 @@
#include "bnxt_vfr.h"
#include "bnxt_tc.h"
#include "bnxt_devlink.h"
+#include "bnxt_debugfs.h"
#define BNXT_TX_TIMEOUT (5 * HZ)
@@ -2383,6 +2384,7 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp)
for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
struct bnxt_ring_struct *ring;
+ u8 qidx;
ring = &txr->tx_ring_struct;
@@ -2411,7 +2413,8 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp)
memset(txr->tx_push, 0, sizeof(struct tx_push_bd));
}
- ring->queue_id = bp->q_info[j].queue_id;
+ qidx = bp->tc_to_qidx[j];
+ ring->queue_id = bp->q_info[qidx].queue_id;
if (i < bp->tx_nr_rings_xdp)
continue;
if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
@@ -3493,15 +3496,29 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
if (!timeout)
timeout = DFLT_HWRM_CMD_TIMEOUT;
+ /* convert timeout to usec */
+ timeout *= 1000;
i = 0;
- tmo_count = timeout * 40;
+ /* Short timeout for the first few iterations:
+ * number of loops = number of loops for short timeout +
+ * number of loops for standard timeout.
+ */
+ tmo_count = HWRM_SHORT_TIMEOUT_COUNTER;
+ timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER;
+ tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT);
resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET;
if (intr_process) {
/* Wait until hwrm response cmpl interrupt is processed */
while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID &&
i++ < tmo_count) {
- usleep_range(25, 40);
+ /* on first few passes, just barely sleep */
+ if (i < HWRM_SHORT_TIMEOUT_COUNTER)
+ usleep_range(HWRM_SHORT_MIN_TIMEOUT,
+ HWRM_SHORT_MAX_TIMEOUT);
+ else
+ usleep_range(HWRM_MIN_TIMEOUT,
+ HWRM_MAX_TIMEOUT);
}
if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) {
@@ -3513,25 +3530,34 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
HWRM_RESP_LEN_SFT;
valid = bp->hwrm_cmd_resp_addr + len - 1;
} else {
+ int j;
+
/* Check if response len is updated */
for (i = 0; i < tmo_count; i++) {
len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
HWRM_RESP_LEN_SFT;
if (len)
break;
- usleep_range(25, 40);
+ /* on first few passes, just barely sleep */
+ if (i < HWRM_SHORT_TIMEOUT_COUNTER)
+ usleep_range(HWRM_SHORT_MIN_TIMEOUT,
+ HWRM_SHORT_MAX_TIMEOUT);
+ else
+ usleep_range(HWRM_MIN_TIMEOUT,
+ HWRM_MAX_TIMEOUT);
}
if (i >= tmo_count) {
netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
- timeout, le16_to_cpu(req->req_type),
+ HWRM_TOTAL_TIMEOUT(i),
+ le16_to_cpu(req->req_type),
le16_to_cpu(req->seq_id), len);
return -1;
}
/* Last byte of resp contains valid bit */
valid = bp->hwrm_cmd_resp_addr + len - 1;
- for (i = 0; i < 5; i++) {
+ for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) {
/* make sure we read from updated DMA memory */
dma_rmb();
if (*valid)
@@ -3539,9 +3565,10 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
udelay(1);
}
- if (i >= 5) {
+ if (j >= HWRM_VALID_BIT_DELAY_USEC) {
netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
- timeout, le16_to_cpu(req->req_type),
+ HWRM_TOTAL_TIMEOUT(i),
+ le16_to_cpu(req->req_type),
le16_to_cpu(req->seq_id), len, *valid);
return -1;
}
@@ -4334,26 +4361,9 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
mutex_unlock(&bp->hwrm_cmd_lock);
if (rc || err) {
- switch (ring_type) {
- case RING_FREE_REQ_RING_TYPE_L2_CMPL:
- netdev_err(bp->dev, "hwrm_ring_alloc cp failed. rc:%x err:%x\n",
- rc, err);
- return -1;
-
- case RING_FREE_REQ_RING_TYPE_RX:
- netdev_err(bp->dev, "hwrm_ring_alloc rx failed. rc:%x err:%x\n",
- rc, err);
- return -1;
-
- case RING_FREE_REQ_RING_TYPE_TX:
- netdev_err(bp->dev, "hwrm_ring_alloc tx failed. rc:%x err:%x\n",
- rc, err);
- return -1;
-
- default:
- netdev_err(bp->dev, "Invalid ring\n");
- return -1;
- }
+ netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
+ ring_type, rc, err);
+ return -EIO;
}
ring->fw_ring_id = ring_id;
return rc;
@@ -4477,23 +4487,9 @@ static int hwrm_ring_free_send_msg(struct bnxt *bp,
mutex_unlock(&bp->hwrm_cmd_lock);
if (rc || error_code) {
- switch (ring_type) {
- case RING_FREE_REQ_RING_TYPE_L2_CMPL:
- netdev_err(bp->dev, "hwrm_ring_free cp failed. rc:%d\n",
- rc);
- return rc;
- case RING_FREE_REQ_RING_TYPE_RX:
- netdev_err(bp->dev, "hwrm_ring_free rx failed. rc:%d\n",
- rc);
- return rc;
- case RING_FREE_REQ_RING_TYPE_TX:
- netdev_err(bp->dev, "hwrm_ring_free tx failed. rc:%d\n",
- rc);
- return rc;
- default:
- netdev_err(bp->dev, "Invalid ring\n");
- return -1;
- }
+ netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
+ ring_type, rc, error_code);
+ return -EIO;
}
return 0;
}
@@ -4721,6 +4717,10 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
__bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
cp_rings, vnics);
+ req.enables |= cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS |
+ FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS);
+ req.num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
+ req.num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
return -ENOMEM;
@@ -5309,6 +5309,7 @@ static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
for (i = 0; i < bp->max_tc; i++) {
bp->q_info[i].queue_id = *qptr++;
bp->q_info[i].queue_profile = *qptr++;
+ bp->tc_to_qidx[i] = i;
}
qportcfg_exit:
@@ -5376,7 +5377,8 @@ int bnxt_hwrm_fw_set_time(struct bnxt *bp)
struct tm tm;
time64_t now = ktime_get_real_seconds();
- if (bp->hwrm_spec_code < 0x10400)
+ if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
+ bp->hwrm_spec_code < 0x10400)
return -EOPNOTSUPP;
time64_to_tm(now, 0, &tm);
@@ -5958,6 +5960,9 @@ static int bnxt_init_msix(struct bnxt *bp)
if (total_vecs > max)
total_vecs = max;
+ if (!total_vecs)
+ return 0;
+
msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
if (!msix_ent)
return -ENOMEM;
@@ -6457,6 +6462,9 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
}
mutex_unlock(&bp->hwrm_cmd_lock);
+ if (!BNXT_SINGLE_PF(bp))
+ return 0;
+
diff = link_info->support_auto_speeds ^ link_info->advertising;
if ((link_info->support_auto_speeds | diff) !=
link_info->support_auto_speeds) {
@@ -6843,6 +6851,8 @@ static void bnxt_preset_reg_win(struct bnxt *bp)
}
}
+static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
+
static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
{
int rc = 0;
@@ -6850,6 +6860,12 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
bnxt_preset_reg_win(bp);
netif_carrier_off(bp->dev);
if (irq_re_init) {
+ /* Reserve rings now if none were reserved at driver probe. */
+ rc = bnxt_init_dflt_ring_mode(bp);
+ if (rc) {
+ netdev_err(bp->dev, "Failed to reserve default rings at open\n");
+ return rc;
+ }
rc = bnxt_reserve_rings(bp);
if (rc)
return rc;
@@ -6877,6 +6893,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
}
bnxt_enable_napi(bp);
+ bnxt_debug_dev_init(bp);
rc = bnxt_init_nic(bp, irq_re_init);
if (rc) {
@@ -6909,6 +6926,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
return 0;
open_err:
+ bnxt_debug_dev_exit(bp);
bnxt_disable_napi(bp);
bnxt_del_napi(bp);
@@ -7002,6 +7020,7 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
/* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
+ bnxt_debug_dev_exit(bp);
bnxt_disable_napi(bp);
del_timer_sync(&bp->timer);
bnxt_free_skbs(bp);
@@ -7279,6 +7298,25 @@ skip_uc:
return rc;
}
+static bool bnxt_can_reserve_rings(struct bnxt *bp)
+{
+#ifdef CONFIG_BNXT_SRIOV
+ if ((bp->flags & BNXT_FLAG_NEW_RM) && BNXT_VF(bp)) {
+ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+
+ /* No minimum rings were provisioned by the PF. Don't
+ * reserve rings by default when device is down.
+ */
+ if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
+ return true;
+
+ if (!netif_running(bp->dev))
+ return false;
+ }
+#endif
+ return true;
+}
+
/* If the chip and firmware supports RFS */
static bool bnxt_rfs_supported(struct bnxt *bp)
{
@@ -7295,7 +7333,7 @@ static bool bnxt_rfs_capable(struct bnxt *bp)
#ifdef CONFIG_RFS_ACCEL
int vnics, max_vnics, max_rss_ctxs;
- if (!(bp->flags & BNXT_FLAG_MSIX_CAP))
+ if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp))
return false;
vnics = 1 + bp->rx_nr_rings;
@@ -7729,7 +7767,7 @@ static void bnxt_init_dflt_coal(struct bnxt *bp)
coal->coal_bufs = 30;
coal->coal_ticks_irq = 1;
coal->coal_bufs_irq = 2;
- coal->idle_thresh = 25;
+ coal->idle_thresh = 50;
coal->bufs_per_record = 2;
coal->budget = 64; /* NAPI budget */
@@ -8529,6 +8567,9 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
{
int dflt_rings, max_rx_rings, max_tx_rings, rc;
+ if (!bnxt_can_reserve_rings(bp))
+ return 0;
+
if (sh)
bp->flags |= BNXT_FLAG_SHARED_RINGS;
dflt_rings = netif_get_num_default_rss_queues();
@@ -8574,6 +8615,29 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
return rc;
}
+static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
+{
+ int rc;
+
+ if (bp->tx_nr_rings)
+ return 0;
+
+ rc = bnxt_set_dflt_rings(bp, true);
+ if (rc) {
+ netdev_err(bp->dev, "Not enough rings available.\n");
+ return rc;
+ }
+ rc = bnxt_init_int_mode(bp);
+ if (rc)
+ return rc;
+ bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+ if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) {
+ bp->flags |= BNXT_FLAG_RFS;
+ bp->dev->features |= NETIF_F_NTUPLE;
+ }
+ return 0;
+}
+
int bnxt_restore_pf_fw_resources(struct bnxt *bp)
{
int rc;
@@ -8614,8 +8678,8 @@ static int bnxt_init_mac_addr(struct bnxt *bp)
memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
} else {
eth_hw_addr_random(bp->dev);
- rc = bnxt_approve_mac(bp, bp->dev->dev_addr);
}
+ rc = bnxt_approve_mac(bp, bp->dev->dev_addr);
#endif
}
return rc;
@@ -9078,6 +9142,7 @@ static struct pci_driver bnxt_pci_driver = {
static int __init bnxt_init(void)
{
+ bnxt_debug_init();
return pci_register_driver(&bnxt_pci_driver);
}
@@ -9086,6 +9151,7 @@ static void __exit bnxt_exit(void)
pci_unregister_driver(&bnxt_pci_driver);
if (bnxt_pf_wq)
destroy_workqueue(bnxt_pf_wq);
+ bnxt_debug_exit();
}
module_init(bnxt_init);
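The reworked bnxt_hwrm_do_send_msg() wait loops share one shape: a handful of very short sleeps up front (firmware usually answers within microseconds), then the historical 25-40 us cadence until tmo_count passes elapse. A condensed sketch of that shape; response_ready() is a hypothetical stand-in for the seq-id and response-length checks in the two real loops:

    for (i = 0; i < tmo_count; i++) {
            if (response_ready())           /* hypothetical: seq-id/len check */
                    break;
            if (i < HWRM_SHORT_TIMEOUT_COUNTER)
                    usleep_range(HWRM_SHORT_MIN_TIMEOUT,
                                 HWRM_SHORT_MAX_TIMEOUT);
            else
                    usleep_range(HWRM_MIN_TIMEOUT, HWRM_MAX_TIMEOUT);
    }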
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 3d55d3b56865..9b14eb610b9f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -532,6 +532,19 @@ struct rx_tpa_end_cmp_ext {
#define BNXT_HWRM_REQ_MAX_SIZE 128
#define BNXT_HWRM_REQS_PER_PAGE (BNXT_PAGE_SIZE / \
BNXT_HWRM_REQ_MAX_SIZE)
+#define HWRM_SHORT_MIN_TIMEOUT 3
+#define HWRM_SHORT_MAX_TIMEOUT 10
+#define HWRM_SHORT_TIMEOUT_COUNTER 5
+
+#define HWRM_MIN_TIMEOUT 25
+#define HWRM_MAX_TIMEOUT 40
+
+#define HWRM_TOTAL_TIMEOUT(n) (((n) <= HWRM_SHORT_TIMEOUT_COUNTER) ? \
+ ((n) * HWRM_SHORT_MIN_TIMEOUT) : \
+ (HWRM_SHORT_TIMEOUT_COUNTER * HWRM_SHORT_MIN_TIMEOUT + \
+ ((n) - HWRM_SHORT_TIMEOUT_COUNTER) * HWRM_MIN_TIMEOUT))
+
+#define HWRM_VALID_BIT_DELAY_USEC 20
#define BNXT_RX_EVENT 1
#define BNXT_AGG_EVENT 2
@@ -1242,6 +1255,7 @@ struct bnxt {
u8 max_tc;
u8 max_lltc; /* lossless TCs */
struct bnxt_queue_info q_info[BNXT_MAX_QUEUE];
+ u8 tc_to_qidx[BNXT_MAX_QUEUE];
unsigned int current_interval;
#define BNXT_TIMER_INTERVAL HZ
@@ -1384,6 +1398,8 @@ struct bnxt {
u16 *cfa_code_map; /* cfa_code -> vf_idx map */
u8 switch_id[8];
struct bnxt_tc_info *tc_info;
+ struct dentry *debugfs_pdev;
+ struct dentry *debugfs_dim;
};
#define BNXT_RX_STATS_OFFSET(counter) \
@@ -1398,8 +1414,7 @@ struct bnxt {
#define I2C_DEV_ADDR_A0 0xa0
#define I2C_DEV_ADDR_A2 0xa2
-#define SFP_EEPROM_SFF_8472_COMP_ADDR 0x5e
-#define SFP_EEPROM_SFF_8472_COMP_SIZE 1
+#define SFF_DIAG_SUPPORT_OFFSET 0x5c
#define SFF_MODULE_ID_SFP 0x3
#define SFF_MODULE_ID_QSFP 0xc
#define SFF_MODULE_ID_QSFP_PLUS 0xd
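Working the new bnxt.h timeout macros through with the driver default (assuming DFLT_HWRM_CMD_TIMEOUT is 500, in milliseconds, as in this driver):

    /* timeout   = 500 * 1000 = 500000 us
     * tmo_count = 5 + DIV_ROUND_UP(500000 - 5 * 3, 25) = 5 + 20000 passes
     *
     * HWRM_TOTAL_TIMEOUT(5)  = 5 * 3           = 15 us  (all short passes)
     * HWRM_TOTAL_TIMEOUT(25) = 5 * 3 + 20 * 25 = 515 us (short + standard)
     */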
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index 3c746f2d9ed8..d5bc72cecde3 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -21,6 +21,21 @@
#include "bnxt_dcb.h"
#ifdef CONFIG_BNXT_DCB
+static int bnxt_queue_to_tc(struct bnxt *bp, u8 queue_id)
+{
+ int i, j;
+
+ for (i = 0; i < bp->max_tc; i++) {
+ if (bp->q_info[i].queue_id == queue_id) {
+ for (j = 0; j < bp->max_tc; j++) {
+ if (bp->tc_to_qidx[j] == i)
+ return j;
+ }
+ }
+ }
+ return -EINVAL;
+}
+
static int bnxt_hwrm_queue_pri2cos_cfg(struct bnxt *bp, struct ieee_ets *ets)
{
struct hwrm_queue_pri2cos_cfg_input req = {0};
@@ -33,10 +48,13 @@ static int bnxt_hwrm_queue_pri2cos_cfg(struct bnxt *bp, struct ieee_ets *ets)
pri2cos = &req.pri0_cos_queue_id;
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ u8 qidx;
+
req.enables |= cpu_to_le32(
QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID << i);
- pri2cos[i] = bp->q_info[ets->prio_tc[i]].queue_id;
+ qidx = bp->tc_to_qidx[ets->prio_tc[i]];
+ pri2cos[i] = bp->q_info[qidx].queue_id;
}
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
return rc;
@@ -55,17 +73,15 @@ static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt *bp, struct ieee_ets *ets)
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (!rc) {
u8 *pri2cos = &resp->pri0_cos_queue_id;
- int i, j;
+ int i;
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
u8 queue_id = pri2cos[i];
+ int tc;
- for (j = 0; j < bp->max_tc; j++) {
- if (bp->q_info[j].queue_id == queue_id) {
- ets->prio_tc[i] = j;
- break;
- }
- }
+ tc = bnxt_queue_to_tc(bp, queue_id);
+ if (tc >= 0)
+ ets->prio_tc[i] = tc;
}
}
mutex_unlock(&bp->hwrm_cmd_lock);
@@ -81,13 +97,15 @@ static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets,
void *data;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_CFG, -1, -1);
- data = &req.unused_0;
- for (i = 0; i < max_tc; i++, data += sizeof(cos2bw) - 4) {
+ for (i = 0; i < max_tc; i++) {
+ u8 qidx;
+
req.enables |= cpu_to_le32(
QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID << i);
memset(&cos2bw, 0, sizeof(cos2bw));
- cos2bw.queue_id = bp->q_info[i].queue_id;
+ qidx = bp->tc_to_qidx[i];
+ cos2bw.queue_id = bp->q_info[qidx].queue_id;
if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_STRICT) {
cos2bw.tsa =
QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP;
@@ -103,8 +121,9 @@ static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets,
cpu_to_le32((ets->tc_tx_bw[i] * 100) |
BW_VALUE_UNIT_PERCENT1_100);
}
+ data = &req.unused_0 + qidx * (sizeof(cos2bw) - 4);
memcpy(data, &cos2bw.queue_id, sizeof(cos2bw) - 4);
- if (i == 0) {
+ if (qidx == 0) {
req.queue_id0 = cos2bw.queue_id;
req.unused_0 = 0;
}
@@ -132,66 +151,81 @@ static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets)
data = &resp->queue_id0 + offsetof(struct bnxt_cos2bw_cfg, queue_id);
for (i = 0; i < bp->max_tc; i++, data += sizeof(cos2bw) - 4) {
- int j;
+ int tc;
memcpy(&cos2bw.queue_id, data, sizeof(cos2bw) - 4);
if (i == 0)
cos2bw.queue_id = resp->queue_id0;
- for (j = 0; j < bp->max_tc; j++) {
- if (bp->q_info[j].queue_id != cos2bw.queue_id)
- continue;
- if (cos2bw.tsa ==
- QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP) {
- ets->tc_tsa[j] = IEEE_8021QAZ_TSA_STRICT;
- } else {
- ets->tc_tsa[j] = IEEE_8021QAZ_TSA_ETS;
- ets->tc_tx_bw[j] = cos2bw.bw_weight;
- }
+ tc = bnxt_queue_to_tc(bp, cos2bw.queue_id);
+ if (tc < 0)
+ continue;
+
+ if (cos2bw.tsa ==
+ QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP) {
+ ets->tc_tsa[tc] = IEEE_8021QAZ_TSA_STRICT;
+ } else {
+ ets->tc_tsa[tc] = IEEE_8021QAZ_TSA_ETS;
+ ets->tc_tx_bw[tc] = cos2bw.bw_weight;
}
}
mutex_unlock(&bp->hwrm_cmd_lock);
return 0;
}
-static int bnxt_hwrm_queue_cfg(struct bnxt *bp, unsigned int lltc_mask)
+static int bnxt_queue_remap(struct bnxt *bp, unsigned int lltc_mask)
{
- struct hwrm_queue_cfg_input req = {0};
- int i;
+ unsigned long qmap = 0;
+ int max = bp->max_tc;
+ int i, j, rc;
- if (netif_running(bp->dev))
- bnxt_tx_disable(bp);
+ /* Assign lossless TCs first */
+ for (i = 0, j = 0; i < max; ) {
+ if (lltc_mask & (1 << i)) {
+ if (BNXT_LLQ(bp->q_info[j].queue_profile)) {
+ bp->tc_to_qidx[i] = j;
+ __set_bit(j, &qmap);
+ i++;
+ }
+ j++;
+ continue;
+ }
+ i++;
+ }
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_CFG, -1, -1);
- req.flags = cpu_to_le32(QUEUE_CFG_REQ_FLAGS_PATH_BIDIR);
- req.enables = cpu_to_le32(QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE);
+ for (i = 0, j = 0; i < max; i++) {
+ if (lltc_mask & (1 << i))
+ continue;
+ j = find_next_zero_bit(&qmap, max, j);
+ bp->tc_to_qidx[i] = j;
+ __set_bit(j, &qmap);
+ j++;
+ }
- /* Configure lossless queues to lossy first */
- req.service_profile = QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY;
- for (i = 0; i < bp->max_tc; i++) {
- if (BNXT_LLQ(bp->q_info[i].queue_profile)) {
- req.queue_id = cpu_to_le32(bp->q_info[i].queue_id);
- hwrm_send_message(bp, &req, sizeof(req),
- HWRM_CMD_TIMEOUT);
- bp->q_info[i].queue_profile =
- QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY;
+ if (netif_running(bp->dev)) {
+ bnxt_close_nic(bp, false, false);
+ rc = bnxt_open_nic(bp, false, false);
+ if (rc) {
+ netdev_warn(bp->dev, "failed to open NIC, rc = %d\n", rc);
+ return rc;
}
}
-
- /* Now configure desired queues to lossless */
- req.service_profile = QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS;
- for (i = 0; i < bp->max_tc; i++) {
- if (lltc_mask & (1 << i)) {
- req.queue_id = cpu_to_le32(bp->q_info[i].queue_id);
- hwrm_send_message(bp, &req, sizeof(req),
- HWRM_CMD_TIMEOUT);
- bp->q_info[i].queue_profile =
- QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS;
+ if (bp->ieee_ets) {
+ int tc = netdev_get_num_tc(bp->dev);
+
+ if (!tc)
+ tc = 1;
+ rc = bnxt_hwrm_queue_cos2bw_cfg(bp, bp->ieee_ets, tc);
+ if (rc) {
+ netdev_warn(bp->dev, "failed to config BW, rc = %d\n", rc);
+ return rc;
+ }
+ rc = bnxt_hwrm_queue_pri2cos_cfg(bp, bp->ieee_ets);
+ if (rc) {
+ netdev_warn(bp->dev, "failed to config prio, rc = %d\n", rc);
+ return rc;
}
}
- if (netif_running(bp->dev))
- bnxt_tx_enable(bp);
-
return 0;
}
@@ -201,7 +235,7 @@ static int bnxt_hwrm_queue_pfc_cfg(struct bnxt *bp, struct ieee_pfc *pfc)
struct ieee_ets *my_ets = bp->ieee_ets;
unsigned int tc_mask = 0, pri_mask = 0;
u8 i, pri, lltc_count = 0;
- bool need_q_recfg = false;
+ bool need_q_remap = false;
int rc;
if (!my_ets)
@@ -221,21 +255,25 @@ static int bnxt_hwrm_queue_pfc_cfg(struct bnxt *bp, struct ieee_pfc *pfc)
if (lltc_count > bp->max_lltc)
return -EINVAL;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_CFG, -1, -1);
- req.flags = cpu_to_le32(pri_mask);
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (rc)
- return rc;
-
for (i = 0; i < bp->max_tc; i++) {
if (tc_mask & (1 << i)) {
- if (!BNXT_LLQ(bp->q_info[i].queue_profile))
- need_q_recfg = true;
+ u8 qidx = bp->tc_to_qidx[i];
+
+ if (!BNXT_LLQ(bp->q_info[qidx].queue_profile)) {
+ need_q_remap = true;
+ break;
+ }
}
}
- if (need_q_recfg)
- rc = bnxt_hwrm_queue_cfg(bp, tc_mask);
+ if (need_q_remap)
+ rc = bnxt_queue_remap(bp, tc_mask);
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_CFG, -1, -1);
+ req.flags = cpu_to_le32(pri_mask);
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ return rc;
return rc;
}
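A worked pass through bnxt_queue_remap(), under the assumption that max_tc = 4, only q_info[1] and q_info[3] carry a lossless (BNXT_LLQ) profile, and PFC requests lossless service on TC0 (lltc_mask = 0x1):

    /* Lossless pass:  TC0 -> qidx 1  (first queue with a lossless profile)
     * Fill-in pass:   TC1 -> qidx 0, TC2 -> qidx 2, TC3 -> qidx 3
     *
     * Result: tc_to_qidx[] = { 1, 0, 2, 3 }.  The cos2bw/pri2cos config
     * above and the TX ring setup in bnxt.c now go through this
     * indirection instead of assuming TC i always lands on hardware
     * queue i.
     */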
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c
new file mode 100644
index 000000000000..94e208e9789f
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c
@@ -0,0 +1,124 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2017-2018 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include "bnxt_hsi.h"
+#include <linux/net_dim.h>
+#include "bnxt.h"
+#include "bnxt_debugfs.h"
+
+static struct dentry *bnxt_debug_mnt;
+
+static ssize_t debugfs_dim_read(struct file *filep,
+ char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct net_dim *dim = filep->private_data;
+ int len;
+ char *buf;
+
+ if (*ppos)
+ return 0;
+ if (!dim)
+ return -ENODEV;
+ buf = kasprintf(GFP_KERNEL,
+ "state = %d\n" \
+ "profile_ix = %d\n" \
+ "mode = %d\n" \
+ "tune_state = %d\n" \
+ "steps_right = %d\n" \
+ "steps_left = %d\n" \
+ "tired = %d\n",
+ dim->state,
+ dim->profile_ix,
+ dim->mode,
+ dim->tune_state,
+ dim->steps_right,
+ dim->steps_left,
+ dim->tired);
+ if (!buf)
+ return -ENOMEM;
+ if (count < strlen(buf)) {
+ kfree(buf);
+ return -ENOSPC;
+ }
+ len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
+ kfree(buf);
+ return len;
+}
+
+static const struct file_operations debugfs_dim_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = debugfs_dim_read,
+};
+
+static struct dentry *debugfs_dim_ring_init(struct net_dim *dim, int ring_idx,
+ struct dentry *dd)
+{
+ static char qname[16];
+
+ snprintf(qname, 10, "%d", ring_idx);
+ return debugfs_create_file(qname, 0600, dd,
+ dim, &debugfs_dim_fops);
+}
+
+void bnxt_debug_dev_init(struct bnxt *bp)
+{
+ const char *pname = pci_name(bp->pdev);
+ struct dentry *pdevf;
+ int i;
+
+ bp->debugfs_pdev = debugfs_create_dir(pname, bnxt_debug_mnt);
+ if (bp->debugfs_pdev) {
+ pdevf = debugfs_create_dir("dim", bp->debugfs_pdev);
+ if (!pdevf) {
+ pr_err("failed to create debugfs entry %s/dim\n",
+ pname);
+ return;
+ }
+ bp->debugfs_dim = pdevf;
+ /* create files for each rx ring */
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
+
+ if (cpr && bp->bnapi[i]->rx_ring) {
+ pdevf = debugfs_dim_ring_init(&cpr->dim, i,
+ bp->debugfs_dim);
+ if (!pdevf)
+ pr_err("failed to create debugfs entry %s/dim/%d\n",
+ pname, i);
+ }
+ }
+ } else {
+ pr_err("failed to create debugfs entry %s\n", pname);
+ }
+}
+
+void bnxt_debug_dev_exit(struct bnxt *bp)
+{
+ if (bp) {
+ debugfs_remove_recursive(bp->debugfs_pdev);
+ bp->debugfs_pdev = NULL;
+ }
+}
+
+void bnxt_debug_init(void)
+{
+ bnxt_debug_mnt = debugfs_create_dir("bnxt_en", NULL);
+ if (!bnxt_debug_mnt)
+ pr_err("failed to init bnxt_en debugfs\n");
+}
+
+void bnxt_debug_exit(void)
+{
+ debugfs_remove_recursive(bnxt_debug_mnt);
+}
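With CONFIG_DEBUG_FS enabled this exposes one file per RX-capable completion ring; assuming debugfs is mounted in the usual place, something like

    cat /sys/kernel/debug/bnxt_en/<pci-addr>/dim/0

prints the state, profile_ix, mode, tune_state, steps_right, steps_left and tired fields exactly as debugfs_dim_read() formats them. The path components other than "bnxt_en" and "dim" depend on the device's PCI address and the ring index.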
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.h
new file mode 100644
index 000000000000..d0bb4887acd0
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.h
@@ -0,0 +1,23 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2017-2018 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include "bnxt_hsi.h"
+#include "bnxt.h"
+
+#ifdef CONFIG_DEBUG_FS
+void bnxt_debug_init(void);
+void bnxt_debug_exit(void);
+void bnxt_debug_dev_init(struct bnxt *bp);
+void bnxt_debug_dev_exit(struct bnxt *bp);
+#else
+static inline void bnxt_debug_init(void) {}
+static inline void bnxt_debug_exit(void) {}
+static inline void bnxt_debug_dev_init(struct bnxt *bp) {}
+static inline void bnxt_debug_dev_exit(struct bnxt *bp) {}
+#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c
index 408dd190331e..afa97c8bb081 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c
@@ -21,11 +21,11 @@ void bnxt_dim_work(struct work_struct *work)
struct bnxt_napi *bnapi = container_of(cpr,
struct bnxt_napi,
cp_ring);
- struct net_dim_cq_moder cur_profile = net_dim_get_profile(dim->mode,
- dim->profile_ix);
+ struct net_dim_cq_moder cur_moder =
+ net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
- cpr->rx_ring_coal.coal_ticks = cur_profile.usec;
- cpr->rx_ring_coal.coal_bufs = cur_profile.pkts;
+ cpr->rx_ring_coal.coal_ticks = cur_moder.usec;
+ cpr->rx_ring_coal.coal_bufs = cur_moder.pkts;
bnxt_hwrm_set_ring_coal(bnapi->bp, bnapi);
dim->state = NET_DIM_START_MEASURE;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 8ba14ae00e8f..7270c8b0cef3 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -140,6 +140,19 @@ reset_coalesce:
#define BNXT_RX_STATS_EXT_ENTRY(counter) \
{ BNXT_RX_STATS_EXT_OFFSET(counter), __stringify(counter) }
+enum {
+ RX_TOTAL_DISCARDS,
+ TX_TOTAL_DISCARDS,
+};
+
+static struct {
+ u64 counter;
+ char string[ETH_GSTRING_LEN];
+} bnxt_sw_func_stats[] = {
+ {0, "rx_total_discard_pkts"},
+ {0, "tx_total_discard_pkts"},
+};
+
static const struct {
long offset;
char string[ETH_GSTRING_LEN];
@@ -237,6 +250,7 @@ static const struct {
BNXT_RX_STATS_EXT_ENTRY(resume_roce_pause_events),
};
+#define BNXT_NUM_SW_FUNC_STATS ARRAY_SIZE(bnxt_sw_func_stats)
#define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr)
#define BNXT_NUM_PORT_STATS_EXT ARRAY_SIZE(bnxt_port_stats_ext_arr)
@@ -244,6 +258,8 @@ static int bnxt_get_num_stats(struct bnxt *bp)
{
int num_stats = BNXT_NUM_STATS * bp->cp_nr_rings;
+ num_stats += BNXT_NUM_SW_FUNC_STATS;
+
if (bp->flags & BNXT_FLAG_PORT_STATS)
num_stats += BNXT_NUM_PORT_STATS;
@@ -279,6 +295,9 @@ static void bnxt_get_ethtool_stats(struct net_device *dev,
if (!bp->bnapi)
return;
+ for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++)
+ bnxt_sw_func_stats[i].counter = 0;
+
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
@@ -288,7 +307,16 @@ static void bnxt_get_ethtool_stats(struct net_device *dev,
for (k = 0; k < stat_fields; j++, k++)
buf[j] = le64_to_cpu(hw_stats[k]);
buf[j++] = cpr->rx_l4_csum_errors;
+
+ bnxt_sw_func_stats[RX_TOTAL_DISCARDS].counter +=
+ le64_to_cpu(cpr->hw_stats->rx_discard_pkts);
+ bnxt_sw_func_stats[TX_TOTAL_DISCARDS].counter +=
+ le64_to_cpu(cpr->hw_stats->tx_discard_pkts);
}
+
+ for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++, j++)
+ buf[j] = bnxt_sw_func_stats[i].counter;
+
if (bp->flags & BNXT_FLAG_PORT_STATS) {
__le64 *port_stats = (__le64 *)bp->hw_rx_port_stats;
@@ -359,6 +387,11 @@ static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
sprintf(buf, "[%d]: rx_l4_csum_errors", i);
buf += ETH_GSTRING_LEN;
}
+ for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++) {
+ strcpy(buf, bnxt_sw_func_stats[i].string);
+ buf += ETH_GSTRING_LEN;
+ }
+
if (bp->flags & BNXT_FLAG_PORT_STATS) {
for (i = 0; i < BNXT_NUM_PORT_STATS; i++) {
strcpy(buf, bnxt_port_stats_arr[i].string);
@@ -551,6 +584,8 @@ static int bnxt_set_channels(struct net_device *dev,
* to re-enable
*/
}
+ } else {
+ rc = bnxt_reserve_rings(bp);
}
return rc;
@@ -1785,6 +1820,11 @@ static int nvm_get_dir_info(struct net_device *dev, u32 *entries, u32 *length)
static int bnxt_get_eeprom_len(struct net_device *dev)
{
+ struct bnxt *bp = netdev_priv(dev);
+
+ if (BNXT_VF(bp))
+ return 0;
+
/* The -1 return value allows the entire 32-bit range of offsets to be
* passed via the ethtool command-line utility.
*/
@@ -2144,9 +2184,8 @@ static int bnxt_read_sfp_module_eeprom_info(struct bnxt *bp, u16 i2c_addr,
static int bnxt_get_module_info(struct net_device *dev,
struct ethtool_modinfo *modinfo)
{
+ u8 data[SFF_DIAG_SUPPORT_OFFSET + 1];
struct bnxt *bp = netdev_priv(dev);
- struct hwrm_port_phy_i2c_read_input req = {0};
- struct hwrm_port_phy_i2c_read_output *output = bp->hwrm_cmd_resp_addr;
int rc;
/* No point in going further if phy status indicates
@@ -2161,21 +2200,19 @@ static int bnxt_get_module_info(struct net_device *dev,
if (bp->hwrm_spec_code < 0x10202)
return -EOPNOTSUPP;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_I2C_READ, -1, -1);
- req.i2c_slave_addr = I2C_DEV_ADDR_A0;
- req.page_number = 0;
- req.page_offset = cpu_to_le16(SFP_EEPROM_SFF_8472_COMP_ADDR);
- req.data_length = SFP_EEPROM_SFF_8472_COMP_SIZE;
- req.port_id = cpu_to_le16(bp->pf.port_id);
- mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0,
+ SFF_DIAG_SUPPORT_OFFSET + 1,
+ data);
if (!rc) {
- u32 module_id = le32_to_cpu(output->data[0]);
+ u8 module_id = data[0];
+ u8 diag_supported = data[SFF_DIAG_SUPPORT_OFFSET];
switch (module_id) {
case SFF_MODULE_ID_SFP:
modinfo->type = ETH_MODULE_SFF_8472;
modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+ if (!diag_supported)
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
break;
case SFF_MODULE_ID_QSFP:
case SFF_MODULE_ID_QSFP_PLUS:
@@ -2191,7 +2228,6 @@ static int bnxt_get_module_info(struct net_device *dev,
break;
}
}
- mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
}
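The get_module_info rework reads the first 0x5d bytes of the SFP's A0 page in one shot and keys off byte 0x5c, the SFF-8472 diagnostic-monitoring-type byte: when it is zero the module has no A2 diagnostics page, so only the 256-byte A0 space can be exposed. A sketch of the resulting length decision (constants from ethtool.h):

    if (data[SFF_DIAG_SUPPORT_OFFSET])
            modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;  /* 512: A0 + A2 */
    else
            modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;  /* 256: A0 only */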
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index f952963d594e..a64910892c25 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -462,13 +462,13 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)
vf_vnics = hw_resc->max_vnics - bp->nr_vnics;
vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
- req.min_rsscos_ctx = cpu_to_le16(1);
- req.max_rsscos_ctx = cpu_to_le16(1);
+ req.min_rsscos_ctx = cpu_to_le16(BNXT_VF_MIN_RSS_CTX);
+ req.max_rsscos_ctx = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL) {
req.min_cmpl_rings = cpu_to_le16(1);
req.min_tx_rings = cpu_to_le16(1);
req.min_rx_rings = cpu_to_le16(1);
- req.min_l2_ctxs = cpu_to_le16(1);
+ req.min_l2_ctxs = cpu_to_le16(BNXT_VF_MIN_L2_CTX);
req.min_vnics = cpu_to_le16(1);
req.min_stat_ctx = cpu_to_le16(1);
req.min_hw_ring_grps = cpu_to_le16(1);
@@ -483,7 +483,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)
req.min_cmpl_rings = cpu_to_le16(vf_cp_rings);
req.min_tx_rings = cpu_to_le16(vf_tx_rings);
req.min_rx_rings = cpu_to_le16(vf_rx_rings);
- req.min_l2_ctxs = cpu_to_le16(4);
+ req.min_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
req.min_vnics = cpu_to_le16(vf_vnics);
req.min_stat_ctx = cpu_to_le16(vf_stat_ctx);
req.min_hw_ring_grps = cpu_to_le16(vf_ring_grps);
@@ -491,7 +491,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)
req.max_cmpl_rings = cpu_to_le16(vf_cp_rings);
req.max_tx_rings = cpu_to_le16(vf_tx_rings);
req.max_rx_rings = cpu_to_le16(vf_rx_rings);
- req.max_l2_ctxs = cpu_to_le16(4);
+ req.max_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
req.max_vnics = cpu_to_le16(vf_vnics);
req.max_stat_ctx = cpu_to_le16(vf_stat_ctx);
req.max_hw_ring_grps = cpu_to_le16(vf_ring_grps);
@@ -809,6 +809,9 @@ static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
struct hwrm_fwd_resp_input req = {0};
struct hwrm_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
+ if (BNXT_FWD_RESP_SIZE_ERR(msg_size))
+ return -EINVAL;
+
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_RESP, -1, -1);
/* Set the new target id */
@@ -845,6 +848,9 @@ static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
struct hwrm_reject_fwd_resp_input req = {0};
struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
+ if (BNXT_REJ_FWD_RESP_SIZE_ERR(msg_size))
+ return -EINVAL;
+
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_REJECT_FWD_RESP, -1, -1);
/* Set the new target id */
req.target_id = cpu_to_le16(vf->fw_fid);
@@ -877,6 +883,9 @@ static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
struct hwrm_exec_fwd_resp_input req = {0};
struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
+ if (BNXT_EXEC_FWD_RESP_SIZE_ERR(msg_size))
+ return -EINVAL;
+
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_EXEC_FWD_RESP, -1, -1);
/* Set the new target id */
req.target_id = cpu_to_le16(vf->fw_fid);
@@ -914,7 +923,8 @@ static int bnxt_vf_configure_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
if (req->enables & cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR)) {
if (is_valid_ether_addr(req->dflt_mac_addr) &&
((vf->flags & BNXT_VF_TRUST) ||
- (!is_valid_ether_addr(vf->mac_addr)))) {
+ !is_valid_ether_addr(vf->mac_addr) ||
+ ether_addr_equal(req->dflt_mac_addr, vf->mac_addr))) {
ether_addr_copy(vf->vf_mac_addr, req->dflt_mac_addr);
return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
index d10f6f6c7860..e9b20cd19881 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
@@ -11,6 +11,23 @@
#ifndef BNXT_SRIOV_H
#define BNXT_SRIOV_H
+#define BNXT_FWD_RESP_SIZE_ERR(n) \
+ ((offsetof(struct hwrm_fwd_resp_input, encap_resp) + n) > \
+ sizeof(struct hwrm_fwd_resp_input))
+
+#define BNXT_EXEC_FWD_RESP_SIZE_ERR(n) \
+ ((offsetof(struct hwrm_exec_fwd_resp_input, encap_request) + n) >\
+ offsetof(struct hwrm_exec_fwd_resp_input, encap_resp_target_id))
+
+#define BNXT_REJ_FWD_RESP_SIZE_ERR(n) \
+ ((offsetof(struct hwrm_reject_fwd_resp_input, encap_request) + n) >\
+ offsetof(struct hwrm_reject_fwd_resp_input, encap_resp_target_id))
+
+#define BNXT_VF_MIN_RSS_CTX 1
+#define BNXT_VF_MAX_RSS_CTX 1
+#define BNXT_VF_MIN_L2_CTX 1
+#define BNXT_VF_MAX_L2_CTX 4
+
int bnxt_get_vf_config(struct net_device *, int, struct ifla_vf_info *);
int bnxt_set_vf_mac(struct net_device *, int, u8 *);
int bnxt_set_vf_vlan(struct net_device *, int, u16, u8, __be16);
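The three *_SIZE_ERR() macros bound the VF-supplied msg_size before any forwarding header is built: the encapsulated payload is copied starting at the encap_resp/encap_request member, so msg_size may not exceed the room left in the fixed-size HWRM input structure. Spelling out the first one:

    /* BNXT_FWD_RESP_SIZE_ERR(n) expands to:
     *   offsetof(struct hwrm_fwd_resp_input, encap_resp) + n
     *           > sizeof(struct hwrm_fwd_resp_input)
     * i.e. "would an n-byte copy run past the end of the request?"
     * The bnxt_sriov.c hunks above refuse such messages with -EINVAL.
     */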
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index 1389ab5e05df..1f0e872d0667 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -113,10 +113,10 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
if (tx_avail != bp->tx_ring_size)
*event &= ~BNXT_RX_EVENT;
+ *len = xdp.data_end - xdp.data;
if (orig_data != xdp.data) {
offset = xdp.data - xdp.data_hard_start;
*data_ptr = xdp.data_hard_start + offset;
- *len = xdp.data_end - xdp.data;
}
switch (act) {
case XDP_PASS:
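Moving the *len update out of the data-pointer check matters because an XDP program can shrink a frame without moving its start: bpf_xdp_adjust_tail() changes xdp.data_end only, leaving orig_data == xdp.data, so the old code would keep a stale length. A sketch of the case the old code missed (hypothetical program, kernel headers and the SEC() annotation elided):

    int trim_tail(struct xdp_md *ctx)
    {
            bpf_xdp_adjust_tail(ctx, -64);  /* moves data_end, not data */
            return XDP_PASS;
    }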
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 0445f2c0c629..20c1681bb1af 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -652,7 +652,7 @@ static void bcmgenet_set_ring_rx_coalesce(struct bcmgenet_rx_ring *ring,
pkts = ring->rx_max_coalesced_frames;
if (ec->use_adaptive_rx_coalesce && !ring->dim.use_dim) {
- moder = net_dim_get_def_profile(ring->dim.dim.mode);
+ moder = net_dim_get_def_rx_moderation(ring->dim.dim.mode);
usecs = moder.usec;
pkts = moder.pkts;
}
@@ -1925,7 +1925,7 @@ static void bcmgenet_dim_work(struct work_struct *work)
struct bcmgenet_rx_ring *ring =
container_of(ndim, struct bcmgenet_rx_ring, dim);
struct net_dim_cq_moder cur_profile =
- net_dim_get_profile(dim->mode, dim->profile_ix);
+ net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
bcmgenet_set_rx_coalesce(ring, cur_profile.usec, cur_profile.pkts);
dim->state = NET_DIM_START_MEASURE;
@@ -2102,7 +2102,7 @@ static void bcmgenet_init_rx_coalesce(struct bcmgenet_rx_ring *ring)
/* If DIM was enabled, re-apply default parameters */
if (dim->use_dim) {
- moder = net_dim_get_def_profile(dim->dim.mode);
+ moder = net_dim_get_def_rx_moderation(dim->dim.mode);
usecs = moder.usec;
pkts = moder.pkts;
}
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index b4c9268100bb..3e93df5d4e3b 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -591,16 +591,10 @@ static int macb_mii_init(struct macb *bp)
dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
np = bp->pdev->dev.of_node;
+ if (pdata)
+ bp->mii_bus->phy_mask = pdata->phy_mask;
- if (np) {
- err = of_mdiobus_register(bp->mii_bus, np);
- } else {
- if (pdata)
- bp->mii_bus->phy_mask = pdata->phy_mask;
-
- err = mdiobus_register(bp->mii_bus);
- }
-
+ err = of_mdiobus_register(bp->mii_bus, np);
if (err)
goto err_out_free_mdiobus;
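The macb simplification leans on of_mdiobus_register() accepting a NULL device_node: in that case the OF helper falls back to plain mdiobus_register(), so the driver-side branch was dead weight. The relevant shape of the helper (a sketch of drivers/of/of_mdio.c behavior, not new code):

    int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
    {
            if (!np)
                    return mdiobus_register(mdio);
            /* ...otherwise register the bus and its DT-described PHYs... */
    }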
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
index e8b290473ee2..929d485a3a2f 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
@@ -1245,7 +1245,7 @@ static void cn23xx_setup_reg_address(struct octeon_device *oct)
CN23XX_SLI_MAC_PF_INT_ENB64(oct->pcie_port, oct->pf_num);
}
-static int cn23xx_sriov_config(struct octeon_device *oct)
+int cn23xx_sriov_config(struct octeon_device *oct)
{
struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
u32 max_rings, total_rings, max_vfs, rings_per_vf;
@@ -1269,8 +1269,8 @@ static int cn23xx_sriov_config(struct octeon_device *oct)
break;
}
- if (max_rings <= num_present_cpus())
- num_pf_rings = 1;
+ if (oct->sriov_info.num_pf_rings)
+ num_pf_rings = oct->sriov_info.num_pf_rings;
else
num_pf_rings = num_present_cpus();
@@ -1497,3 +1497,57 @@ void cn23xx_tell_vf_its_macaddr_changed(struct octeon_device *oct, int vfidx,
octeon_mbox_write(oct, &mbox_cmd);
}
}
+
+static void
+cn23xx_get_vf_stats_callback(struct octeon_device *oct,
+ struct octeon_mbox_cmd *cmd, void *arg)
+{
+ struct oct_vf_stats_ctx *ctx = arg;
+
+ memcpy(ctx->stats, cmd->data, sizeof(struct oct_vf_stats));
+ atomic_set(&ctx->status, 1);
+}
+
+int cn23xx_get_vf_stats(struct octeon_device *oct, int vfidx,
+ struct oct_vf_stats *stats)
+{
+ u32 timeout = HZ; /* 1 sec */
+ struct octeon_mbox_cmd mbox_cmd;
+ struct oct_vf_stats_ctx ctx;
+ u32 count = 0, ret;
+
+ if (!(oct->sriov_info.vf_drv_loaded_mask & (1ULL << vfidx)))
+ return -1;
+
+ if (sizeof(struct oct_vf_stats) > sizeof(mbox_cmd.data))
+ return -1;
+
+ mbox_cmd.msg.u64 = 0;
+ mbox_cmd.msg.s.type = OCTEON_MBOX_REQUEST;
+ mbox_cmd.msg.s.resp_needed = 1;
+ mbox_cmd.msg.s.cmd = OCTEON_GET_VF_STATS;
+ mbox_cmd.msg.s.len = 1;
+ mbox_cmd.q_no = vfidx * oct->sriov_info.rings_per_vf;
+ mbox_cmd.recv_len = 0;
+ mbox_cmd.recv_status = 0;
+ mbox_cmd.fn = (octeon_mbox_callback_t)cn23xx_get_vf_stats_callback;
+ ctx.stats = stats;
+ atomic_set(&ctx.status, 0);
+ mbox_cmd.fn_arg = (void *)&ctx;
+ memset(mbox_cmd.data, 0, sizeof(mbox_cmd.data));
+ octeon_mbox_write(oct, &mbox_cmd);
+
+ do {
+ schedule_timeout_uninterruptible(1);
+ } while ((atomic_read(&ctx.status) == 0) && (count++ < timeout));
+
+ ret = atomic_read(&ctx.status);
+ if (ret == 0) {
+ octeon_mbox_cancel(oct, 0);
+ dev_err(&oct->pci_dev->dev, "Unable to get stats from VF-%d, timedout\n",
+ vfidx);
+ return -1;
+ }
+
+ return 0;
+}
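cn23xx_get_vf_stats() busy-polls the mailbox completion: one tick of schedule_timeout_uninterruptible() per pass, bounded by HZ passes, i.e. roughly a one-second ceiling before octeon_mbox_cancel() gives up. The natural consumer is an ndo_get_vf_stats hook; a sketch of that wiring (hypothetical function here, the real hook-up belongs in lio_main.c):

    static int example_get_vf_stats(struct net_device *netdev, int vfidx,
                                    struct ifla_vf_stats *vf_stats)
    {
            struct lio *lio = GET_LIO(netdev);
            struct oct_vf_stats stats;

            memset(&stats, 0, sizeof(stats));
            if (cn23xx_get_vf_stats(lio->oct_dev, vfidx, &stats))
                    return -EIO;

            vf_stats->rx_packets = stats.rx_packets;
            vf_stats->tx_packets = stats.tx_packets;
            vf_stats->rx_bytes = stats.rx_bytes;
            vf_stats->tx_bytes = stats.tx_bytes;
            vf_stats->broadcast = stats.broadcast;
            vf_stats->multicast = stats.multicast;
            return 0;
    }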
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h
index 2aba5247b6d8..e6f31d0d5c0b 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h
@@ -43,6 +43,15 @@ struct octeon_cn23xx_pf {
#define CN23XX_SLI_DEF_BP 0x40
+struct oct_vf_stats {
+ u64 rx_packets;
+ u64 tx_packets;
+ u64 rx_bytes;
+ u64 tx_bytes;
+ u64 broadcast;
+ u64 multicast;
+};
+
int setup_cn23xx_octeon_pf_device(struct octeon_device *oct);
int validate_cn23xx_pf_config_info(struct octeon_device *oct,
@@ -52,8 +61,13 @@ u32 cn23xx_pf_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us);
void cn23xx_dump_pf_initialized_regs(struct octeon_device *oct);
+int cn23xx_sriov_config(struct octeon_device *oct);
+
int cn23xx_fw_loaded(struct octeon_device *oct);
void cn23xx_tell_vf_its_macaddr_changed(struct octeon_device *oct, int vfidx,
u8 *mac);
+
+int cn23xx_get_vf_stats(struct octeon_device *oct, int ifidx,
+ struct oct_vf_stats *stats);
#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c
index 2a94eee943b2..8093c5eafea2 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_core.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c
@@ -29,6 +29,162 @@
/* OOM task polling interval */
#define LIO_OOM_POLL_INTERVAL_MS 250
+#define OCTNIC_MAX_SG MAX_SKB_FRAGS
+
+/**
+ * \brief Callback for getting interface configuration
+ * @param status status of request
+ * @param buf pointer to resp structure
+ */
+void lio_if_cfg_callback(struct octeon_device *oct,
+ u32 status __attribute__((unused)), void *buf)
+{
+ struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
+ struct liquidio_if_cfg_context *ctx;
+ struct liquidio_if_cfg_resp *resp;
+
+ resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
+ ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
+
+ oct = lio_get_device(ctx->octeon_id);
+ if (resp->status)
+ dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: %llx\n",
+ CVM_CAST64(resp->status));
+ WRITE_ONCE(ctx->cond, 1);
+
+ snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s",
+ resp->cfg_info.liquidio_firmware_version);
+
+ /* This barrier is required to be sure that the response has been
+ * written fully before waking up the handler
+ */
+ wmb();
+
+ wake_up_interruptible(&ctx->wc);
+}
+
+/**
+ * \brief Delete gather lists
+ * @param lio per-network private data
+ */
+void lio_delete_glists(struct lio *lio)
+{
+ struct octnic_gather *g;
+ int i;
+
+ kfree(lio->glist_lock);
+ lio->glist_lock = NULL;
+
+ if (!lio->glist)
+ return;
+
+ for (i = 0; i < lio->oct_dev->num_iqs; i++) {
+ do {
+ g = (struct octnic_gather *)
+ lio_list_delete_head(&lio->glist[i]);
+ kfree(g);
+ } while (g);
+
+ if (lio->glists_virt_base && lio->glists_virt_base[i] &&
+ lio->glists_dma_base && lio->glists_dma_base[i]) {
+ lio_dma_free(lio->oct_dev,
+ lio->glist_entry_size * lio->tx_qsize,
+ lio->glists_virt_base[i],
+ lio->glists_dma_base[i]);
+ }
+ }
+
+ kfree(lio->glists_virt_base);
+ lio->glists_virt_base = NULL;
+
+ kfree(lio->glists_dma_base);
+ lio->glists_dma_base = NULL;
+
+ kfree(lio->glist);
+ lio->glist = NULL;
+}
+
+/**
+ * \brief Setup gather lists
+ * @param lio per-network private data
+ */
+int lio_setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
+{
+ struct octnic_gather *g;
+ int i, j;
+
+ lio->glist_lock =
+ kcalloc(num_iqs, sizeof(*lio->glist_lock), GFP_KERNEL);
+ if (!lio->glist_lock)
+ return -ENOMEM;
+
+ lio->glist =
+ kcalloc(num_iqs, sizeof(*lio->glist), GFP_KERNEL);
+ if (!lio->glist) {
+ kfree(lio->glist_lock);
+ lio->glist_lock = NULL;
+ return -ENOMEM;
+ }
+
+ lio->glist_entry_size =
+ ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);
+
+ /* allocate memory to store virtual and dma base address of
+ * per glist consistent memory
+ */
+ lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base),
+ GFP_KERNEL);
+ lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base),
+ GFP_KERNEL);
+
+ if (!lio->glists_virt_base || !lio->glists_dma_base) {
+ lio_delete_glists(lio);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < num_iqs; i++) {
+ int numa_node = dev_to_node(&oct->pci_dev->dev);
+
+ spin_lock_init(&lio->glist_lock[i]);
+
+ INIT_LIST_HEAD(&lio->glist[i]);
+
+ lio->glists_virt_base[i] =
+ lio_dma_alloc(oct,
+ lio->glist_entry_size * lio->tx_qsize,
+ &lio->glists_dma_base[i]);
+
+ if (!lio->glists_virt_base[i]) {
+ lio_delete_glists(lio);
+ return -ENOMEM;
+ }
+
+ for (j = 0; j < lio->tx_qsize; j++) {
+ g = kzalloc_node(sizeof(*g), GFP_KERNEL,
+ numa_node);
+ if (!g)
+ g = kzalloc(sizeof(*g), GFP_KERNEL);
+ if (!g)
+ break;
+
+ g->sg = lio->glists_virt_base[i] +
+ (j * lio->glist_entry_size);
+
+ g->sg_dma_ptr = lio->glists_dma_base[i] +
+ (j * lio->glist_entry_size);
+
+ list_add_tail(&g->list, &lio->glist[i]);
+ }
+
+ if (j != lio->tx_qsize) {
+ lio_delete_glists(lio);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1)
{
struct lio *lio = GET_LIO(netdev);
@@ -880,8 +1036,8 @@ int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs)
int num_ioq_vectors;
int irqret, err;
- oct->num_msix_irqs = num_ioqs;
if (oct->msix_on) {
+ oct->num_msix_irqs = num_ioqs;
if (OCTEON_CN23XX_PF(oct)) {
num_interrupts = MAX_IOQ_INTERRUPTS_PER_PF + 1;
@@ -1169,3 +1325,355 @@ int lio_wait_for_clean_oq(struct octeon_device *oct)
return pending_pkts;
}
+
+static void
+octnet_nic_stats_callback(struct octeon_device *oct_dev,
+ u32 status, void *ptr)
+{
+ struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
+ struct oct_nic_stats_resp *resp =
+ (struct oct_nic_stats_resp *)sc->virtrptr;
+ struct oct_nic_stats_ctrl *ctrl =
+ (struct oct_nic_stats_ctrl *)sc->ctxptr;
+ struct nic_rx_stats *rsp_rstats = &resp->stats.fromwire;
+ struct nic_tx_stats *rsp_tstats = &resp->stats.fromhost;
+ struct nic_rx_stats *rstats = &oct_dev->link_stats.fromwire;
+ struct nic_tx_stats *tstats = &oct_dev->link_stats.fromhost;
+
+ if (status != OCTEON_REQUEST_TIMEOUT && !resp->status) {
+ octeon_swap_8B_data((u64 *)&resp->stats,
+ (sizeof(struct oct_link_stats)) >> 3);
+
+ /* RX link-level stats */
+ rstats->total_rcvd = rsp_rstats->total_rcvd;
+ rstats->bytes_rcvd = rsp_rstats->bytes_rcvd;
+ rstats->total_bcst = rsp_rstats->total_bcst;
+ rstats->total_mcst = rsp_rstats->total_mcst;
+ rstats->runts = rsp_rstats->runts;
+ rstats->ctl_rcvd = rsp_rstats->ctl_rcvd;
+ /* Accounts for over/under-run of buffers */
+ rstats->fifo_err = rsp_rstats->fifo_err;
+ rstats->dmac_drop = rsp_rstats->dmac_drop;
+ rstats->fcs_err = rsp_rstats->fcs_err;
+ rstats->jabber_err = rsp_rstats->jabber_err;
+ rstats->l2_err = rsp_rstats->l2_err;
+ rstats->frame_err = rsp_rstats->frame_err;
+ rstats->red_drops = rsp_rstats->red_drops;
+
+ /* RX firmware stats */
+ rstats->fw_total_rcvd = rsp_rstats->fw_total_rcvd;
+ rstats->fw_total_fwd = rsp_rstats->fw_total_fwd;
+ rstats->fw_total_mcast = rsp_rstats->fw_total_mcast;
+ rstats->fw_total_bcast = rsp_rstats->fw_total_bcast;
+ rstats->fw_err_pko = rsp_rstats->fw_err_pko;
+ rstats->fw_err_link = rsp_rstats->fw_err_link;
+ rstats->fw_err_drop = rsp_rstats->fw_err_drop;
+ rstats->fw_rx_vxlan = rsp_rstats->fw_rx_vxlan;
+ rstats->fw_rx_vxlan_err = rsp_rstats->fw_rx_vxlan_err;
+
+ /* Number of packets that are LROed */
+ rstats->fw_lro_pkts = rsp_rstats->fw_lro_pkts;
+ /* Number of octets that are LROed */
+ rstats->fw_lro_octs = rsp_rstats->fw_lro_octs;
+ /* Number of LRO packets formed */
+ rstats->fw_total_lro = rsp_rstats->fw_total_lro;
+ /* Number of times LRO of packet aborted */
+ rstats->fw_lro_aborts = rsp_rstats->fw_lro_aborts;
+ rstats->fw_lro_aborts_port = rsp_rstats->fw_lro_aborts_port;
+ rstats->fw_lro_aborts_seq = rsp_rstats->fw_lro_aborts_seq;
+ rstats->fw_lro_aborts_tsval = rsp_rstats->fw_lro_aborts_tsval;
+ rstats->fw_lro_aborts_timer = rsp_rstats->fw_lro_aborts_timer;
+ /* intrmod: packet forward rate */
+ rstats->fwd_rate = rsp_rstats->fwd_rate;
+
+ /* TX link-level stats */
+ tstats->total_pkts_sent = rsp_tstats->total_pkts_sent;
+ tstats->total_bytes_sent = rsp_tstats->total_bytes_sent;
+ tstats->mcast_pkts_sent = rsp_tstats->mcast_pkts_sent;
+ tstats->bcast_pkts_sent = rsp_tstats->bcast_pkts_sent;
+ tstats->ctl_sent = rsp_tstats->ctl_sent;
+ /* Packets sent after one collision */
+ tstats->one_collision_sent = rsp_tstats->one_collision_sent;
+ /* Packets sent after multiple collisions */
+ tstats->multi_collision_sent = rsp_tstats->multi_collision_sent;
+ /* Packets not sent due to max collisions */
+ tstats->max_collision_fail = rsp_tstats->max_collision_fail;
+ /* Packets not sent due to max deferrals */
+ tstats->max_deferral_fail = rsp_tstats->max_deferral_fail;
+ /* Accounts for over/under-run of buffers */
+ tstats->fifo_err = rsp_tstats->fifo_err;
+ tstats->runts = rsp_tstats->runts;
+ /* Total number of collisions detected */
+ tstats->total_collisions = rsp_tstats->total_collisions;
+
+ /* firmware stats */
+ tstats->fw_total_sent = rsp_tstats->fw_total_sent;
+ tstats->fw_total_fwd = rsp_tstats->fw_total_fwd;
+ tstats->fw_total_mcast_sent = rsp_tstats->fw_total_mcast_sent;
+ tstats->fw_total_bcast_sent = rsp_tstats->fw_total_bcast_sent;
+ tstats->fw_err_pko = rsp_tstats->fw_err_pko;
+ tstats->fw_err_pki = rsp_tstats->fw_err_pki;
+ tstats->fw_err_link = rsp_tstats->fw_err_link;
+ tstats->fw_err_drop = rsp_tstats->fw_err_drop;
+ tstats->fw_tso = rsp_tstats->fw_tso;
+ tstats->fw_tso_fwd = rsp_tstats->fw_tso_fwd;
+ tstats->fw_err_tso = rsp_tstats->fw_err_tso;
+ tstats->fw_tx_vxlan = rsp_tstats->fw_tx_vxlan;
+
+ resp->status = 1;
+ } else {
+ resp->status = -1;
+ }
+ complete(&ctrl->complete);
+}
+
+int octnet_get_link_stats(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct_dev = lio->oct_dev;
+ struct octeon_soft_command *sc;
+ struct oct_nic_stats_ctrl *ctrl;
+ struct oct_nic_stats_resp *resp;
+ int retval;
+
+ /* Alloc soft command */
+ sc = (struct octeon_soft_command *)
+ octeon_alloc_soft_command(oct_dev,
+ 0,
+ sizeof(struct oct_nic_stats_resp),
+ sizeof(struct octnic_ctrl_pkt));
+
+ if (!sc)
+ return -ENOMEM;
+
+ resp = (struct oct_nic_stats_resp *)sc->virtrptr;
+ memset(resp, 0, sizeof(struct oct_nic_stats_resp));
+
+ ctrl = (struct oct_nic_stats_ctrl *)sc->ctxptr;
+ memset(ctrl, 0, sizeof(struct oct_nic_stats_ctrl));
+ ctrl->netdev = netdev;
+ init_completion(&ctrl->complete);
+
+ sc->iq_no = lio->linfo.txpciq[0].s.q_no;
+
+ octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
+ OPCODE_NIC_PORT_STATS, 0, 0, 0);
+
+ sc->callback = octnet_nic_stats_callback;
+ sc->callback_arg = sc;
+ sc->wait_time = 500; /* in milliseconds */
+
+ retval = octeon_send_soft_command(oct_dev, sc);
+ if (retval == IQ_SEND_FAILED) {
+ octeon_free_soft_command(oct_dev, sc);
+ return -EINVAL;
+ }
+
+ wait_for_completion_timeout(&ctrl->complete, msecs_to_jiffies(1000));
+
+ if (resp->status != 1) {
+ octeon_free_soft_command(oct_dev, sc);
+
+ return -EINVAL;
+ }
+
+ octeon_free_soft_command(oct_dev, sc);
+
+ return 0;
+}
+
+static void liquidio_nic_seapi_ctl_callback(struct octeon_device *oct,
+ u32 status,
+ void *buf)
+{
+ struct liquidio_nic_seapi_ctl_context *ctx;
+ struct octeon_soft_command *sc = buf;
+
+ ctx = sc->ctxptr;
+
+ oct = lio_get_device(ctx->octeon_id);
+ if (status) {
+ dev_err(&oct->pci_dev->dev, "%s: instruction failed. Status: %llx\n",
+ __func__,
+ CVM_CAST64(status));
+ }
+ ctx->status = status;
+ complete(&ctx->complete);
+}
+
+int liquidio_set_speed(struct lio *lio, int speed)
+{
+ struct liquidio_nic_seapi_ctl_context *ctx;
+ struct octeon_device *oct = lio->oct_dev;
+ struct oct_nic_seapi_resp *resp;
+ struct octeon_soft_command *sc;
+ union octnet_cmd *ncmd;
+ u32 ctx_size;
+ int retval;
+ u32 var;
+
+ if (oct->speed_setting == speed)
+ return 0;
+
+ if (!OCTEON_CN23XX_PF(oct)) {
+ dev_err(&oct->pci_dev->dev, "%s: SET SPEED only for PF\n",
+ __func__);
+ return -EOPNOTSUPP;
+ }
+
+ ctx_size = sizeof(struct liquidio_nic_seapi_ctl_context);
+ sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
+ sizeof(struct oct_nic_seapi_resp),
+ ctx_size);
+ if (!sc)
+ return -ENOMEM;
+
+ ncmd = sc->virtdptr;
+ ctx = sc->ctxptr;
+ resp = sc->virtrptr;
+ memset(resp, 0, sizeof(struct oct_nic_seapi_resp));
+
+ ctx->octeon_id = lio_get_device_id(oct);
+ ctx->status = 0;
+ init_completion(&ctx->complete);
+
+ ncmd->u64 = 0;
+ ncmd->s.cmd = SEAPI_CMD_SPEED_SET;
+ ncmd->s.param1 = speed;
+
+ octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));
+
+ sc->iq_no = lio->linfo.txpciq[0].s.q_no;
+
+ octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
+ OPCODE_NIC_UBOOT_CTL, 0, 0, 0);
+
+ sc->callback = liquidio_nic_seapi_ctl_callback;
+ sc->callback_arg = sc;
+ sc->wait_time = 5000;
+
+ retval = octeon_send_soft_command(oct, sc);
+ if (retval == IQ_SEND_FAILED) {
+ dev_info(&oct->pci_dev->dev, "Failed to send soft command\n");
+ retval = -EBUSY;
+ } else {
+ /* Wait for response or timeout */
+ if (wait_for_completion_timeout(&ctx->complete,
+ msecs_to_jiffies(10000)) == 0) {
+ dev_err(&oct->pci_dev->dev, "%s: sc timeout\n",
+ __func__);
+ octeon_free_soft_command(oct, sc);
+ return -EINTR;
+ }
+
+ retval = resp->status;
+
+ if (retval) {
+ dev_err(&oct->pci_dev->dev, "%s failed, retval=%d\n",
+ __func__, retval);
+ octeon_free_soft_command(oct, sc);
+ return -EIO;
+ }
+
+ var = be32_to_cpu((__force __be32)resp->speed);
+ if (var != speed) {
+ dev_err(&oct->pci_dev->dev,
+ "%s: setting failed speed= %x, expect %x\n",
+ __func__, var, speed);
+ }
+
+ oct->speed_setting = var;
+ }
+
+ octeon_free_soft_command(oct, sc);
+
+ return retval;
+}
+
+int liquidio_get_speed(struct lio *lio)
+{
+ struct liquidio_nic_seapi_ctl_context *ctx;
+ struct octeon_device *oct = lio->oct_dev;
+ struct oct_nic_seapi_resp *resp;
+ struct octeon_soft_command *sc;
+ union octnet_cmd *ncmd;
+ u32 ctx_size;
+ int retval;
+
+ ctx_size = sizeof(struct liquidio_nic_seapi_ctl_context);
+ sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
+ sizeof(struct oct_nic_seapi_resp),
+ ctx_size);
+ if (!sc)
+ return -ENOMEM;
+
+ ncmd = sc->virtdptr;
+ ctx = sc->ctxptr;
+ resp = sc->virtrptr;
+ memset(resp, 0, sizeof(struct oct_nic_seapi_resp));
+
+ ctx->octeon_id = lio_get_device_id(oct);
+ ctx->status = 0;
+ init_completion(&ctx->complete);
+
+ ncmd->u64 = 0;
+ ncmd->s.cmd = SEAPI_CMD_SPEED_GET;
+
+ octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));
+
+ sc->iq_no = lio->linfo.txpciq[0].s.q_no;
+
+ octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
+ OPCODE_NIC_UBOOT_CTL, 0, 0, 0);
+
+ sc->callback = liquidio_nic_seapi_ctl_callback;
+ sc->callback_arg = sc;
+ sc->wait_time = 5000;
+
+ retval = octeon_send_soft_command(oct, sc);
+ if (retval == IQ_SEND_FAILED) {
+ dev_info(&oct->pci_dev->dev, "Failed to send soft command\n");
+ oct->no_speed_setting = 1;
+ oct->speed_setting = 25;
+
+ retval = -EBUSY;
+ } else {
+ if (wait_for_completion_timeout(&ctx->complete,
+ msecs_to_jiffies(10000)) == 0) {
+ dev_err(&oct->pci_dev->dev, "%s: sc timeout\n",
+ __func__);
+
+ oct->speed_setting = 25;
+ oct->no_speed_setting = 1;
+
+ octeon_free_soft_command(oct, sc);
+
+ return -EINTR;
+ }
+ retval = resp->status;
+ if (retval) {
+ dev_err(&oct->pci_dev->dev,
+ "%s failed retval=%d\n", __func__, retval);
+ oct->no_speed_setting = 1;
+ oct->speed_setting = 25;
+ retval = -EIO;
+ } else {
+ u32 var;
+
+ var = be32_to_cpu((__force __be32)resp->speed);
+ oct->speed_setting = var;
+ if (var == 0xffff) {
+ oct->no_speed_setting = 1;
+ /* Unable to access boot variables;
+ * use the default value for this NIC type.
+ */
+ oct->speed_setting = 25;
+ }
+ }
+ }
+
+ octeon_free_soft_command(oct, sc);
+
+ return retval;
+}
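
The response decode above reads resp->speed as a big-endian 32-bit value and treats 0xffff as "boot variables unreadable", falling back to the 25G default. Condensed into a standalone sketch (illustrative names; the bswap stands in for be32_to_cpu() on a little-endian host):

#include <stdint.h>
#include <stdio.h>

static uint32_t decode_speed(uint32_t raw_be, int *no_speed_setting)
{
	uint32_t speed = __builtin_bswap32(raw_be); /* be32_to_cpu analog */

	if (speed == 0xffff) {
		*no_speed_setting = 1;
		speed = 25;	/* default based on the NIC type */
	}
	return speed;
}

int main(void)
{
	int nss = 0;

	/* Firmware could not read the boot variable: falls back to 25. */
	printf("%u\n", decode_speed(__builtin_bswap32(0xffff), &nss));
	return 0;
}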
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
index 550ac29682a5..06f7449c569d 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
@@ -32,7 +32,6 @@
#include "cn23xx_vf_device.h"
static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs);
-static int octnet_get_link_stats(struct net_device *netdev);
struct oct_intrmod_context {
int octeon_id;
@@ -96,11 +95,9 @@ static const char oct_stats_strings[][ETH_GSTRING_LEN] = {
"tx_packets",
"rx_bytes",
"tx_bytes",
- "rx_errors", /*jabber_err+l2_err+frame_err */
- "tx_errors", /*fw_err_pko+fw_err_link+fw_err_drop */
- "rx_dropped", /*st->fromwire.total_rcvd - st->fromwire.fw_total_rcvd +
- *st->fromwire.dmac_drop + st->fromwire.fw_err_drop
- */
+ "rx_errors",
+ "tx_errors",
+ "rx_dropped",
"tx_dropped",
"tx_total_sent",
@@ -115,14 +112,17 @@ static const char oct_stats_strings[][ETH_GSTRING_LEN] = {
"tx_tso_err",
"tx_vxlan",
+ "tx_mcast",
+ "tx_bcast",
+
"mac_tx_total_pkts",
"mac_tx_total_bytes",
"mac_tx_mcast_pkts",
"mac_tx_bcast_pkts",
- "mac_tx_ctl_packets", /*oct->link_stats.fromhost.ctl_sent */
+ "mac_tx_ctl_packets",
"mac_tx_total_collisions",
"mac_tx_one_collision",
- "mac_tx_multi_collison",
+ "mac_tx_multi_collision",
"mac_tx_max_collision_fail",
"mac_tx_max_deferal_fail",
"mac_tx_fifo_err",
@@ -130,6 +130,8 @@ static const char oct_stats_strings[][ETH_GSTRING_LEN] = {
"rx_total_rcvd",
"rx_total_fwd",
+ "rx_mcast",
+ "rx_bcast",
"rx_jabber_err",
"rx_l2_err",
"rx_frame_err",
@@ -170,17 +172,21 @@ static const char oct_vf_stats_strings[][ETH_GSTRING_LEN] = {
"tx_packets",
"rx_bytes",
"tx_bytes",
- "rx_errors", /* jabber_err + l2_err+frame_err */
- "tx_errors", /* fw_err_pko + fw_err_link+fw_err_drop */
- "rx_dropped", /* total_rcvd - fw_total_rcvd + dmac_drop + fw_err_drop */
+ "rx_errors",
+ "tx_errors",
+ "rx_dropped",
"tx_dropped",
+ "rx_mcast",
+ "tx_mcast",
+ "rx_bcast",
+ "tx_bcast",
"link_state_changes",
};
/* statistics of host tx queue */
static const char oct_iq_stats_strings[][ETH_GSTRING_LEN] = {
- "packets", /*oct->instr_queue[iq_no]->stats.tx_done*/
- "bytes", /*oct->instr_queue[iq_no]->stats.tx_tot_bytes*/
+ "packets",
+ "bytes",
"dropped",
"iq_busy",
"sgentry_sent",
@@ -197,13 +203,9 @@ static const char oct_iq_stats_strings[][ETH_GSTRING_LEN] = {
/* statistics of host rx queue */
static const char oct_droq_stats_strings[][ETH_GSTRING_LEN] = {
- "packets", /*oct->droq[oq_no]->stats.rx_pkts_received */
- "bytes", /*oct->droq[oq_no]->stats.rx_bytes_received */
- "dropped", /*oct->droq[oq_no]->stats.rx_dropped+
- *oct->droq[oq_no]->stats.dropped_nodispatch+
- *oct->droq[oq_no]->stats.dropped_toomany+
- *oct->droq[oq_no]->stats.dropped_nomem
- */
+ "packets",
+ "bytes",
+ "dropped",
"dropped_nomem",
"dropped_toomany",
"fw_dropped",
@@ -228,46 +230,147 @@ static int lio_get_link_ksettings(struct net_device *netdev,
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
struct oct_link_info *linfo;
- u32 supported = 0, advertising = 0;
linfo = &lio->linfo;
+ ethtool_link_ksettings_zero_link_mode(ecmd, supported);
+ ethtool_link_ksettings_zero_link_mode(ecmd, advertising);
+
switch (linfo->link.s.phy_type) {
case LIO_PHY_PORT_TP:
ecmd->base.port = PORT_TP;
- supported = (SUPPORTED_10000baseT_Full |
- SUPPORTED_TP | SUPPORTED_Pause);
- advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_Pause);
ecmd->base.autoneg = AUTONEG_DISABLE;
+ ethtool_link_ksettings_add_link_mode(ecmd, supported, TP);
+ ethtool_link_ksettings_add_link_mode(ecmd, supported, Pause);
+ ethtool_link_ksettings_add_link_mode(ecmd, supported,
+ 10000baseT_Full);
+
+ ethtool_link_ksettings_add_link_mode(ecmd, advertising, Pause);
+ ethtool_link_ksettings_add_link_mode(ecmd, advertising,
+ 10000baseT_Full);
+
break;
case LIO_PHY_PORT_FIBRE:
- ecmd->base.port = PORT_FIBRE;
-
- if (linfo->link.s.speed == SPEED_10000) {
- supported = SUPPORTED_10000baseT_Full;
- advertising = ADVERTISED_10000baseT_Full;
+ if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI ||
+ linfo->link.s.if_mode == INTERFACE_MODE_RXAUI ||
+ linfo->link.s.if_mode == INTERFACE_MODE_XLAUI ||
+ linfo->link.s.if_mode == INTERFACE_MODE_XFI) {
+ dev_dbg(&oct->pci_dev->dev, "ecmd->base.transceiver is XCVR_EXTERNAL\n");
+ } else {
+ dev_err(&oct->pci_dev->dev, "Unknown link interface mode: %d\n",
+ linfo->link.s.if_mode);
}
- supported |= SUPPORTED_FIBRE | SUPPORTED_Pause;
- advertising |= ADVERTISED_Pause;
+ ecmd->base.port = PORT_FIBRE;
ecmd->base.autoneg = AUTONEG_DISABLE;
+ ethtool_link_ksettings_add_link_mode(ecmd, supported, FIBRE);
+
+ ethtool_link_ksettings_add_link_mode(ecmd, supported, Pause);
+ ethtool_link_ksettings_add_link_mode(ecmd, advertising, Pause);
+ if (oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID ||
+ oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID) {
+ if (OCTEON_CN23XX_PF(oct)) {
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, supported, 25000baseSR_Full);
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, supported, 25000baseKR_Full);
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, supported, 25000baseCR_Full);
+
+ if (oct->no_speed_setting == 0) {
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, supported,
+ 10000baseSR_Full);
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, supported,
+ 10000baseKR_Full);
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, supported,
+ 10000baseCR_Full);
+ }
+
+ if (oct->no_speed_setting == 0)
+ liquidio_get_speed(lio);
+ else
+ oct->speed_setting = 25;
+
+ if (oct->speed_setting == 10) {
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, advertising,
+ 10000baseSR_Full);
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, advertising,
+ 10000baseKR_Full);
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, advertising,
+ 10000baseCR_Full);
+ }
+ if (oct->speed_setting == 25) {
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, advertising,
+ 25000baseSR_Full);
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, advertising,
+ 25000baseKR_Full);
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, advertising,
+ 25000baseCR_Full);
+ }
+ } else { /* VF */
+ if (linfo->link.s.speed == 10000) {
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, supported,
+ 10000baseSR_Full);
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, supported,
+ 10000baseKR_Full);
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, supported,
+ 10000baseCR_Full);
+
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, advertising,
+ 10000baseSR_Full);
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, advertising,
+ 10000baseKR_Full);
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, advertising,
+ 10000baseCR_Full);
+ }
+
+ if (linfo->link.s.speed == 25000) {
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, supported,
+ 25000baseSR_Full);
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, supported,
+ 25000baseKR_Full);
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, supported,
+ 25000baseCR_Full);
+
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, advertising,
+ 25000baseSR_Full);
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, advertising,
+ 25000baseKR_Full);
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, advertising,
+ 25000baseCR_Full);
+ }
+ }
+ } else {
+ ethtool_link_ksettings_add_link_mode(ecmd, supported,
+ 10000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(ecmd, advertising,
+ 10000baseT_Full);
+ }
break;
}
- if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI ||
- linfo->link.s.if_mode == INTERFACE_MODE_RXAUI ||
- linfo->link.s.if_mode == INTERFACE_MODE_XLAUI ||
- linfo->link.s.if_mode == INTERFACE_MODE_XFI) {
- ethtool_convert_legacy_u32_to_link_mode(
- ecmd->link_modes.supported, supported);
- ethtool_convert_legacy_u32_to_link_mode(
- ecmd->link_modes.advertising, advertising);
- } else {
- dev_err(&oct->pci_dev->dev, "Unknown link interface reported %d\n",
- linfo->link.s.if_mode);
- }
-
if (linfo->link.s.link_up) {
ecmd->base.speed = linfo->link.s.speed;
ecmd->base.duplex = linfo->link.s.duplex;
@@ -279,6 +382,51 @@ static int lio_get_link_ksettings(struct net_device *netdev,
return 0;
}
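
The rewrite above replaces the legacy u32 supported/advertising words with ethtool link-mode bitmaps, which is what makes modes past bit 31 (the 25000base* family) representable at all. A small sketch of the underlying idea, with illustrative bit numbers rather than the real ETHTOOL_LINK_MODE_* values:

#include <stdint.h>
#include <stdio.h>

#define MODE_10000baseT_Full	12	/* illustrative bit index */
#define MODE_25000baseSR_Full	41	/* would not fit in a u32 */

static void mode_set(uint64_t map[2], int bit)
{
	map[bit / 64] |= 1ULL << (bit % 64);
}

int main(void)
{
	uint64_t supported[2] = { 0, 0 };

	mode_set(supported, MODE_10000baseT_Full);
	mode_set(supported, MODE_25000baseSR_Full);
	printf("%016llx %016llx\n",
	       (unsigned long long)supported[1],
	       (unsigned long long)supported[0]);
	return 0;
}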
+static int lio_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *ecmd)
+{
+ const int speed = ecmd->base.speed;
+ struct lio *lio = GET_LIO(netdev);
+ struct oct_link_info *linfo;
+ struct octeon_device *oct;
+ u32 is25G = 0;
+
+ oct = lio->oct_dev;
+
+ linfo = &lio->linfo;
+
+ if (oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID ||
+ oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID) {
+ is25G = 1;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ if (oct->no_speed_setting) {
+ dev_err(&oct->pci_dev->dev, "%s: Changing speed is not supported\n",
+ __func__);
+ return -EOPNOTSUPP;
+ }
+
+ if ((ecmd->base.duplex != DUPLEX_UNKNOWN &&
+ ecmd->base.duplex != linfo->link.s.duplex) ||
+ ecmd->base.autoneg != AUTONEG_DISABLE ||
+ (ecmd->base.speed != 10000 && ecmd->base.speed != 25000 &&
+ ecmd->base.speed != SPEED_UNKNOWN))
+ return -EOPNOTSUPP;
+
+ if ((oct->speed_boot == speed / 1000) &&
+ oct->speed_boot == oct->speed_setting)
+ return 0;
+
+ liquidio_set_speed(lio, speed / 1000);
+
+ dev_dbg(&oct->pci_dev->dev, "Port speed is set to %dG\n",
+ oct->speed_setting);
+
+ return 0;
+}
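
The checks in lio_set_link_ksettings() boil down to: duplex must be unchanged or unknown, autoneg must stay off, and only 10G/25G (or unknown) speeds pass; liquidio_set_speed() then takes the speed in Gbps (speed / 1000). A condensed restatement, with constants mirroring the uapi values from linux/ethtool.h:

#include <stdbool.h>
#include <stdio.h>

#define DUPLEX_UNKNOWN	0xff
#define AUTONEG_DISABLE	0
#define SPEED_UNKNOWN	(-1)

static bool speed_request_ok(int speed, int duplex, int link_duplex,
			     int autoneg)
{
	if (duplex != DUPLEX_UNKNOWN && duplex != link_duplex)
		return false;
	if (autoneg != AUTONEG_DISABLE)
		return false;
	return speed == 10000 || speed == 25000 || speed == SPEED_UNKNOWN;
}

int main(void)
{
	/* 25G request, duplex left unknown, autoneg off: accepted */
	printf("%d\n", speed_request_ok(25000, DUPLEX_UNKNOWN, 1,
					AUTONEG_DISABLE));
	return 0;
}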
+
static void
lio_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
@@ -359,7 +507,14 @@ lio_ethtool_get_channels(struct net_device *dev,
rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf6x, lio->ifidx);
tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf6x, lio->ifidx);
} else if (OCTEON_CN23XX_PF(oct)) {
- max_combined = lio->linfo.num_txpciq;
+ if (oct->sriov_info.sriov_enabled) {
+ max_combined = lio->linfo.num_txpciq;
+ } else {
+ struct octeon_config *conf23_pf =
+ CHIP_CONF(oct, cn23xx_pf);
+
+ max_combined = CFG_GET_IQ_MAX_Q(conf23_pf);
+ }
combined_count = oct->num_iqs;
} else if (OCTEON_CN23XX_VF(oct)) {
u64 reg_val = 0ULL;
@@ -423,9 +578,15 @@ lio_irq_reallocate_irqs(struct octeon_device *oct, uint32_t num_ioqs)
kfree(oct->irq_name_storage);
oct->irq_name_storage = NULL;
+
+ if (octeon_allocate_ioq_vector(oct, num_ioqs)) {
+ dev_err(&oct->pci_dev->dev, "OCTEON: ioq vector allocation failed\n");
+ return -1;
+ }
+
if (octeon_setup_interrupt(oct, num_ioqs)) {
dev_info(&oct->pci_dev->dev, "Setup interrupt failed\n");
- return 1;
+ return -1;
}
/* Enable Octeon device interrupts */
@@ -455,7 +616,16 @@ lio_ethtool_set_channels(struct net_device *dev,
combined_count = channel->combined_count;
if (OCTEON_CN23XX_PF(oct)) {
- max_combined = channel->max_combined;
+ if (oct->sriov_info.sriov_enabled) {
+ max_combined = lio->linfo.num_txpciq;
+ } else {
+ struct octeon_config *conf23_pf =
+ CHIP_CONF(oct,
+ cn23xx_pf);
+
+ max_combined =
+ CFG_GET_IQ_MAX_Q(conf23_pf);
+ }
} else if (OCTEON_CN23XX_VF(oct)) {
u64 reg_val = 0ULL;
u64 ctrl = CN23XX_VF_SLI_IQ_PKT_CONTROL64(0);
@@ -483,7 +653,6 @@ lio_ethtool_set_channels(struct net_device *dev,
if (lio_reset_queues(dev, combined_count))
return -EINVAL;
- lio_irq_reallocate_irqs(oct, combined_count);
if (stopped)
dev->netdev_ops->ndo_open(dev);
@@ -822,12 +991,120 @@ lio_ethtool_get_ringparam(struct net_device *netdev,
ering->rx_jumbo_max_pending = 0;
}
+static int lio_23xx_reconfigure_queue_count(struct lio *lio)
+{
+ struct octeon_device *oct = lio->oct_dev;
+ struct liquidio_if_cfg_context *ctx;
+ u32 resp_size, ctx_size, data_size;
+ struct liquidio_if_cfg_resp *resp;
+ struct octeon_soft_command *sc;
+ union oct_nic_if_cfg if_cfg;
+ struct lio_version *vdata;
+ u32 ifidx_or_pfnum;
+ int retval;
+ int j;
+
+ resp_size = sizeof(struct liquidio_if_cfg_resp);
+ ctx_size = sizeof(struct liquidio_if_cfg_context);
+ data_size = sizeof(struct lio_version);
+ sc = (struct octeon_soft_command *)
+ octeon_alloc_soft_command(oct, data_size,
+ resp_size, ctx_size);
+ if (!sc) {
+ dev_err(&oct->pci_dev->dev, "%s: Failed to allocate soft command\n",
+ __func__);
+ return -1;
+ }
+
+ resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
+ ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
+ vdata = (struct lio_version *)sc->virtdptr;
+
+ vdata->major = (__force u16)cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
+ vdata->minor = (__force u16)cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION);
+ vdata->micro = (__force u16)cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);
+
+ ifidx_or_pfnum = oct->pf_num;
+ WRITE_ONCE(ctx->cond, 0);
+ ctx->octeon_id = lio_get_device_id(oct);
+ init_waitqueue_head(&ctx->wc);
+
+ if_cfg.u64 = 0;
+ if_cfg.s.num_iqueues = oct->sriov_info.num_pf_rings;
+ if_cfg.s.num_oqueues = oct->sriov_info.num_pf_rings;
+ if_cfg.s.base_queue = oct->sriov_info.pf_srn;
+ if_cfg.s.gmx_port_id = oct->pf_num;
+
+ sc->iq_no = 0;
+ octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
+ OPCODE_NIC_QCOUNT_UPDATE, 0,
+ if_cfg.u64, 0);
+ sc->callback = lio_if_cfg_callback;
+ sc->callback_arg = sc;
+ sc->wait_time = LIO_IFCFG_WAIT_TIME;
+
+ retval = octeon_send_soft_command(oct, sc);
+ if (retval == IQ_SEND_FAILED) {
+ dev_err(&oct->pci_dev->dev,
+ "iq/oq config failed status: %x\n",
+ retval);
+ goto qcount_update_fail;
+ }
+
+ if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) {
+ dev_err(&oct->pci_dev->dev, "Wait interrupted\n");
+ return -1;
+ }
+
+ retval = resp->status;
+ if (retval) {
+ dev_err(&oct->pci_dev->dev, "iq/oq config failed\n");
+ goto qcount_update_fail;
+ }
+
+ octeon_swap_8B_data((u64 *)(&resp->cfg_info),
+ (sizeof(struct liquidio_if_cfg_info)) >> 3);
+
+ lio->ifidx = ifidx_or_pfnum;
+ lio->linfo.num_rxpciq = hweight64(resp->cfg_info.iqmask);
+ lio->linfo.num_txpciq = hweight64(resp->cfg_info.iqmask);
+ for (j = 0; j < lio->linfo.num_rxpciq; j++) {
+ lio->linfo.rxpciq[j].u64 =
+ resp->cfg_info.linfo.rxpciq[j].u64;
+ }
+
+ for (j = 0; j < lio->linfo.num_txpciq; j++) {
+ lio->linfo.txpciq[j].u64 =
+ resp->cfg_info.linfo.txpciq[j].u64;
+ }
+
+ lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
+ lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
+ lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;
+ lio->txq = lio->linfo.txpciq[0].s.q_no;
+ lio->rxq = lio->linfo.rxpciq[0].s.q_no;
+
+ octeon_free_soft_command(oct, sc);
+ dev_info(&oct->pci_dev->dev, "Queue count updated to %d\n",
+ lio->linfo.num_rxpciq);
+
+ return 0;
+
+qcount_update_fail:
+ octeon_free_soft_command(oct, sc);
+
+ return -1;
+}
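
After the firmware acknowledges OPCODE_NIC_QCOUNT_UPDATE, the granted queues come back as 64-bit masks and the counts are recovered with hweight64(). A userspace equivalent using the compiler popcount builtin:

#include <stdint.h>
#include <stdio.h>

static int hweight64_analog(uint64_t mask)
{
	return __builtin_popcountll(mask);
}

int main(void)
{
	uint64_t iqmask = 0xff;	/* e.g. queues 0-7 granted */

	printf("num_txpciq = %d\n", hweight64_analog(iqmask));
	return 0;
}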
+
static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs)
{
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
+ int i, queue_count_update = 0;
struct napi_struct *napi, *n;
- int i, update = 0;
+ int ret;
+
+ schedule_timeout_uninterruptible(msecs_to_jiffies(100));
if (wait_for_pending_requests(oct))
dev_err(&oct->pci_dev->dev, "There were pending requests\n");
@@ -836,7 +1113,7 @@ static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs)
dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");
if (octeon_set_io_queues_off(oct)) {
- dev_err(&oct->pci_dev->dev, "setting io queues off failed\n");
+ dev_err(&oct->pci_dev->dev, "Setting io queues off failed\n");
return -1;
}
@@ -849,9 +1126,40 @@ static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs)
netif_napi_del(napi);
if (num_qs != oct->num_iqs) {
- netif_set_real_num_rx_queues(netdev, num_qs);
- netif_set_real_num_tx_queues(netdev, num_qs);
- update = 1;
+ ret = netif_set_real_num_rx_queues(netdev, num_qs);
+ if (ret) {
+ dev_err(&oct->pci_dev->dev,
+ "Setting real number rx failed\n");
+ return ret;
+ }
+
+ ret = netif_set_real_num_tx_queues(netdev, num_qs);
+ if (ret) {
+ dev_err(&oct->pci_dev->dev,
+ "Setting real number tx failed\n");
+ return ret;
+ }
+
+ /* The value of queue_count_update decides whether it is the
+ * queue count or the descriptor count that is being
+ * re-configured.
+ */
+ queue_count_update = 1;
+ }
+
+ /* Queue re-configuration can happen in two scenarios: SRIOV enabled
+ * and SRIOV disabled. A few steps, such as recreating queue zero and
+ * resetting glists and IRQs, are required in both cases. For the
+ * SRIOV-disabled case, sriov_info for the octeon device must also
+ * be updated.
+ */
+ if (queue_count_update) {
+ lio_delete_glists(lio);
+
+ /* Delete the mbox for a PF with SRIOV disabled, because sriov_info
+ * is about to change.
+ */
+ if ((OCTEON_CN23XX_PF(oct)) && !oct->sriov_info.sriov_enabled)
+ oct->fn_list.free_mbox(oct);
}
for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
@@ -866,24 +1174,91 @@ static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs)
octeon_delete_instr_queue(oct, i);
}
+ if (queue_count_update) {
+ /* For PF re-configure sriov related information */
+ if ((OCTEON_CN23XX_PF(oct)) &&
+ !oct->sriov_info.sriov_enabled) {
+ oct->sriov_info.num_pf_rings = num_qs;
+ if (cn23xx_sriov_config(oct)) {
+ dev_err(&oct->pci_dev->dev,
+ "Queue reset aborted: SRIOV config failed\n");
+ return -1;
+ }
+
+ num_qs = oct->sriov_info.num_pf_rings;
+ }
+ }
+
if (oct->fn_list.setup_device_regs(oct)) {
dev_err(&oct->pci_dev->dev, "Failed to configure device registers\n");
return -1;
}
- if (liquidio_setup_io_queues(oct, 0, num_qs, num_qs)) {
- dev_err(&oct->pci_dev->dev, "IO queues initialization failed\n");
- return -1;
+ /* The following steps are needed only when the queue count is being
+ * re-configured, not when the descriptor count is.
+ */
+ if (queue_count_update) {
+ if (octeon_setup_instr_queues(oct))
+ return -1;
+
+ if (octeon_setup_output_queues(oct))
+ return -1;
+
+ /* Recreate the mbox for a PF with SRIOV disabled */
+ if (OCTEON_CN23XX_PF(oct) && !oct->sriov_info.sriov_enabled) {
+ if (oct->fn_list.setup_mbox(oct)) {
+ dev_err(&oct->pci_dev->dev, "Mailbox setup failed\n");
+ return -1;
+ }
+ }
+
+ /* Delete and recreate the IRQs whether the interface is SRIOV
+ * enabled or disabled.
+ */
+ if (lio_irq_reallocate_irqs(oct, num_qs)) {
+ dev_err(&oct->pci_dev->dev, "IRQs could not be allocated\n");
+ return -1;
+ }
+
+ /* Enable the input and output queues for this Octeon device */
+ if (oct->fn_list.enable_io_queues(oct)) {
+ dev_err(&oct->pci_dev->dev, "Failed to enable input/output queues\n");
+ return -1;
+ }
+
+ for (i = 0; i < oct->num_oqs; i++)
+ writel(oct->droq[i]->max_count,
+ oct->droq[i]->pkts_credit_reg);
+
+ /* Inform firmware of the new queue count. This is required for
+ * firmware to allocate more queues than were configured at load
+ * time.
+ */
+ if (OCTEON_CN23XX_PF(oct) && !oct->sriov_info.sriov_enabled) {
+ if (lio_23xx_reconfigure_queue_count(lio))
+ return -1;
+ }
}
- /* Enable the input and output queues for this Octeon device */
- if (oct->fn_list.enable_io_queues(oct)) {
- dev_err(&oct->pci_dev->dev, "Failed to enable input/output queues");
+ /* Once firmware is aware of the new value, queues can be recreated */
+ if (liquidio_setup_io_queues(oct, 0, num_qs, num_qs)) {
+ dev_err(&oct->pci_dev->dev, "I/O queues creation failed\n");
return -1;
}
- if (update && lio_send_queue_count_update(netdev, num_qs))
- return -1;
+ if (queue_count_update) {
+ if (lio_setup_glists(oct, lio, num_qs)) {
+ dev_err(&oct->pci_dev->dev, "Gather list allocation failed\n");
+ return -1;
+ }
+
+ /* Send firmware the new number of queues if the interface is
+ * a VF or a PF that is SRIOV enabled.
+ */
+ if (oct->sriov_info.sriov_enabled || OCTEON_CN23XX_VF(oct))
+ if (lio_send_queue_count_update(netdev, num_qs))
+ return -1;
+ }
return 0;
}
@@ -928,7 +1303,7 @@ static int lio_ethtool_set_ringparam(struct net_device *netdev,
CFG_SET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx,
rx_count);
- if (lio_reset_queues(netdev, lio->linfo.num_txpciq))
+ if (lio_reset_queues(netdev, oct->num_iqs))
goto err_lio_reset_queues;
if (stopped)
@@ -1063,33 +1438,48 @@ lio_get_ethtool_stats(struct net_device *netdev,
{
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct_dev = lio->oct_dev;
- struct net_device_stats *netstats = &netdev->stats;
+ struct rtnl_link_stats64 lstats;
int i = 0, j;
if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
return;
- netdev->netdev_ops->ndo_get_stats(netdev);
- octnet_get_link_stats(netdev);
-
+ netdev->netdev_ops->ndo_get_stats64(netdev, &lstats);
/*sum of oct->droq[oq_no]->stats->rx_pkts_received */
- data[i++] = CVM_CAST64(netstats->rx_packets);
+ data[i++] = lstats.rx_packets;
/*sum of oct->instr_queue[iq_no]->stats.tx_done */
- data[i++] = CVM_CAST64(netstats->tx_packets);
+ data[i++] = lstats.tx_packets;
/*sum of oct->droq[oq_no]->stats->rx_bytes_received */
- data[i++] = CVM_CAST64(netstats->rx_bytes);
+ data[i++] = lstats.rx_bytes;
/*sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */
- data[i++] = CVM_CAST64(netstats->tx_bytes);
- data[i++] = CVM_CAST64(netstats->rx_errors);
- data[i++] = CVM_CAST64(netstats->tx_errors);
+ data[i++] = lstats.tx_bytes;
+ data[i++] = lstats.rx_errors +
+ oct_dev->link_stats.fromwire.fcs_err +
+ oct_dev->link_stats.fromwire.jabber_err +
+ oct_dev->link_stats.fromwire.l2_err +
+ oct_dev->link_stats.fromwire.frame_err;
+ data[i++] = lstats.tx_errors;
/*sum of oct->droq[oq_no]->stats->rx_dropped +
*oct->droq[oq_no]->stats->dropped_nodispatch +
*oct->droq[oq_no]->stats->dropped_toomany +
*oct->droq[oq_no]->stats->dropped_nomem
*/
- data[i++] = CVM_CAST64(netstats->rx_dropped);
+ data[i++] = lstats.rx_dropped +
+ oct_dev->link_stats.fromwire.fifo_err +
+ oct_dev->link_stats.fromwire.dmac_drop +
+ oct_dev->link_stats.fromwire.red_drops +
+ oct_dev->link_stats.fromwire.fw_err_pko +
+ oct_dev->link_stats.fromwire.fw_err_link +
+ oct_dev->link_stats.fromwire.fw_err_drop;
/*sum of oct->instr_queue[iq_no]->stats.tx_dropped */
- data[i++] = CVM_CAST64(netstats->tx_dropped);
+ data[i++] = lstats.tx_dropped +
+ oct_dev->link_stats.fromhost.max_collision_fail +
+ oct_dev->link_stats.fromhost.max_deferral_fail +
+ oct_dev->link_stats.fromhost.total_collisions +
+ oct_dev->link_stats.fromhost.fw_err_pko +
+ oct_dev->link_stats.fromhost.fw_err_link +
+ oct_dev->link_stats.fromhost.fw_err_drop +
+ oct_dev->link_stats.fromhost.fw_err_pki;
/* firmware tx stats */
/*per_core_stats[cvmx_get_core_num()].link_stats[mdata->from_ifidx].
@@ -1124,6 +1514,10 @@ lio_get_ethtool_stats(struct net_device *netdev,
*/
data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tx_vxlan);
+ /* Multicast packets sent by this port */
+ data[i++] = oct_dev->link_stats.fromhost.fw_total_mcast_sent;
+ data[i++] = oct_dev->link_stats.fromhost.fw_total_bcast_sent;
+
/* mac tx statistics */
/*CVMX_BGXX_CMRX_TX_STAT5 */
data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_pkts_sent);
@@ -1160,6 +1554,9 @@ lio_get_ethtool_stats(struct net_device *netdev,
*fw_total_fwd
*/
data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_fwd);
+ /* Multicast packets received on this port */
+ data[i++] = oct_dev->link_stats.fromwire.fw_total_mcast;
+ data[i++] = oct_dev->link_stats.fromwire.fw_total_bcast;
/*per_core_stats[core_id].link_stats[ifidx].fromwire.jabber_err */
data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.jabber_err);
/*per_core_stats[core_id].link_stats[ifidx].fromwire.l2_err */
@@ -1328,7 +1725,7 @@ static void lio_vf_get_ethtool_stats(struct net_device *netdev,
__attribute__((unused)),
u64 *data)
{
- struct net_device_stats *netstats = &netdev->stats;
+ struct rtnl_link_stats64 lstats;
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct_dev = lio->oct_dev;
int i = 0, j, vj;
@@ -1336,25 +1733,31 @@ static void lio_vf_get_ethtool_stats(struct net_device *netdev,
if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
return;
- netdev->netdev_ops->ndo_get_stats(netdev);
+ netdev->netdev_ops->ndo_get_stats64(netdev, &lstats);
/* sum of oct->droq[oq_no]->stats->rx_pkts_received */
- data[i++] = CVM_CAST64(netstats->rx_packets);
+ data[i++] = lstats.rx_packets;
/* sum of oct->instr_queue[iq_no]->stats.tx_done */
- data[i++] = CVM_CAST64(netstats->tx_packets);
+ data[i++] = lstats.tx_packets;
/* sum of oct->droq[oq_no]->stats->rx_bytes_received */
- data[i++] = CVM_CAST64(netstats->rx_bytes);
+ data[i++] = lstats.rx_bytes;
/* sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */
- data[i++] = CVM_CAST64(netstats->tx_bytes);
- data[i++] = CVM_CAST64(netstats->rx_errors);
- data[i++] = CVM_CAST64(netstats->tx_errors);
+ data[i++] = lstats.tx_bytes;
+ data[i++] = lstats.rx_errors;
+ data[i++] = lstats.tx_errors;
/* sum of oct->droq[oq_no]->stats->rx_dropped +
* oct->droq[oq_no]->stats->dropped_nodispatch +
* oct->droq[oq_no]->stats->dropped_toomany +
* oct->droq[oq_no]->stats->dropped_nomem
*/
- data[i++] = CVM_CAST64(netstats->rx_dropped);
+ data[i++] = lstats.rx_dropped;
/* sum of oct->instr_queue[iq_no]->stats.tx_dropped */
- data[i++] = CVM_CAST64(netstats->tx_dropped);
+ data[i++] = lstats.tx_dropped;
+
+ data[i++] = oct_dev->link_stats.fromwire.fw_total_mcast;
+ data[i++] = oct_dev->link_stats.fromhost.fw_total_mcast_sent;
+ data[i++] = oct_dev->link_stats.fromwire.fw_total_bcast;
+ data[i++] = oct_dev->link_stats.fromhost.fw_total_bcast_sent;
+
/* lio->link_changes */
data[i++] = CVM_CAST64(lio->link_changes);
@@ -1765,161 +2168,6 @@ static int octnet_set_intrmod_cfg(struct lio *lio,
return -EINTR;
}
-static void
-octnet_nic_stats_callback(struct octeon_device *oct_dev,
- u32 status, void *ptr)
-{
- struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
- struct oct_nic_stats_resp *resp =
- (struct oct_nic_stats_resp *)sc->virtrptr;
- struct oct_nic_stats_ctrl *ctrl =
- (struct oct_nic_stats_ctrl *)sc->ctxptr;
- struct nic_rx_stats *rsp_rstats = &resp->stats.fromwire;
- struct nic_tx_stats *rsp_tstats = &resp->stats.fromhost;
-
- struct nic_rx_stats *rstats = &oct_dev->link_stats.fromwire;
- struct nic_tx_stats *tstats = &oct_dev->link_stats.fromhost;
-
- if ((status != OCTEON_REQUEST_TIMEOUT) && !resp->status) {
- octeon_swap_8B_data((u64 *)&resp->stats,
- (sizeof(struct oct_link_stats)) >> 3);
-
- /* RX link-level stats */
- rstats->total_rcvd = rsp_rstats->total_rcvd;
- rstats->bytes_rcvd = rsp_rstats->bytes_rcvd;
- rstats->total_bcst = rsp_rstats->total_bcst;
- rstats->total_mcst = rsp_rstats->total_mcst;
- rstats->runts = rsp_rstats->runts;
- rstats->ctl_rcvd = rsp_rstats->ctl_rcvd;
- /* Accounts for over/under-run of buffers */
- rstats->fifo_err = rsp_rstats->fifo_err;
- rstats->dmac_drop = rsp_rstats->dmac_drop;
- rstats->fcs_err = rsp_rstats->fcs_err;
- rstats->jabber_err = rsp_rstats->jabber_err;
- rstats->l2_err = rsp_rstats->l2_err;
- rstats->frame_err = rsp_rstats->frame_err;
-
- /* RX firmware stats */
- rstats->fw_total_rcvd = rsp_rstats->fw_total_rcvd;
- rstats->fw_total_fwd = rsp_rstats->fw_total_fwd;
- rstats->fw_err_pko = rsp_rstats->fw_err_pko;
- rstats->fw_err_link = rsp_rstats->fw_err_link;
- rstats->fw_err_drop = rsp_rstats->fw_err_drop;
- rstats->fw_rx_vxlan = rsp_rstats->fw_rx_vxlan;
- rstats->fw_rx_vxlan_err = rsp_rstats->fw_rx_vxlan_err;
-
- /* Number of packets that are LROed */
- rstats->fw_lro_pkts = rsp_rstats->fw_lro_pkts;
- /* Number of octets that are LROed */
- rstats->fw_lro_octs = rsp_rstats->fw_lro_octs;
- /* Number of LRO packets formed */
- rstats->fw_total_lro = rsp_rstats->fw_total_lro;
- /* Number of times lRO of packet aborted */
- rstats->fw_lro_aborts = rsp_rstats->fw_lro_aborts;
- rstats->fw_lro_aborts_port = rsp_rstats->fw_lro_aborts_port;
- rstats->fw_lro_aborts_seq = rsp_rstats->fw_lro_aborts_seq;
- rstats->fw_lro_aborts_tsval = rsp_rstats->fw_lro_aborts_tsval;
- rstats->fw_lro_aborts_timer = rsp_rstats->fw_lro_aborts_timer;
- /* intrmod: packet forward rate */
- rstats->fwd_rate = rsp_rstats->fwd_rate;
-
- /* TX link-level stats */
- tstats->total_pkts_sent = rsp_tstats->total_pkts_sent;
- tstats->total_bytes_sent = rsp_tstats->total_bytes_sent;
- tstats->mcast_pkts_sent = rsp_tstats->mcast_pkts_sent;
- tstats->bcast_pkts_sent = rsp_tstats->bcast_pkts_sent;
- tstats->ctl_sent = rsp_tstats->ctl_sent;
- /* Packets sent after one collision*/
- tstats->one_collision_sent = rsp_tstats->one_collision_sent;
- /* Packets sent after multiple collision*/
- tstats->multi_collision_sent = rsp_tstats->multi_collision_sent;
- /* Packets not sent due to max collisions */
- tstats->max_collision_fail = rsp_tstats->max_collision_fail;
- /* Packets not sent due to max deferrals */
- tstats->max_deferral_fail = rsp_tstats->max_deferral_fail;
- /* Accounts for over/under-run of buffers */
- tstats->fifo_err = rsp_tstats->fifo_err;
- tstats->runts = rsp_tstats->runts;
- /* Total number of collisions detected */
- tstats->total_collisions = rsp_tstats->total_collisions;
-
- /* firmware stats */
- tstats->fw_total_sent = rsp_tstats->fw_total_sent;
- tstats->fw_total_fwd = rsp_tstats->fw_total_fwd;
- tstats->fw_err_pko = rsp_tstats->fw_err_pko;
- tstats->fw_err_pki = rsp_tstats->fw_err_pki;
- tstats->fw_err_link = rsp_tstats->fw_err_link;
- tstats->fw_err_drop = rsp_tstats->fw_err_drop;
- tstats->fw_tso = rsp_tstats->fw_tso;
- tstats->fw_tso_fwd = rsp_tstats->fw_tso_fwd;
- tstats->fw_err_tso = rsp_tstats->fw_err_tso;
- tstats->fw_tx_vxlan = rsp_tstats->fw_tx_vxlan;
-
- resp->status = 1;
- } else {
- resp->status = -1;
- }
- complete(&ctrl->complete);
-}
-
-/* Configure interrupt moderation parameters */
-static int octnet_get_link_stats(struct net_device *netdev)
-{
- struct lio *lio = GET_LIO(netdev);
- struct octeon_device *oct_dev = lio->oct_dev;
-
- struct octeon_soft_command *sc;
- struct oct_nic_stats_ctrl *ctrl;
- struct oct_nic_stats_resp *resp;
-
- int retval;
-
- /* Alloc soft command */
- sc = (struct octeon_soft_command *)
- octeon_alloc_soft_command(oct_dev,
- 0,
- sizeof(struct oct_nic_stats_resp),
- sizeof(struct octnic_ctrl_pkt));
-
- if (!sc)
- return -ENOMEM;
-
- resp = (struct oct_nic_stats_resp *)sc->virtrptr;
- memset(resp, 0, sizeof(struct oct_nic_stats_resp));
-
- ctrl = (struct oct_nic_stats_ctrl *)sc->ctxptr;
- memset(ctrl, 0, sizeof(struct oct_nic_stats_ctrl));
- ctrl->netdev = netdev;
- init_completion(&ctrl->complete);
-
- sc->iq_no = lio->linfo.txpciq[0].s.q_no;
-
- octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
- OPCODE_NIC_PORT_STATS, 0, 0, 0);
-
- sc->callback = octnet_nic_stats_callback;
- sc->callback_arg = sc;
- sc->wait_time = 500; /*in milli seconds*/
-
- retval = octeon_send_soft_command(oct_dev, sc);
- if (retval == IQ_SEND_FAILED) {
- octeon_free_soft_command(oct_dev, sc);
- return -EINVAL;
- }
-
- wait_for_completion_timeout(&ctrl->complete, msecs_to_jiffies(1000));
-
- if (resp->status != 1) {
- octeon_free_soft_command(oct_dev, sc);
-
- return -EINVAL;
- }
-
- octeon_free_soft_command(oct_dev, sc);
-
- return 0;
-}
-
static int lio_get_intr_coalesce(struct net_device *netdev,
struct ethtool_coalesce *intr_coal)
{
@@ -2864,6 +3112,7 @@ static int lio_set_priv_flags(struct net_device *netdev, u32 flags)
static const struct ethtool_ops lio_ethtool_ops = {
.get_link_ksettings = lio_get_link_ksettings,
+ .set_link_ksettings = lio_set_link_ksettings,
.get_link = ethtool_op_get_link,
.get_drvinfo = lio_get_drvinfo,
.get_ringparam = lio_ethtool_get_ringparam,
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 603a144d3d9c..8a815bb57177 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -138,33 +138,10 @@ union tx_info {
* by this structure in the NIC module.
*/
-#define OCTNIC_MAX_SG (MAX_SKB_FRAGS)
-
#define OCTNIC_GSO_MAX_HEADER_SIZE 128
#define OCTNIC_GSO_MAX_SIZE \
(CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE)
-/** Structure of a node in list of gather components maintained by
- * NIC driver for each network device.
- */
-struct octnic_gather {
- /** List manipulation. Next and prev pointers. */
- struct list_head list;
-
- /** Size of the gather component at sg in bytes. */
- int sg_size;
-
- /** Number of bytes that sg was adjusted to make it 8B-aligned. */
- int adjust;
-
- /** Gather component that can accommodate max sized fragment list
- * received from the IP layer.
- */
- struct octeon_sg_entry *sg;
-
- dma_addr_t sg_dma_ptr;
-};
-
struct handshake {
struct completion init;
struct completion started;
@@ -520,7 +497,7 @@ static void liquidio_deinit_pci(void)
*/
static inline int check_txq_status(struct lio *lio)
{
- int numqs = lio->netdev->num_tx_queues;
+ int numqs = lio->netdev->real_num_tx_queues;
int ret_val = 0;
int q, iq;
@@ -542,148 +519,6 @@ static inline int check_txq_status(struct lio *lio)
}
/**
- * Remove the node at the head of the list. The list would be empty at
- * the end of this call if there are no more nodes in the list.
- */
-static inline struct list_head *list_delete_head(struct list_head *root)
-{
- struct list_head *node;
-
- if ((root->prev == root) && (root->next == root))
- node = NULL;
- else
- node = root->next;
-
- if (node)
- list_del(node);
-
- return node;
-}
-
-/**
- * \brief Delete gather lists
- * @param lio per-network private data
- */
-static void delete_glists(struct lio *lio)
-{
- struct octnic_gather *g;
- int i;
-
- kfree(lio->glist_lock);
- lio->glist_lock = NULL;
-
- if (!lio->glist)
- return;
-
- for (i = 0; i < lio->linfo.num_txpciq; i++) {
- do {
- g = (struct octnic_gather *)
- list_delete_head(&lio->glist[i]);
- if (g)
- kfree(g);
- } while (g);
-
- if (lio->glists_virt_base && lio->glists_virt_base[i] &&
- lio->glists_dma_base && lio->glists_dma_base[i]) {
- lio_dma_free(lio->oct_dev,
- lio->glist_entry_size * lio->tx_qsize,
- lio->glists_virt_base[i],
- lio->glists_dma_base[i]);
- }
- }
-
- kfree(lio->glists_virt_base);
- lio->glists_virt_base = NULL;
-
- kfree(lio->glists_dma_base);
- lio->glists_dma_base = NULL;
-
- kfree(lio->glist);
- lio->glist = NULL;
-}
-
-/**
- * \brief Setup gather lists
- * @param lio per-network private data
- */
-static int setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
-{
- int i, j;
- struct octnic_gather *g;
-
- lio->glist_lock = kcalloc(num_iqs, sizeof(*lio->glist_lock),
- GFP_KERNEL);
- if (!lio->glist_lock)
- return -ENOMEM;
-
- lio->glist = kcalloc(num_iqs, sizeof(*lio->glist),
- GFP_KERNEL);
- if (!lio->glist) {
- kfree(lio->glist_lock);
- lio->glist_lock = NULL;
- return -ENOMEM;
- }
-
- lio->glist_entry_size =
- ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);
-
- /* allocate memory to store virtual and dma base address of
- * per glist consistent memory
- */
- lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base),
- GFP_KERNEL);
- lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base),
- GFP_KERNEL);
-
- if (!lio->glists_virt_base || !lio->glists_dma_base) {
- delete_glists(lio);
- return -ENOMEM;
- }
-
- for (i = 0; i < num_iqs; i++) {
- int numa_node = dev_to_node(&oct->pci_dev->dev);
-
- spin_lock_init(&lio->glist_lock[i]);
-
- INIT_LIST_HEAD(&lio->glist[i]);
-
- lio->glists_virt_base[i] =
- lio_dma_alloc(oct,
- lio->glist_entry_size * lio->tx_qsize,
- &lio->glists_dma_base[i]);
-
- if (!lio->glists_virt_base[i]) {
- delete_glists(lio);
- return -ENOMEM;
- }
-
- for (j = 0; j < lio->tx_qsize; j++) {
- g = kzalloc_node(sizeof(*g), GFP_KERNEL,
- numa_node);
- if (!g)
- g = kzalloc(sizeof(*g), GFP_KERNEL);
- if (!g)
- break;
-
- g->sg = lio->glists_virt_base[i] +
- (j * lio->glist_entry_size);
-
- g->sg_dma_ptr = lio->glists_dma_base[i] +
- (j * lio->glist_entry_size);
-
- list_add_tail(&g->list, &lio->glist[i]);
- }
-
- if (j != lio->tx_qsize) {
- delete_glists(lio);
- return -ENOMEM;
- }
- }
-
- return 0;
-}
-
-/**
* \brief Print link information
* @param netdev network device
*/
@@ -1077,6 +912,9 @@ liquidio_probe(struct pci_dev *pdev,
/* set linux specific device pointer */
oct_dev->pci_dev = (void *)pdev;
+ oct_dev->subsystem_id = pdev->subsystem_vendor |
+ (pdev->subsystem_device << 16);
+
hs = &handshake[oct_dev->octeon_id];
init_completion(&hs->init);
init_completion(&hs->started);
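
The probe path now records the PCI subsystem ID with subsystem_vendor in the low 16 bits and subsystem_device in the high 16; lio_get_link_ksettings() later compares the packed value against the CN2350/CN2360 25G IDs. A sketch of the packing (0x177d is Cavium's PCI vendor ID; the device value here is a placeholder, not the real OCTEON_CN2350_25GB_SUBSYS_ID):

#include <stdint.h>
#include <stdio.h>

static uint32_t pack_subsys_id(uint16_t vendor, uint16_t device)
{
	/* Mirrors: subsystem_vendor | (subsystem_device << 16) */
	return (uint32_t)vendor | ((uint32_t)device << 16);
}

int main(void)
{
	printf("0x%08x\n", pack_subsys_id(0x177d, 0x0001));
	return 0;
}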
@@ -1471,7 +1309,7 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
cleanup_rx_oom_poll_fn(netdev);
- delete_glists(lio);
+ lio_delete_glists(lio);
free_netdev(netdev);
@@ -1686,7 +1524,7 @@ static void free_netsgbuf(void *buf)
i++;
}
- iq = skb_iq(lio, skb);
+ iq = skb_iq(lio->oct_dev, skb);
spin_lock(&lio->glist_lock[iq]);
list_add_tail(&g->list, &lio->glist[iq]);
spin_unlock(&lio->glist_lock[iq]);
@@ -1729,7 +1567,7 @@ static void free_netsgbuf_with_resp(void *buf)
i++;
}
- iq = skb_iq(lio, skb);
+ iq = skb_iq(lio->oct_dev, skb);
spin_lock(&lio->glist_lock[iq]);
list_add_tail(&g->list, &lio->glist[iq]);
@@ -1928,7 +1766,7 @@ static int load_firmware(struct octeon_device *oct)
ret = request_firmware(&fw, fw_name, &oct->pci_dev->dev);
if (ret) {
- dev_err(&oct->pci_dev->dev, "Request firmware failed. Could not find file %s.\n.",
+ dev_err(&oct->pci_dev->dev, "Request firmware failed. Could not find file %s.\n",
fw_name);
release_firmware(fw);
return ret;
@@ -1942,39 +1780,6 @@ static int load_firmware(struct octeon_device *oct)
}
/**
- * \brief Callback for getting interface configuration
- * @param status status of request
- * @param buf pointer to resp structure
- */
-static void if_cfg_callback(struct octeon_device *oct,
- u32 status __attribute__((unused)),
- void *buf)
-{
- struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
- struct liquidio_if_cfg_resp *resp;
- struct liquidio_if_cfg_context *ctx;
-
- resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
- ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
-
- oct = lio_get_device(ctx->octeon_id);
- if (resp->status)
- dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: 0x%llx (0x%08x)\n",
- CVM_CAST64(resp->status), status);
- WRITE_ONCE(ctx->cond, 1);
-
- snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s",
- resp->cfg_info.liquidio_firmware_version);
-
- /* This barrier is required to be sure that the response has been
- * written fully before waking up the handler
- */
- wmb();
-
- wake_up_interruptible(&ctx->wc);
-}
-
-/**
* \brief Poll routine for checking transmit queue status
* @param work work_struct data structure
*/
@@ -2049,11 +1854,6 @@ static int liquidio_open(struct net_device *netdev)
ifstate_set(lio, LIO_IFSTATE_RUNNING);
- /* Ready for link status updates */
- lio->intf_open = 1;
-
- netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");
-
if (OCTEON_CN23XX_PF(oct)) {
if (!oct->msix_on)
if (setup_tx_poll_fn(netdev))
@@ -2063,7 +1863,12 @@ static int liquidio_open(struct net_device *netdev)
return -1;
}
- start_txqs(netdev);
+ netif_tx_start_all_queues(netdev);
+
+ /* Ready for link status updates */
+ lio->intf_open = 1;
+
+ netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");
/* tell Octeon to start forwarding packets to host */
send_rx_ctrl_cmd(lio, 1);
@@ -2086,11 +1891,15 @@ static int liquidio_stop(struct net_device *netdev)
ifstate_reset(lio, LIO_IFSTATE_RUNNING);
- netif_tx_disable(netdev);
+ /* Stop any link updates */
+ lio->intf_open = 0;
+
+ stop_txqs(netdev);
/* Inform that netif carrier is down */
netif_carrier_off(netdev);
- lio->intf_open = 0;
+ netif_tx_disable(netdev);
+
lio->linfo.link.s.link_up = 0;
lio->link_changes++;
@@ -2252,14 +2061,11 @@ static int liquidio_set_mac(struct net_device *netdev, void *p)
return 0;
}
-/**
- * \brief Net device get_stats
- * @param netdev network device
- */
-static struct net_device_stats *liquidio_get_stats(struct net_device *netdev)
+static void
+liquidio_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *lstats)
{
struct lio *lio = GET_LIO(netdev);
- struct net_device_stats *stats = &netdev->stats;
struct octeon_device *oct;
u64 pkts = 0, drop = 0, bytes = 0;
struct oct_droq_stats *oq_stats;
@@ -2269,7 +2075,7 @@ static struct net_device_stats *liquidio_get_stats(struct net_device *netdev)
oct = lio->oct_dev;
if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
- return stats;
+ return;
for (i = 0; i < oct->num_iqs; i++) {
iq_no = lio->linfo.txpciq[i].s.q_no;
@@ -2279,9 +2085,9 @@ static struct net_device_stats *liquidio_get_stats(struct net_device *netdev)
bytes += iq_stats->tx_tot_bytes;
}
- stats->tx_packets = pkts;
- stats->tx_bytes = bytes;
- stats->tx_dropped = drop;
+ lstats->tx_packets = pkts;
+ lstats->tx_bytes = bytes;
+ lstats->tx_dropped = drop;
pkts = 0;
drop = 0;
@@ -2298,11 +2104,34 @@ static struct net_device_stats *liquidio_get_stats(struct net_device *netdev)
bytes += oq_stats->rx_bytes_received;
}
- stats->rx_bytes = bytes;
- stats->rx_packets = pkts;
- stats->rx_dropped = drop;
-
- return stats;
+ lstats->rx_bytes = bytes;
+ lstats->rx_packets = pkts;
+ lstats->rx_dropped = drop;
+
+ octnet_get_link_stats(netdev);
+ lstats->multicast = oct->link_stats.fromwire.fw_total_mcast;
+ lstats->collisions = oct->link_stats.fromhost.total_collisions;
+
+ /* detailed rx_errors: */
+ lstats->rx_length_errors = oct->link_stats.fromwire.l2_err;
+ /* received packets with CRC errors */
+ lstats->rx_crc_errors = oct->link_stats.fromwire.fcs_err;
+ /* received frame alignment errors */
+ lstats->rx_frame_errors = oct->link_stats.fromwire.frame_err;
+ /* receiver FIFO overruns */
+ lstats->rx_fifo_errors = oct->link_stats.fromwire.fifo_err;
+
+ lstats->rx_errors = lstats->rx_length_errors + lstats->rx_crc_errors +
+ lstats->rx_frame_errors + lstats->rx_fifo_errors;
+
+ /* detailed tx_errors */
+ lstats->tx_aborted_errors = oct->link_stats.fromhost.fw_err_pko;
+ lstats->tx_carrier_errors = oct->link_stats.fromhost.fw_err_link;
+ lstats->tx_fifo_errors = oct->link_stats.fromhost.fifo_err;
+
+ lstats->tx_errors = lstats->tx_aborted_errors +
+ lstats->tx_carrier_errors +
+ lstats->tx_fifo_errors;
}
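
The new ndo_get_stats64 handler folds the per-cause firmware counters into the classic rtnl totals, as restated below (the struct is a stand-in for the relevant rtnl_link_stats64 fields, not the kernel type):

#include <stdint.h>

struct stats_sketch {
	uint64_t rx_length_errors, rx_crc_errors, rx_frame_errors,
		 rx_fifo_errors, rx_errors;
	uint64_t tx_aborted_errors, tx_carrier_errors, tx_fifo_errors,
		 tx_errors;
};

/* Sum the detailed causes into the aggregate error counters,
 * exactly as the handler above does.
 */
void fold_errors(struct stats_sketch *s)
{
	s->rx_errors = s->rx_length_errors + s->rx_crc_errors +
		       s->rx_frame_errors + s->rx_fifo_errors;
	s->tx_errors = s->tx_aborted_errors + s->tx_carrier_errors +
		       s->tx_fifo_errors;
}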
/**
@@ -2510,7 +2339,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
lio = GET_LIO(netdev);
oct = lio->oct_dev;
- q_idx = skb_iq(lio, skb);
+ q_idx = skb_iq(oct, skb);
tag = q_idx;
iq_no = lio->linfo.txpciq[q_idx].s.q_no;
@@ -2585,6 +2414,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
if (dma_mapping_error(&oct->pci_dev->dev, dptr)) {
dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n",
__func__);
+ stats->tx_dmamap_fail++;
return NETDEV_TX_BUSY;
}
@@ -2602,7 +2432,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
spin_lock(&lio->glist_lock[q_idx]);
g = (struct octnic_gather *)
- list_delete_head(&lio->glist[q_idx]);
+ lio_list_delete_head(&lio->glist[q_idx]);
spin_unlock(&lio->glist_lock[q_idx]);
if (!g) {
@@ -2624,6 +2454,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) {
dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n",
__func__);
+ stats->tx_dmamap_fail++;
return NETDEV_TX_BUSY;
}
add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0);
@@ -3324,11 +3155,36 @@ static const struct switchdev_ops lio_pf_switchdev_ops = {
.switchdev_port_attr_get = lio_pf_switchdev_attr_get,
};
+static int liquidio_get_vf_stats(struct net_device *netdev, int vfidx,
+ struct ifla_vf_stats *vf_stats)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct oct_vf_stats stats;
+ int ret;
+
+ if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
+ return -EINVAL;
+
+ memset(&stats, 0, sizeof(struct oct_vf_stats));
+ ret = cn23xx_get_vf_stats(oct, vfidx, &stats);
+ if (!ret) {
+ vf_stats->rx_packets = stats.rx_packets;
+ vf_stats->tx_packets = stats.tx_packets;
+ vf_stats->rx_bytes = stats.rx_bytes;
+ vf_stats->tx_bytes = stats.tx_bytes;
+ vf_stats->broadcast = stats.broadcast;
+ vf_stats->multicast = stats.multicast;
+ }
+
+ return ret;
+}
+
static const struct net_device_ops lionetdevops = {
.ndo_open = liquidio_open,
.ndo_stop = liquidio_stop,
.ndo_start_xmit = liquidio_xmit,
- .ndo_get_stats = liquidio_get_stats,
+ .ndo_get_stats64 = liquidio_get_stats64,
.ndo_set_mac_address = liquidio_set_mac,
.ndo_set_rx_mode = liquidio_set_mcast_list,
.ndo_tx_timeout = liquidio_tx_timeout,
@@ -3346,6 +3202,7 @@ static const struct net_device_ops lionetdevops = {
.ndo_get_vf_config = liquidio_get_vf_config,
.ndo_set_vf_trust = liquidio_set_vf_trust,
.ndo_set_vf_link_state = liquidio_set_vf_link_state,
+ .ndo_get_vf_stats = liquidio_get_vf_stats,
};
/** \brief Entry point for the liquidio module
@@ -3448,6 +3305,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
struct liquidio_if_cfg_resp *resp;
struct octdev_props *props;
int retval, num_iqueues, num_oqueues;
+ int max_num_queues = 0;
union oct_nic_if_cfg if_cfg;
unsigned int base_queue;
unsigned int gmx_port_id;
@@ -3528,9 +3386,9 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
OPCODE_NIC_IF_CFG, 0,
if_cfg.u64, 0);
- sc->callback = if_cfg_callback;
+ sc->callback = lio_if_cfg_callback;
sc->callback_arg = sc;
- sc->wait_time = 3000;
+ sc->wait_time = LIO_IFCFG_WAIT_TIME;
retval = octeon_send_soft_command(octeon_dev, sc);
if (retval == IQ_SEND_FAILED) {
@@ -3584,11 +3442,20 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
resp->cfg_info.oqmask);
goto setup_nic_dev_fail;
}
+
+ if (OCTEON_CN6XXX(octeon_dev)) {
+ max_num_queues = CFG_GET_IQ_MAX_Q(CHIP_CONF(octeon_dev,
+ cn6xxx));
+ } else if (OCTEON_CN23XX_PF(octeon_dev)) {
+ max_num_queues = CFG_GET_IQ_MAX_Q(CHIP_CONF(octeon_dev,
+ cn23xx_pf));
+ }
+
dev_dbg(&octeon_dev->pci_dev->dev,
- "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n",
+ "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d max_num_queues: %d\n",
i, resp->cfg_info.iqmask, resp->cfg_info.oqmask,
- num_iqueues, num_oqueues);
- netdev = alloc_etherdev_mq(LIO_SIZE, num_iqueues);
+ num_iqueues, num_oqueues, max_num_queues);
+ netdev = alloc_etherdev_mq(LIO_SIZE, max_num_queues);
if (!netdev) {
dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n");
@@ -3603,6 +3470,20 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
netdev->netdev_ops = &lionetdevops;
SWITCHDEV_SET_OPS(netdev, &lio_pf_switchdev_ops);
+ retval = netif_set_real_num_rx_queues(netdev, num_oqueues);
+ if (retval) {
+ dev_err(&octeon_dev->pci_dev->dev,
+ "setting real number rx failed\n");
+ goto setup_nic_dev_fail;
+ }
+
+ retval = netif_set_real_num_tx_queues(netdev, num_iqueues);
+ if (retval) {
+ dev_err(&octeon_dev->pci_dev->dev,
+ "setting real number tx failed\n");
+ goto setup_nic_dev_fail;
+ }
+
lio = GET_LIO(netdev);
memset(lio, 0, sizeof(struct lio));
@@ -3724,7 +3605,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);
- if (setup_glists(octeon_dev, lio, num_iqueues)) {
+ if (lio_setup_glists(octeon_dev, lio, num_iqueues)) {
dev_err(&octeon_dev->pci_dev->dev,
"Gather list allocation failed\n");
goto setup_nic_dev_fail;
@@ -3786,6 +3667,23 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
"NIC ifidx:%d Setup successful\n", i);
octeon_free_soft_command(octeon_dev, sc);
+
+ if (octeon_dev->subsystem_id ==
+ OCTEON_CN2350_25GB_SUBSYS_ID ||
+ octeon_dev->subsystem_id ==
+ OCTEON_CN2360_25GB_SUBSYS_ID) {
+ liquidio_get_speed(lio);
+
+ if (octeon_dev->speed_setting == 0) {
+ octeon_dev->speed_setting = 25;
+ octeon_dev->no_speed_setting = 1;
+ }
+ } else {
+ octeon_dev->no_speed_setting = 1;
+ octeon_dev->speed_setting = 10;
+ }
+ octeon_dev->speed_boot = octeon_dev->speed_setting;
+
}
devlink = devlink_alloc(&liquidio_devlink_ops,
@@ -4223,7 +4121,9 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
}
atomic_set(&octeon_dev->status, OCT_DEV_MBOX_SETUP_DONE);
- if (octeon_allocate_ioq_vector(octeon_dev)) {
+ if (octeon_allocate_ioq_vector
+ (octeon_dev,
+ octeon_dev->sriov_info.num_pf_rings)) {
dev_err(&octeon_dev->pci_dev->dev, "OCTEON: ioq vector allocation failed\n");
return 1;
}
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index f92dfa411de6..7fa0212873ac 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -69,30 +69,10 @@ union tx_info {
} s;
};
-#define OCTNIC_MAX_SG (MAX_SKB_FRAGS)
-
#define OCTNIC_GSO_MAX_HEADER_SIZE 128
#define OCTNIC_GSO_MAX_SIZE \
(CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE)
-struct octnic_gather {
- /* List manipulation. Next and prev pointers. */
- struct list_head list;
-
- /* Size of the gather component at sg in bytes. */
- int sg_size;
-
- /* Number of bytes that sg was adjusted to make it 8B-aligned. */
- int adjust;
-
- /* Gather component that can accommodate max sized fragment list
- * received from the IP layer.
- */
- struct octeon_sg_entry *sg;
-
- dma_addr_t sg_dma_ptr;
-};
-
static int
liquidio_vf_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void liquidio_vf_remove(struct pci_dev *pdev);
@@ -285,142 +265,6 @@ static struct pci_driver liquidio_vf_pci_driver = {
};
/**
- * Remove the node at the head of the list. The list would be empty at
- * the end of this call if there are no more nodes in the list.
- */
-static struct list_head *list_delete_head(struct list_head *root)
-{
- struct list_head *node;
-
- if ((root->prev == root) && (root->next == root))
- node = NULL;
- else
- node = root->next;
-
- if (node)
- list_del(node);
-
- return node;
-}
-
-/**
- * \brief Delete gather lists
- * @param lio per-network private data
- */
-static void delete_glists(struct lio *lio)
-{
- struct octnic_gather *g;
- int i;
-
- kfree(lio->glist_lock);
- lio->glist_lock = NULL;
-
- if (!lio->glist)
- return;
-
- for (i = 0; i < lio->linfo.num_txpciq; i++) {
- do {
- g = (struct octnic_gather *)
- list_delete_head(&lio->glist[i]);
- kfree(g);
- } while (g);
-
- if (lio->glists_virt_base && lio->glists_virt_base[i] &&
- lio->glists_dma_base && lio->glists_dma_base[i]) {
- lio_dma_free(lio->oct_dev,
- lio->glist_entry_size * lio->tx_qsize,
- lio->glists_virt_base[i],
- lio->glists_dma_base[i]);
- }
- }
-
- kfree(lio->glists_virt_base);
- lio->glists_virt_base = NULL;
-
- kfree(lio->glists_dma_base);
- lio->glists_dma_base = NULL;
-
- kfree(lio->glist);
- lio->glist = NULL;
-}
-
-/**
- * \brief Setup gather lists
- * @param lio per-network private data
- */
-static int setup_glists(struct lio *lio, int num_iqs)
-{
- struct octnic_gather *g;
- int i, j;
-
- lio->glist_lock =
- kzalloc(sizeof(*lio->glist_lock) * num_iqs, GFP_KERNEL);
- if (!lio->glist_lock)
- return -ENOMEM;
-
- lio->glist =
- kzalloc(sizeof(*lio->glist) * num_iqs, GFP_KERNEL);
- if (!lio->glist) {
- kfree(lio->glist_lock);
- lio->glist_lock = NULL;
- return -ENOMEM;
- }
-
- lio->glist_entry_size =
- ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);
-
- /* allocate memory to store virtual and dma base address of
- * per glist consistent memory
- */
- lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base),
- GFP_KERNEL);
- lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base),
- GFP_KERNEL);
-
- if (!lio->glists_virt_base || !lio->glists_dma_base) {
- delete_glists(lio);
- return -ENOMEM;
- }
-
- for (i = 0; i < num_iqs; i++) {
- spin_lock_init(&lio->glist_lock[i]);
-
- INIT_LIST_HEAD(&lio->glist[i]);
-
- lio->glists_virt_base[i] =
- lio_dma_alloc(lio->oct_dev,
- lio->glist_entry_size * lio->tx_qsize,
- &lio->glists_dma_base[i]);
-
- if (!lio->glists_virt_base[i]) {
- delete_glists(lio);
- return -ENOMEM;
- }
-
- for (j = 0; j < lio->tx_qsize; j++) {
- g = kzalloc(sizeof(*g), GFP_KERNEL);
- if (!g)
- break;
-
- g->sg = lio->glists_virt_base[i] +
- (j * lio->glist_entry_size);
-
- g->sg_dma_ptr = lio->glists_dma_base[i] +
- (j * lio->glist_entry_size);
-
- list_add_tail(&g->list, &lio->glist[i]);
- }
-
- if (j != lio->tx_qsize) {
- delete_glists(lio);
- return -ENOMEM;
- }
- }
-
- return 0;
-}
-
-/**
* \brief Print link information
* @param netdev network device
*/
@@ -567,6 +411,9 @@ liquidio_vf_probe(struct pci_dev *pdev,
/* set linux specific device pointer */
oct_dev->pci_dev = pdev;
+ oct_dev->subsystem_id = pdev->subsystem_vendor |
+ (pdev->subsystem_device << 16);
+
if (octeon_device_init(oct_dev)) {
liquidio_vf_remove(pdev);
return -ENOMEM;
@@ -856,7 +703,7 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
cleanup_link_status_change_wq(netdev);
- delete_glists(lio);
+ lio_delete_glists(lio);
free_netdev(netdev);
@@ -1005,7 +852,7 @@ static void free_netsgbuf(void *buf)
i++;
}
- iq = skb_iq(lio, skb);
+ iq = skb_iq(lio->oct_dev, skb);
spin_lock(&lio->glist_lock[iq]);
list_add_tail(&g->list, &lio->glist[iq]);
@@ -1049,7 +896,7 @@ static void free_netsgbuf_with_resp(void *buf)
i++;
}
- iq = skb_iq(lio, skb);
+ iq = skb_iq(lio->oct_dev, skb);
spin_lock(&lio->glist_lock[iq]);
list_add_tail(&g->list, &lio->glist[iq]);
@@ -1059,38 +906,6 @@ static void free_netsgbuf_with_resp(void *buf)
}
/**
- * \brief Callback for getting interface configuration
- * @param status status of request
- * @param buf pointer to resp structure
- */
-static void if_cfg_callback(struct octeon_device *oct,
- u32 status __attribute__((unused)), void *buf)
-{
- struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
- struct liquidio_if_cfg_context *ctx;
- struct liquidio_if_cfg_resp *resp;
-
- resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
- ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
-
- oct = lio_get_device(ctx->octeon_id);
- if (resp->status)
- dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: %llx\n",
- CVM_CAST64(resp->status));
- WRITE_ONCE(ctx->cond, 1);
-
- snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s",
- resp->cfg_info.liquidio_firmware_version);
-
- /* This barrier is required to be sure that the response has been
- * written fully before waking up the handler
- */
- wmb();
-
- wake_up_interruptible(&ctx->wc);
-}
-
-/**
* \brief Net device open for LiquidIO
* @param netdev network device
*/
@@ -1336,24 +1151,21 @@ static int liquidio_set_mac(struct net_device *netdev, void *p)
return 0;
}
-/**
- * \brief Net device get_stats
- * @param netdev network device
- */
-static struct net_device_stats *liquidio_get_stats(struct net_device *netdev)
+static void
+liquidio_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *lstats)
{
struct lio *lio = GET_LIO(netdev);
- struct net_device_stats *stats = &netdev->stats;
+ struct octeon_device *oct;
u64 pkts = 0, drop = 0, bytes = 0;
struct oct_droq_stats *oq_stats;
struct oct_iq_stats *iq_stats;
- struct octeon_device *oct;
int i, iq_no, oq_no;
oct = lio->oct_dev;
if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
- return stats;
+ return;
for (i = 0; i < oct->num_iqs; i++) {
iq_no = lio->linfo.txpciq[i].s.q_no;
@@ -1363,9 +1175,9 @@ static struct net_device_stats *liquidio_get_stats(struct net_device *netdev)
bytes += iq_stats->tx_tot_bytes;
}
- stats->tx_packets = pkts;
- stats->tx_bytes = bytes;
- stats->tx_dropped = drop;
+ lstats->tx_packets = pkts;
+ lstats->tx_bytes = bytes;
+ lstats->tx_dropped = drop;
pkts = 0;
drop = 0;
@@ -1382,11 +1194,29 @@ static struct net_device_stats *liquidio_get_stats(struct net_device *netdev)
bytes += oq_stats->rx_bytes_received;
}
- stats->rx_bytes = bytes;
- stats->rx_packets = pkts;
- stats->rx_dropped = drop;
+ lstats->rx_bytes = bytes;
+ lstats->rx_packets = pkts;
+ lstats->rx_dropped = drop;
+
+ octnet_get_link_stats(netdev);
+ lstats->multicast = oct->link_stats.fromwire.fw_total_mcast;
+
+ /* detailed rx_errors: */
+ lstats->rx_length_errors = oct->link_stats.fromwire.l2_err;
+ /* received pkt with CRC error */
+ lstats->rx_crc_errors = oct->link_stats.fromwire.fcs_err;
+ /* received frame alignment error */
+ lstats->rx_frame_errors = oct->link_stats.fromwire.frame_err;
- return stats;
+ lstats->rx_errors = lstats->rx_length_errors + lstats->rx_crc_errors +
+ lstats->rx_frame_errors;
+
+ /* detailed tx_errors */
+ lstats->tx_aborted_errors = oct->link_stats.fromhost.fw_err_pko;
+ lstats->tx_carrier_errors = oct->link_stats.fromhost.fw_err_link;
+
+ lstats->tx_errors = lstats->tx_aborted_errors +
+ lstats->tx_carrier_errors;
}
/**
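
A note on the conversion above: .ndo_get_stats64 fills a caller-provided struct rtnl_link_stats64 and returns void (dev_get_stats() zeroes the structure before the call), instead of returning a pointer to a shared struct net_device_stats. A minimal sketch of the same pattern, with hypothetical per-queue counters (example_priv and its fields are illustrative, not from this driver):

#include <linux/netdevice.h>

/* Illustrative private state; not taken from this driver. */
struct example_priv {
	int num_txqs;
	struct { u64 packets; u64 bytes; } *txq_stats;
};

/* .ndo_get_stats64 fills the caller-provided structure in place;
 * dev_get_stats() has already zeroed *lstats, so plain accumulation
 * is enough and no shared net_device_stats is needed.
 */
static void example_get_stats64(struct net_device *netdev,
				struct rtnl_link_stats64 *lstats)
{
	struct example_priv *priv = netdev_priv(netdev);
	int i;

	for (i = 0; i < priv->num_txqs; i++) {
		lstats->tx_packets += priv->txq_stats[i].packets;
		lstats->tx_bytes   += priv->txq_stats[i].bytes;
	}
}
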
@@ -1580,7 +1410,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
lio = GET_LIO(netdev);
oct = lio->oct_dev;
- q_idx = skb_iq(lio, skb);
+ q_idx = skb_iq(lio->oct_dev, skb);
tag = q_idx;
iq_no = lio->linfo.txpciq[q_idx].s.q_no;
@@ -1661,8 +1491,8 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
int i, frags;
spin_lock(&lio->glist_lock[q_idx]);
- g = (struct octnic_gather *)list_delete_head(
- &lio->glist[q_idx]);
+ g = (struct octnic_gather *)
+ lio_list_delete_head(&lio->glist[q_idx]);
spin_unlock(&lio->glist_lock[q_idx]);
if (!g) {
@@ -2034,7 +1864,7 @@ static const struct net_device_ops lionetdevops = {
.ndo_open = liquidio_open,
.ndo_stop = liquidio_stop,
.ndo_start_xmit = liquidio_xmit,
- .ndo_get_stats = liquidio_get_stats,
+ .ndo_get_stats64 = liquidio_get_stats64,
.ndo_set_mac_address = liquidio_set_mac,
.ndo_set_rx_mode = liquidio_set_mcast_list,
.ndo_tx_timeout = liquidio_tx_timeout,
@@ -2156,7 +1986,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
OPCODE_NIC_IF_CFG, 0, if_cfg.u64,
0);
- sc->callback = if_cfg_callback;
+ sc->callback = lio_if_cfg_callback;
sc->callback_arg = sc;
sc->wait_time = 5000;
@@ -2273,6 +2103,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
netdev->features = (lio->dev_capability & ~NETIF_F_LRO);
netdev->hw_features = lio->dev_capability;
+ netdev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
/* MTU range: 68 - 16000 */
netdev->min_mtu = LIO_MIN_MTU_SIZE;
@@ -2321,7 +2152,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);
- if (setup_glists(lio, num_iqueues)) {
+ if (lio_setup_glists(octeon_dev, lio, num_iqueues)) {
dev_err(&octeon_dev->pci_dev->dev,
"Gather list allocation failed\n");
goto setup_nic_dev_fail;
@@ -2371,6 +2202,8 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
"NIC ifidx:%d Setup successful\n", i);
octeon_free_soft_command(octeon_dev, sc);
+
+ octeon_dev->no_speed_setting = 1;
}
return 0;
@@ -2512,7 +2345,7 @@ static int octeon_device_init(struct octeon_device *oct)
}
atomic_set(&oct->status, OCT_DEV_MBOX_SETUP_DONE);
- if (octeon_allocate_ioq_vector(oct)) {
+ if (octeon_allocate_ioq_vector(oct, oct->sriov_info.rings_per_vf)) {
dev_err(&oct->pci_dev->dev, "ioq vector allocation failed\n");
return 1;
}
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
index 2adafa366d3f..ddd7431579f4 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
@@ -201,13 +201,14 @@ lio_vf_rep_get_stats64(struct net_device *dev,
{
struct lio_vf_rep_desc *vf_rep = netdev_priv(dev);
- stats64->tx_packets = vf_rep->stats.tx_packets;
- stats64->tx_bytes = vf_rep->stats.tx_bytes;
- stats64->tx_dropped = vf_rep->stats.tx_dropped;
-
- stats64->rx_packets = vf_rep->stats.rx_packets;
- stats64->rx_bytes = vf_rep->stats.rx_bytes;
- stats64->rx_dropped = vf_rep->stats.rx_dropped;
+ /* Swap tx and rx stats as VF rep is a switch port */
+ stats64->tx_packets = vf_rep->stats.rx_packets;
+ stats64->tx_bytes = vf_rep->stats.rx_bytes;
+ stats64->tx_dropped = vf_rep->stats.rx_dropped;
+
+ stats64->rx_packets = vf_rep->stats.tx_packets;
+ stats64->rx_bytes = vf_rep->stats.tx_bytes;
+ stats64->rx_dropped = vf_rep->stats.tx_dropped;
}
static int
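
The tx/rx swap above follows the usual switchdev representor convention: the representor models the switch-facing end of the VF's link, so packets the VF transmits appear as the representor's receive traffic and vice versa. A reduced sketch of the same idea (vf_counters is an illustrative type, not from this driver):

/* Illustrative counters, kept from the VF's point of view. */
struct vf_counters {
	u64 tx_packets, tx_bytes;
	u64 rx_packets, rx_bytes;
};

static void rep_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *s)
{
	struct vf_counters *c = netdev_priv(dev);

	s->tx_packets = c->rx_packets;	/* VF received == rep sent */
	s->tx_bytes   = c->rx_bytes;
	s->rx_packets = c->tx_packets;	/* VF sent == rep received */
	s->rx_bytes   = c->tx_bytes;
}
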
diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
index 75eea83c7cc6..690424b6781a 100644
--- a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
+++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
@@ -28,7 +28,7 @@
#define LIQUIDIO_PACKAGE ""
#define LIQUIDIO_BASE_MAJOR_VERSION 1
#define LIQUIDIO_BASE_MINOR_VERSION 7
-#define LIQUIDIO_BASE_MICRO_VERSION 0
+#define LIQUIDIO_BASE_MICRO_VERSION 2
#define LIQUIDIO_BASE_VERSION __stringify(LIQUIDIO_BASE_MAJOR_VERSION) "." \
__stringify(LIQUIDIO_BASE_MINOR_VERSION)
#define LIQUIDIO_MICRO_VERSION "." __stringify(LIQUIDIO_BASE_MICRO_VERSION)
@@ -84,6 +84,7 @@ enum octeon_tag_type {
#define OPCODE_NIC_IF_CFG 0x09
#define OPCODE_NIC_VF_DRV_NOTICE 0x0A
#define OPCODE_NIC_INTRMOD_PARAMS 0x0B
+#define OPCODE_NIC_QCOUNT_UPDATE 0x12
#define OPCODE_NIC_SET_TRUSTED_VF 0x13
#define OPCODE_NIC_SYNC_OCTEON_TIME 0x14
#define VF_DRV_LOADED 1
@@ -92,6 +93,7 @@ enum octeon_tag_type {
#define OPCODE_NIC_VF_REP_PKT 0x15
#define OPCODE_NIC_VF_REP_CMD 0x16
+#define OPCODE_NIC_UBOOT_CTL 0x17
#define CORE_DRV_TEST_SCATTER_OP 0xFFF5
@@ -248,6 +250,9 @@ static inline void add_sg_size(struct octeon_sg_entry *sg_entry,
#define OCTNET_CMD_VLAN_FILTER_ENABLE 0x1
#define OCTNET_CMD_VLAN_FILTER_DISABLE 0x0
+#define SEAPI_CMD_SPEED_SET 0x2
+#define SEAPI_CMD_SPEED_GET 0x3
+
#define LIO_CMD_WAIT_TM 100
/* RX(packets coming from wire) Checksum verification flags */
@@ -779,23 +784,32 @@ struct liquidio_if_cfg_info {
/** Stats for each NIC port in RX direction. */
struct nic_rx_stats {
/* link-level stats */
- u64 total_rcvd;
- u64 bytes_rcvd;
- u64 total_bcst;
- u64 total_mcst;
- u64 runts;
- u64 ctl_rcvd;
- u64 fifo_err; /* Accounts for over/under-run of buffers */
- u64 dmac_drop;
- u64 fcs_err;
- u64 jabber_err;
- u64 l2_err;
- u64 frame_err;
+ u64 total_rcvd; /* Received packets */
+ u64 bytes_rcvd; /* Octets of received packets */
+ u64 total_bcst; /* Number of non-dropped L2 broadcast packets */
+ u64 total_mcst; /* Number of non-dropped L2 multicast packets */
+ u64 runts; /* Packets shorter than allowed */
+ u64 ctl_rcvd; /* Received PAUSE packets */
+ u64 fifo_err; /* Packets dropped due to RX FIFO full */
+ u64 dmac_drop; /* Packets dropped by the DMAC filter */
+ u64 fcs_err; /* Sum of fragment, overrun, and FCS errors */
+ u64 jabber_err; /* Packets larger than allowed */
+ u64 l2_err; /* Sum of DMA, parity, PCAM access, no memory,
+ * buffer overflow, malformed L2 header or
+ * length, oversize errors
+ **/
+ u64 frame_err; /* Sum of IPv4 and L4 checksum errors */
+ u64 red_drops; /* Packets dropped by RED due to buffer
+ * exhaustion
+ **/
/* firmware stats */
u64 fw_total_rcvd;
u64 fw_total_fwd;
u64 fw_total_fwd_bytes;
+ u64 fw_total_mcast;
+ u64 fw_total_bcast;
+
u64 fw_err_pko;
u64 fw_err_link;
u64 fw_err_drop;
@@ -806,11 +820,11 @@ struct nic_rx_stats {
u64 fw_lro_pkts; /* Number of packets that are LROed */
u64 fw_lro_octs; /* Number of octets that are LROed */
u64 fw_total_lro; /* Number of LRO packets formed */
- u64 fw_lro_aborts; /* Number of times lRO of packet aborted */
+ u64 fw_lro_aborts; /* Number of times LRO of packet aborted */
u64 fw_lro_aborts_port;
u64 fw_lro_aborts_seq;
u64 fw_lro_aborts_tsval;
- u64 fw_lro_aborts_timer;
+ u64 fw_lro_aborts_timer; /* Timer setting error */
/* intrmod: packet forward rate */
u64 fwd_rate;
};
@@ -818,23 +832,42 @@ struct nic_rx_stats {
/** Stats for each NIC port in RX direction. */
struct nic_tx_stats {
/* link-level stats */
- u64 total_pkts_sent;
- u64 total_bytes_sent;
- u64 mcast_pkts_sent;
- u64 bcast_pkts_sent;
- u64 ctl_sent;
- u64 one_collision_sent; /* Packets sent after one collision*/
- u64 multi_collision_sent; /* Packets sent after multiple collision*/
- u64 max_collision_fail; /* Packets not sent due to max collisions */
- u64 max_deferral_fail; /* Packets not sent due to max deferrals */
- u64 fifo_err; /* Accounts for over/under-run of buffers */
- u64 runts;
- u64 total_collisions; /* Total number of collisions detected */
+ u64 total_pkts_sent; /* Total frames sent on the interface */
+ u64 total_bytes_sent; /* Total octets sent on the interface */
+ u64 mcast_pkts_sent; /* Packets sent to the multicast DMAC */
+ u64 bcast_pkts_sent; /* Packets sent to a broadcast DMAC */
+ u64 ctl_sent; /* Control/PAUSE packets sent */
+ u64 one_collision_sent; /* Packets sent that experienced a
+ * single collision before successful
+ * transmission
+ **/
+ u64 multi_collision_sent; /* Packets sent that experienced
+ * multiple collisions before successful
+ * transmission
+ **/
+ u64 max_collision_fail; /* Packets dropped due to excessive
+ * collisions
+ **/
+ u64 max_deferral_fail; /* Packets not sent due to max
+ * deferrals
+ **/
+ u64 fifo_err; /* Packets sent that experienced a
+ * transmit underflow and were
+ * truncated
+ **/
+ u64 runts; /* Packets sent with an octet count
+ * less than 64
+ **/
+ u64 total_collisions; /* Packets dropped due to excessive
+ * collisions
+ **/
/* firmware stats */
u64 fw_total_sent;
u64 fw_total_fwd;
u64 fw_total_fwd_bytes;
+ u64 fw_total_mcast_sent;
+ u64 fw_total_bcast_sent;
u64 fw_err_pko;
u64 fw_err_link;
u64 fw_err_drop;
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
index f38abf626412..f878a552fef3 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
@@ -824,23 +824,18 @@ int octeon_deregister_device(struct octeon_device *oct)
}
int
-octeon_allocate_ioq_vector(struct octeon_device *oct)
+octeon_allocate_ioq_vector(struct octeon_device *oct, u32 num_ioqs)
{
- int i, num_ioqs = 0;
struct octeon_ioq_vector *ioq_vector;
int cpu_num;
int size;
-
- if (OCTEON_CN23XX_PF(oct))
- num_ioqs = oct->sriov_info.num_pf_rings;
- else if (OCTEON_CN23XX_VF(oct))
- num_ioqs = oct->sriov_info.rings_per_vf;
+ int i;
size = sizeof(struct octeon_ioq_vector) * num_ioqs;
oct->ioq_vector = vzalloc(size);
if (!oct->ioq_vector)
- return 1;
+ return -1;
for (i = 0; i < num_ioqs; i++) {
ioq_vector = &oct->ioq_vector[i];
ioq_vector->oct_dev = oct;
@@ -856,6 +851,7 @@ octeon_allocate_ioq_vector(struct octeon_device *oct)
else
ioq_vector->ioq_num = i;
}
+
return 0;
}
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
index 91937cc5c1d7..94a4ed88d618 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
@@ -43,6 +43,13 @@
#define OCTEON_CN23XX_REV_1_1 0x01
#define OCTEON_CN23XX_REV_2_0 0x80
+/** Subsystem IDs for the chips */
+#define OCTEON_CN2350_10GB_SUBSYS_ID_1 0X3177d
+#define OCTEON_CN2350_10GB_SUBSYS_ID_2 0X4177d
+#define OCTEON_CN2360_10GB_SUBSYS_ID 0X5177d
+#define OCTEON_CN2350_25GB_SUBSYS_ID 0X7177d
+#define OCTEON_CN2360_25GB_SUBSYS_ID 0X6177d
+
/** Endian-swap modes supported by Octeon. */
enum octeon_pci_swap_mode {
OCTEON_PCI_PASSTHROUGH = 0,
@@ -430,6 +437,8 @@ struct octeon_device {
u16 rev_id;
+ u32 subsystem_id;
+
u16 pf_num;
u16 vf_num;
@@ -584,6 +593,11 @@ struct octeon_device {
struct lio_vf_rep_list vf_rep_list;
struct devlink *devlink;
enum devlink_eswitch_mode eswitch_mode;
+
+ /* for 25G NIC speed change */
+ u8 speed_boot;
+ u8 speed_setting;
+ u8 no_speed_setting;
};
#define OCT_DRV_ONLINE 1
@@ -867,7 +881,7 @@ void *oct_get_config_info(struct octeon_device *oct, u16 card_type);
struct octeon_config *octeon_get_conf(struct octeon_device *oct);
void octeon_free_ioq_vector(struct octeon_device *oct);
-int octeon_allocate_ioq_vector(struct octeon_device *oct);
+int octeon_allocate_ioq_vector(struct octeon_device *oct, u32 num_ioqs);
void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq);
/* LiquidIO driver private flags */
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
index 81c987682941..5fed7b63223e 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
@@ -62,8 +62,8 @@ struct oct_iq_stats {
u64 tx_tot_bytes;/**< Total count of bytes sent to the network. */
u64 tx_gso; /* count of tso */
u64 tx_vxlan; /* tunnel */
- u64 tx_dmamap_fail;
- u64 tx_restart;
+ u64 tx_dmamap_fail; /* Number of times dma mapping failed */
+ u64 tx_restart; /* Number of times this queue restarted */
};
#define OCT_IQ_STATS_SIZE (sizeof(struct oct_iq_stats))
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c
index 28e74ee23ff8..021d99cd1665 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c
@@ -24,6 +24,7 @@
#include "octeon_device.h"
#include "octeon_main.h"
#include "octeon_mailbox.h"
+#include "cn23xx_pf_device.h"
/**
* octeon_mbox_read:
@@ -205,6 +206,26 @@ int octeon_mbox_write(struct octeon_device *oct,
return ret;
}
+static void get_vf_stats(struct octeon_device *oct,
+ struct oct_vf_stats *stats)
+{
+ int i;
+
+ for (i = 0; i < oct->num_iqs; i++) {
+ if (!oct->instr_queue[i])
+ continue;
+ stats->tx_packets += oct->instr_queue[i]->stats.tx_done;
+ stats->tx_bytes += oct->instr_queue[i]->stats.tx_tot_bytes;
+ }
+
+ for (i = 0; i < oct->num_oqs; i++) {
+ if (!oct->droq[i])
+ continue;
+ stats->rx_packets += oct->droq[i]->stats.rx_pkts_received;
+ stats->rx_bytes += oct->droq[i]->stats.rx_bytes_received;
+ }
+}
+
/**
* octeon_mbox_process_cmd:
* @mbox: Pointer mailbox
@@ -250,6 +271,15 @@ static int octeon_mbox_process_cmd(struct octeon_mbox *mbox,
mbox_cmd->msg.s.params);
break;
+ case OCTEON_GET_VF_STATS:
+ dev_dbg(&oct->pci_dev->dev, "Got VF stats request. Sending data back\n");
+ mbox_cmd->msg.s.type = OCTEON_MBOX_RESPONSE;
+ mbox_cmd->msg.s.resp_needed = 1;
+ mbox_cmd->msg.s.len = 1 +
+ sizeof(struct oct_vf_stats) / sizeof(u64);
+ get_vf_stats(oct, (struct oct_vf_stats *)mbox_cmd->data);
+ octeon_mbox_write(oct, mbox_cmd);
+ break;
default:
break;
}
@@ -322,3 +352,25 @@ int octeon_mbox_process_message(struct octeon_mbox *mbox)
return 0;
}
+
+int octeon_mbox_cancel(struct octeon_device *oct, int q_no)
+{
+ struct octeon_mbox *mbox = oct->mbox[q_no];
+ struct octeon_mbox_cmd *mbox_cmd;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&mbox->lock, flags);
+ mbox_cmd = &mbox->mbox_resp;
+
+ if (!(mbox->state & OCTEON_MBOX_STATE_RESPONSE_PENDING)) {
+ spin_unlock_irqrestore(&mbox->lock, flags);
+ return 1;
+ }
+
+ mbox->state = OCTEON_MBOX_STATE_IDLE;
+ memset(mbox_cmd, 0, sizeof(*mbox_cmd));
+ writeq(OCTEON_PFVFSIG, mbox->mbox_read_reg);
+ spin_unlock_irqrestore(&mbox->lock, flags);
+
+ return 0;
+}
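
A hedged usage sketch for octeon_mbox_cancel() above: a caller that gives up waiting on a response can cancel it so the mailbox state machine returns to idle rather than wedging on OCTEON_MBOX_STATE_RESPONSE_PENDING. wait_for_mbox_resp() below is a placeholder, not a driver function:

/* Placeholder helper: true if a response arrived before the timeout. */
static bool wait_for_mbox_resp(struct octeon_device *oct, int q_no);

static int example_mbox_xchg(struct octeon_device *oct, int q_no)
{
	if (wait_for_mbox_resp(oct, q_no))
		return 0;

	/* Timed out: drop the pending response so the mailbox goes
	 * back to OCTEON_MBOX_STATE_IDLE; the read register is
	 * re-armed with OCTEON_PFVFSIG.
	 */
	octeon_mbox_cancel(oct, q_no);
	return -ETIMEDOUT;
}
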
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h
index 1def22afeff1..d92bd7e16477 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h
@@ -25,6 +25,7 @@
#define OCTEON_VF_ACTIVE 0x1
#define OCTEON_VF_FLR_REQUEST 0x2
#define OCTEON_PF_CHANGED_VF_MACADDR 0x4
+#define OCTEON_GET_VF_STATS 0x8
/* Macro for read acknowledgement */
#define OCTEON_PFVFACK 0xffffffffffffffffULL
@@ -107,9 +108,15 @@ struct octeon_mbox {
};
+struct oct_vf_stats_ctx {
+ atomic_t status;
+ struct oct_vf_stats *stats;
+};
+
int octeon_mbox_read(struct octeon_mbox *mbox);
int octeon_mbox_write(struct octeon_device *oct,
struct octeon_mbox_cmd *mbox_cmd);
int octeon_mbox_process_message(struct octeon_mbox *mbox);
+int octeon_mbox_cancel(struct octeon_device *oct, int q_no);
#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
index 4069710796a8..d7a3916fe877 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_network.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
@@ -47,6 +47,29 @@ struct liquidio_if_cfg_resp {
u64 status;
};
+#define LIO_IFCFG_WAIT_TIME 3000 /* In milliseconds */
+
+/* Structure of a node in the list of gather components maintained by
+ * the NIC driver for each network device.
+ */
+struct octnic_gather {
+ /* List manipulation. Next and prev pointers. */
+ struct list_head list;
+
+ /* Size of the gather component at sg in bytes. */
+ int sg_size;
+
+ /* Number of bytes by which sg was adjusted to make it 8B-aligned. */
+ int adjust;
+
+ /* Gather component that can accommodate a max-sized fragment list
+ * received from the IP layer.
+ */
+ struct octeon_sg_entry *sg;
+
+ dma_addr_t sg_dma_ptr;
+};
+
struct oct_nic_stats_resp {
u64 rh;
struct oct_link_stats stats;
@@ -58,6 +81,18 @@ struct oct_nic_stats_ctrl {
struct net_device *netdev;
};
+struct oct_nic_seapi_resp {
+ u64 rh;
+ u32 speed;
+ u64 status;
+};
+
+struct liquidio_nic_seapi_ctl_context {
+ int octeon_id;
+ u32 status;
+ struct completion complete;
+};
+
/** LiquidIO per-interface network private data */
struct lio {
/** State of the interface. Rx/Tx happens only in the RUNNING state. */
@@ -157,7 +192,7 @@ struct lio {
#define LIO_SIZE (sizeof(struct lio))
#define GET_LIO(netdev) ((struct lio *)netdev_priv(netdev))
-#define LIO_MAX_CORES 12
+#define LIO_MAX_CORES 16
/**
* \brief Enable or disable feature
@@ -190,6 +225,8 @@ irqreturn_t liquidio_msix_intr_handler(int irq __attribute__((unused)),
int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs);
+int octnet_get_link_stats(struct net_device *netdev);
+
int lio_wait_for_clean_oq(struct octeon_device *oct);
/**
* \brief Register ethtool operations
@@ -197,6 +234,17 @@ int lio_wait_for_clean_oq(struct octeon_device *oct);
*/
void liquidio_set_ethtool_ops(struct net_device *netdev);
+void lio_if_cfg_callback(struct octeon_device *oct,
+ u32 status __attribute__((unused)),
+ void *buf);
+
+void lio_delete_glists(struct lio *lio);
+
+int lio_setup_glists(struct octeon_device *oct, struct lio *lio, int num_qs);
+
+int liquidio_get_speed(struct lio *lio);
+int liquidio_set_speed(struct lio *lio, int speed);
+
/**
* \brief Net device change_mtu
* @param netdev network device
@@ -515,7 +563,7 @@ static inline void stop_txqs(struct net_device *netdev)
{
int i;
- for (i = 0; i < netdev->num_tx_queues; i++)
+ for (i = 0; i < netdev->real_num_tx_queues; i++)
netif_stop_subqueue(netdev, i);
}
@@ -528,7 +576,7 @@ static inline void wake_txqs(struct net_device *netdev)
struct lio *lio = GET_LIO(netdev);
int i, qno;
- for (i = 0; i < netdev->num_tx_queues; i++) {
+ for (i = 0; i < netdev->real_num_tx_queues; i++) {
qno = lio->linfo.txpciq[i % lio->oct_dev->num_iqs].s.q_no;
if (__netif_subqueue_stopped(netdev, i)) {
@@ -549,14 +597,33 @@ static inline void start_txqs(struct net_device *netdev)
int i;
if (lio->linfo.link.s.link_up) {
- for (i = 0; i < netdev->num_tx_queues; i++)
+ for (i = 0; i < netdev->real_num_tx_queues; i++)
netif_start_subqueue(netdev, i);
}
}
-static inline int skb_iq(struct lio *lio, struct sk_buff *skb)
+static inline int skb_iq(struct octeon_device *oct, struct sk_buff *skb)
{
- return skb->queue_mapping % lio->linfo.num_txpciq;
+ return skb->queue_mapping % oct->num_iqs;
+}
+
+/**
+ * Remove and return the node at the head of the list, or return NULL
+ * if the list is empty.
+ */
+static inline struct list_head *lio_list_delete_head(struct list_head *root)
+{
+ struct list_head *node;
+
+ if (root->prev == root && root->next == root)
+ node = NULL;
+ else
+ node = root->next;
+
+ if (node)
+ list_del(node);
+
+ return node;
}
#endif
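
The open-coded prev/next comparison in lio_list_delete_head() above is, for a well-formed list, exactly the list_empty() test, so an equivalent sketch using the standard <linux/list.h> primitives would be:

#include <linux/list.h>

static inline struct list_head *pop_head(struct list_head *root)
{
	struct list_head *node;

	if (list_empty(root))	/* root->next == root */
		return NULL;

	node = root->next;
	list_del(node);
	return node;
}
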
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 707db3304396..7135db45927e 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -538,9 +538,9 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
action = bpf_prog_run_xdp(prog, &xdp);
rcu_read_unlock();
+ len = xdp.data_end - xdp.data;
/* Check if XDP program has changed headers */
if (orig_data != xdp.data) {
- len = xdp.data_end - xdp.data;
offset = orig_data - xdp.data;
dma_addr -= offset;
}
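
The nicvf change above fixes a subtle XDP bug: a program can shrink the frame with bpf_xdp_adjust_tail(), which moves xdp.data_end without moving xdp.data, so computing len only inside the head-moved branch left it stale. A reduced sketch of the corrected flow:

len = xdp.data_end - xdp.data;	/* valid for head and/or tail moves */

if (orig_data != xdp.data) {	/* head moved: fix up DMA address */
	offset = orig_data - xdp.data;
	dma_addr -= offset;
}
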
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index e988caa797cb..20b6e1b3f5e3 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -620,7 +620,7 @@ static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
{
size_t len = nelem * elem_size;
void *s = NULL;
- void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);
+ void *p = dma_zalloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);
if (!p)
return NULL;
@@ -633,7 +633,6 @@ static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
}
*(void **)metadata = s;
}
- memset(p, 0, len);
return p;
}
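
The dma_zalloc_coherent() conversion here (and again in cxgb4's sge.c below) is behavior-preserving: the helper allocates and zero-fills in one call. Roughly, for illustration:

/* Approximate definition, for illustration only; the in-tree helper
 * achieves the same effect by passing __GFP_ZERO to the allocator.
 */
static inline void *dma_zalloc_coherent_sketch(struct device *dev,
					       size_t size,
					       dma_addr_t *dma_handle,
					       gfp_t flag)
{
	return dma_alloc_coherent(dev, size, dma_handle,
				  flag | __GFP_ZERO);
}
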
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
index dc25066c59a1..3c5057868ab3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
@@ -62,6 +62,18 @@ struct cudbg_hw_sched {
u32 map;
};
+#define SGE_QBASE_DATA_REG_NUM 4
+
+struct sge_qbase_reg_field {
+ u32 reg_addr;
+ u32 reg_data[SGE_QBASE_DATA_REG_NUM];
+ /* Max supported PFs */
+ u32 pf_data_value[PCIE_FW_MASTER_M + 1][SGE_QBASE_DATA_REG_NUM];
+ /* Max supported VFs */
+ u32 vf_data_value[T6_VF_M + 1][SGE_QBASE_DATA_REG_NUM];
+ u32 vfcount; /* Actual number of VFs in the current configuration */
+};
+
struct ireg_field {
u32 ireg_addr;
u32 ireg_data;
@@ -235,6 +247,9 @@ struct cudbg_vpd_data {
};
#define CUDBG_MAX_TCAM_TID 0x800
+#define CUDBG_T6_CLIP 1536
+#define CUDBG_MAX_TID_COMP_EN 6144
+#define CUDBG_MAX_TID_COMP_DIS 3072
enum cudbg_le_entry_types {
LE_ET_UNKNOWN = 0,
@@ -354,6 +369,11 @@ static const u32 t5_sge_dbg_index_array[2][IREG_NUM_ELEM] = {
{0x10cc, 0x10d4, 0x0, 16},
};
+static const u32 t6_sge_qbase_index_array[] = {
+ /* 1 addr reg SGE_QBASE_INDEX and 4 data regs SGE_QBASE_MAP[0-3] */
+ 0x1250, 0x1240, 0x1244, 0x1248, 0x124c,
+};
+
static const u32 t5_pcie_pdbg_array[][IREG_NUM_ELEM] = {
{0x5a04, 0x5a0c, 0x00, 0x20}, /* t5_pcie_pdbg_regs_00_to_20 */
{0x5a04, 0x5a0c, 0x21, 0x20}, /* t5_pcie_pdbg_regs_21_to_40 */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h
index 8568a51f6414..215fe6260fd7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h
@@ -24,6 +24,7 @@
#define CUDBG_STATUS_NOT_IMPLEMENTED -28
#define CUDBG_SYSTEM_ERROR -29
#define CUDBG_STATUS_CCLK_NOT_DEFINED -32
+#define CUDBG_STATUS_PARTIAL_DATA -41
#define CUDBG_MAJOR_VERSION 1
#define CUDBG_MINOR_VERSION 14
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
index 9da6f57901a9..0afcfe99bff3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
@@ -1339,16 +1339,39 @@ int cudbg_collect_tp_indirect(struct cudbg_init *pdbg_init,
return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
+static void cudbg_read_sge_qbase_indirect_reg(struct adapter *padap,
+ struct sge_qbase_reg_field *qbase,
+ u32 func, bool is_pf)
+{
+ u32 *buff, i;
+
+ if (is_pf) {
+ buff = qbase->pf_data_value[func];
+ } else {
+ buff = qbase->vf_data_value[func];
+ /* In SGE_QBASE_INDEX,
+ * Entries 0->7 are PF0->7, Entries 8->263 are VFID0->256.
+ */
+ func += 8;
+ }
+
+ t4_write_reg(padap, qbase->reg_addr, func);
+ for (i = 0; i < SGE_QBASE_DATA_REG_NUM; i++, buff++)
+ *buff = t4_read_reg(padap, qbase->reg_data[i]);
+}
+
int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err)
{
struct adapter *padap = pdbg_init->adap;
struct cudbg_buffer temp_buff = { 0 };
+ struct sge_qbase_reg_field *sge_qbase;
struct ireg_buf *ch_sge_dbg;
int i, rc;
- rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(*ch_sge_dbg) * 2,
+ rc = cudbg_get_buff(pdbg_init, dbg_buff,
+ sizeof(*ch_sge_dbg) * 2 + sizeof(*sge_qbase),
&temp_buff);
if (rc)
return rc;
@@ -1370,6 +1393,28 @@ int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init,
sge_pio->ireg_local_offset);
ch_sge_dbg++;
}
+
+ if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5) {
+ sge_qbase = (struct sge_qbase_reg_field *)ch_sge_dbg;
+ /* 1 addr reg SGE_QBASE_INDEX and 4 data regs
+ * SGE_QBASE_MAP[0-3]
+ */
+ sge_qbase->reg_addr = t6_sge_qbase_index_array[0];
+ for (i = 0; i < SGE_QBASE_DATA_REG_NUM; i++)
+ sge_qbase->reg_data[i] =
+ t6_sge_qbase_index_array[i + 1];
+
+ for (i = 0; i <= PCIE_FW_MASTER_M; i++)
+ cudbg_read_sge_qbase_indirect_reg(padap, sge_qbase,
+ i, true);
+
+ for (i = 0; i < padap->params.arch.vfcount; i++)
+ cudbg_read_sge_qbase_indirect_reg(padap, sge_qbase,
+ i, false);
+
+ sge_qbase->vfcount = padap->params.arch.vfcount;
+ }
+
return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
@@ -2366,8 +2411,11 @@ void cudbg_fill_le_tcam_info(struct adapter *padap,
value = t4_read_reg(padap, LE_DB_ROUTING_TABLE_INDEX_A);
tcam_region->routing_start = value;
- /*Get clip table index */
- value = t4_read_reg(padap, LE_DB_CLIP_TABLE_INDEX_A);
+ /* Get clip table index. For T6 there is a separate CLIP TCAM */
+ if (is_t6(padap->params.chip))
+ value = t4_read_reg(padap, LE_DB_CLCAM_TID_BASE_A);
+ else
+ value = t4_read_reg(padap, LE_DB_CLIP_TABLE_INDEX_A);
tcam_region->clip_start = value;
/* Get filter table index */
@@ -2392,8 +2440,16 @@ void cudbg_fill_le_tcam_info(struct adapter *padap,
tcam_region->tid_hash_base;
}
} else { /* hash not enabled */
- tcam_region->max_tid = CUDBG_MAX_TCAM_TID;
+ if (is_t6(padap->params.chip))
+ tcam_region->max_tid = (value & ASLIPCOMPEN_F) ?
+ CUDBG_MAX_TID_COMP_EN :
+ CUDBG_MAX_TID_COMP_DIS;
+ else
+ tcam_region->max_tid = CUDBG_MAX_TCAM_TID;
}
+
+ if (is_t6(padap->params.chip))
+ tcam_region->max_tid += CUDBG_T6_CLIP;
}
int cudbg_collect_le_tcam(struct cudbg_init *pdbg_init,
@@ -2423,18 +2479,31 @@ int cudbg_collect_le_tcam(struct cudbg_init *pdbg_init,
for (i = 0; i < tcam_region.max_tid; ) {
rc = cudbg_read_tid(pdbg_init, i, tid_data);
if (rc) {
- cudbg_err->sys_err = rc;
- cudbg_put_buff(pdbg_init, &temp_buff);
- return rc;
+ cudbg_err->sys_warn = CUDBG_STATUS_PARTIAL_DATA;
+ /* Update tcam header and exit */
+ tcam_region.max_tid = i;
+ memcpy(temp_buff.data, &tcam_region,
+ sizeof(struct cudbg_tcam));
+ goto out;
}
- /* ipv6 takes two tids */
- cudbg_is_ipv6_entry(tid_data, tcam_region) ? i += 2 : i++;
+ if (cudbg_is_ipv6_entry(tid_data, tcam_region)) {
+ /* T6 CLIP TCAM: ipv6 takes 4 entries */
+ if (is_t6(padap->params.chip) &&
+ i >= tcam_region.clip_start &&
+ i < tcam_region.clip_start + CUDBG_T6_CLIP)
+ i += 4;
+ else /* Main TCAM: ipv6 takes two tids */
+ i += 2;
+ } else {
+ i++;
+ }
tid_data++;
bytes += sizeof(struct cudbg_tid_data);
}
+out:
return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 688f95440af2..0dbe2d9e22d6 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -50,6 +50,7 @@
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/ptp_classify.h>
+#include <linux/crash_dump.h>
#include <asm/io.h>
#include "t4_chip_type.h"
#include "cxgb4_uld.h"
@@ -490,6 +491,9 @@ struct link_config {
unsigned char link_ok; /* link up? */
unsigned char link_down_rc; /* link down reason */
+
+ bool new_module; /* ->OS Transceiver Module inserted */
+ bool redo_l1cfg; /* ->CC redo current "sticky" L1 CFG */
};
#define FW_LEN16(fw_struct) FW_CMD_LEN16_V(sizeof(fw_struct) / 16)
@@ -964,6 +968,9 @@ struct adapter {
struct hma_data hma;
struct srq_data *srq;
+
+ /* Dump buffer for collecting logs in kdump kernel */
+ struct vmcoredd_data vmcoredd;
};
/* Support for "sched-class" command to allow a TX Scheduling Class to be
@@ -1034,6 +1041,7 @@ struct ch_sched_queue {
#define VF_BITWIDTH 8
#define IVLAN_BITWIDTH 16
#define OVLAN_BITWIDTH 16
+#define ENCAP_VNI_BITWIDTH 24
/* Filter matching rules. These consist of a set of ingress packet field
* (value, mask) tuples. The associated ingress packet field matches the
@@ -1064,6 +1072,7 @@ struct ch_filter_tuple {
uint32_t ivlan_vld:1; /* inner VLAN valid */
uint32_t ovlan_vld:1; /* outer VLAN valid */
uint32_t pfvf_vld:1; /* PF/VF valid */
+ uint32_t encap_vld:1; /* Encapsulation valid */
uint32_t macidx:MACIDX_BITWIDTH; /* exact match MAC index */
uint32_t fcoe:FCOE_BITWIDTH; /* FCoE packet */
uint32_t iport:IPORT_BITWIDTH; /* ingress port */
@@ -1074,6 +1083,7 @@ struct ch_filter_tuple {
uint32_t vf:VF_BITWIDTH; /* PCI-E VF ID */
uint32_t ivlan:IVLAN_BITWIDTH; /* inner VLAN */
uint32_t ovlan:OVLAN_BITWIDTH; /* outer VLAN */
+ uint32_t vni:ENCAP_VNI_BITWIDTH; /* VNI of tunnel */
/* Uncompressed header matching field rules. These are always
* available for field rules.
@@ -1317,7 +1327,7 @@ static inline unsigned int qtimer_val(const struct adapter *adap,
extern char cxgb4_driver_name[];
extern const char cxgb4_driver_version[];
-void t4_os_portmod_changed(const struct adapter *adap, int port_id);
+void t4_os_portmod_changed(struct adapter *adap, int port_id);
void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat);
void t4_free_sge_resources(struct adapter *adap);
@@ -1498,8 +1508,25 @@ void t4_intr_disable(struct adapter *adapter);
int t4_slow_intr_handler(struct adapter *adapter);
int t4_wait_dev_ready(void __iomem *regs);
-int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
- struct link_config *lc);
+
+int t4_link_l1cfg_core(struct adapter *adap, unsigned int mbox,
+ unsigned int port, struct link_config *lc,
+ bool sleep_ok, int timeout);
+
+static inline int t4_link_l1cfg(struct adapter *adapter, unsigned int mbox,
+ unsigned int port, struct link_config *lc)
+{
+ return t4_link_l1cfg_core(adapter, mbox, port, lc,
+ true, FW_CMD_MAX_TIMEOUT);
+}
+
+static inline int t4_link_l1cfg_ns(struct adapter *adapter, unsigned int mbox,
+ unsigned int port, struct link_config *lc)
+{
+ return t4_link_l1cfg_core(adapter, mbox, port, lc,
+ false, FW_CMD_MAX_TIMEOUT);
+}
+
int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port);
u32 t4_read_pcie_cfg4(struct adapter *adap, int reg);
@@ -1690,6 +1717,12 @@ int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid,
const u8 *addr, const u8 *mask, unsigned int idx,
u8 lookup_type, u8 port_id, bool sleep_ok);
+int t4_free_encap_mac_filt(struct adapter *adap, unsigned int viid, int idx,
+ bool sleep_ok);
+int t4_alloc_encap_mac_filt(struct adapter *adap, unsigned int viid,
+ const u8 *addr, const u8 *mask, unsigned int vni,
+ unsigned int vni_mask, u8 dip_hit, u8 lookup_type,
+ bool sleep_ok);
int t4_alloc_raw_mac_filt(struct adapter *adap, unsigned int viid,
const u8 *addr, const u8 *mask, unsigned int idx,
u8 lookup_type, u8 port_id, bool sleep_ok);
@@ -1705,6 +1738,9 @@ int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
bool ucast, u64 vec, bool sleep_ok);
int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
unsigned int viid, bool rx_en, bool tx_en, bool dcb_en);
+int t4_enable_pi_params(struct adapter *adap, unsigned int mbox,
+ struct port_info *pi,
+ bool rx_en, bool tx_en, bool dcb_en);
int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
bool rx_en, bool tx_en);
int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
index 143686c60234..8d751efcb90e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
@@ -214,7 +214,8 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
len = sizeof(struct ireg_buf) * n;
break;
case CUDBG_SGE_INDIRECT:
- len = sizeof(struct ireg_buf) * 2;
+ len = sizeof(struct ireg_buf) * 2 +
+ sizeof(struct sge_qbase_reg_field);
break;
case CUDBG_ULPRX_LA:
len = sizeof(struct cudbg_ulprx_la);
@@ -488,3 +489,28 @@ void cxgb4_init_ethtool_dump(struct adapter *adapter)
adapter->eth_dump.version = adapter->params.fw_vers;
adapter->eth_dump.len = 0;
}
+
+static int cxgb4_cudbg_vmcoredd_collect(struct vmcoredd_data *data, void *buf)
+{
+ struct adapter *adap = container_of(data, struct adapter, vmcoredd);
+ u32 len = data->size;
+
+ return cxgb4_cudbg_collect(adap, buf, &len, CXGB4_ETH_DUMP_ALL);
+}
+
+int cxgb4_cudbg_vmcore_add_dump(struct adapter *adap)
+{
+ struct vmcoredd_data *data = &adap->vmcoredd;
+ u32 len;
+
+ len = sizeof(struct cudbg_hdr) +
+ sizeof(struct cudbg_entity_hdr) * CUDBG_MAX_ENTITY;
+ len += CUDBG_DUMP_BUFF_SIZE;
+
+ data->size = len;
+ snprintf(data->dump_name, sizeof(data->dump_name), "%s_%s",
+ cxgb4_driver_name, adap->name);
+ data->vmcoredd_callback = cxgb4_cudbg_vmcoredd_collect;
+
+ return vmcore_add_device_dump(data);
+}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h
index ce1ac9a1c878..ef59ba1ed968 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h
@@ -41,8 +41,11 @@ enum CXGB4_ETHTOOL_DUMP_FLAGS {
CXGB4_ETH_DUMP_HW = (1 << 1), /* various FW and HW dumps */
};
+#define CXGB4_ETH_DUMP_ALL (CXGB4_ETH_DUMP_MEM | CXGB4_ETH_DUMP_HW)
+
u32 cxgb4_get_dump_length(struct adapter *adap, u32 flag);
int cxgb4_cudbg_collect(struct adapter *adap, void *buf, u32 *buf_size,
u32 flag);
void cxgb4_init_ethtool_dump(struct adapter *adapter);
+int cxgb4_cudbg_vmcore_add_dump(struct adapter *adap);
#endif /* __CXGB4_CUDBG_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index 59d04d73c672..f7eef93ffc87 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -800,24 +800,20 @@ static int set_link_ksettings(struct net_device *dev,
if (base->duplex != DUPLEX_FULL)
return -EINVAL;
- if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) {
- /* PHY offers a single speed. See if that's what's
- * being requested.
- */
- if (base->autoneg == AUTONEG_DISABLE &&
- (lc->pcaps & speed_to_fw_caps(base->speed)))
- return 0;
- return -EINVAL;
- }
-
old_lc = *lc;
- if (base->autoneg == AUTONEG_DISABLE) {
+ if (!(lc->pcaps & FW_PORT_CAP32_ANEG) ||
+ base->autoneg == AUTONEG_DISABLE) {
fw_caps = speed_to_fw_caps(base->speed);
- if (!(lc->pcaps & fw_caps))
+ /* Must only specify a single speed which must be supported
+ * as part of the Physical Port Capabilities.
+ */
+ if ((fw_caps & (fw_caps - 1)) != 0 ||
+ !(lc->pcaps & fw_caps))
return -EINVAL;
+
lc->speed_caps = fw_caps;
- lc->acaps = 0;
+ lc->acaps = fw_caps;
} else {
fw_caps =
lmm_to_fw_caps(link_ksettings->link_modes.advertising);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
index b76447baccaf..00fc5f1afb1d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
@@ -64,8 +64,7 @@ static int set_tcb_field(struct adapter *adap, struct filter_entry *f,
if (!skb)
return -ENOMEM;
- req = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*req));
- memset(req, 0, sizeof(*req));
+ req = (struct cpl_set_tcb_field *)__skb_put_zero(skb, sizeof(*req));
INIT_TP_WR_CPL(req, CPL_SET_TCB_FIELD, ftid);
req->reply_ctrl = htons(REPLY_CHAN_V(0) |
QUEUENO_V(adap->sge.fw_evtq.abs_id) |
@@ -266,6 +265,8 @@ static int validate_filter(struct net_device *dev,
fs->mask.pfvf_vld) ||
unsupported(fconf, VNIC_ID_F, fs->val.ovlan_vld,
fs->mask.ovlan_vld) ||
+ unsupported(fconf, VNIC_ID_F, fs->val.encap_vld,
+ fs->mask.encap_vld) ||
unsupported(fconf, VLAN_F, fs->val.ivlan_vld, fs->mask.ivlan_vld))
return -EOPNOTSUPP;
@@ -276,8 +277,12 @@ static int validate_filter(struct net_device *dev,
* carries that overlap, we need to translate any PF/VF
* specification into that internal format below.
*/
- if (is_field_set(fs->val.pfvf_vld, fs->mask.pfvf_vld) &&
- is_field_set(fs->val.ovlan_vld, fs->mask.ovlan_vld))
+ if ((is_field_set(fs->val.pfvf_vld, fs->mask.pfvf_vld) &&
+ is_field_set(fs->val.ovlan_vld, fs->mask.ovlan_vld)) ||
+ (is_field_set(fs->val.pfvf_vld, fs->mask.pfvf_vld) &&
+ is_field_set(fs->val.encap_vld, fs->mask.encap_vld)) ||
+ (is_field_set(fs->val.ovlan_vld, fs->mask.ovlan_vld) &&
+ is_field_set(fs->val.encap_vld, fs->mask.encap_vld)))
return -EOPNOTSUPP;
if (unsupported(iconf, VNIC_F, fs->val.pfvf_vld, fs->mask.pfvf_vld) ||
(is_field_set(fs->val.ovlan_vld, fs->mask.ovlan_vld) &&
@@ -307,6 +312,9 @@ static int validate_filter(struct net_device *dev,
fs->newvlan == VLAN_REWRITE))
return -EOPNOTSUPP;
+ if (fs->val.encap_vld &&
+ CHELSIO_CHIP_VERSION(adapter->params.chip) < CHELSIO_T6)
+ return -EOPNOTSUPP;
return 0;
}
@@ -706,6 +714,8 @@ int delete_filter(struct adapter *adapter, unsigned int fidx)
*/
void clear_filter(struct adapter *adap, struct filter_entry *f)
{
+ struct port_info *pi = netdev_priv(f->dev);
+
/* If the new or old filter has loopback rewriting rules then we'll
* need to free any existing L2T, SMT, or CLIP entries for the filter
* rule.
@@ -716,6 +726,12 @@ void clear_filter(struct adapter *adap, struct filter_entry *f)
if (f->smt)
cxgb4_smt_release(f->smt);
+ if (f->fs.val.encap_vld && f->fs.val.ovlan_vld)
+ if (atomic_dec_and_test(&adap->mps_encap[f->fs.val.ovlan &
+ 0x1ff].refcnt))
+ t4_free_encap_mac_filt(adap, pi->viid,
+ f->fs.val.ovlan & 0x1ff, 0);
+
if ((f->fs.hash || is_t6(adap->params.chip)) && f->fs.type)
cxgb4_clip_release(f->dev, (const u32 *)&f->fs.val.lip, 1);
@@ -841,6 +857,10 @@ bool is_filter_exact_match(struct adapter *adap,
if (!is_hashfilter(adap))
return false;
+ /* Keep tunnel VNI match disabled for hash-filters for now */
+ if (fs->mask.encap_vld)
+ return false;
+
if (fs->type) {
if (is_inaddr_any(fs->val.fip, AF_INET6) ||
!is_addr_all_mask(fs->mask.fip, AF_INET6))
@@ -934,8 +954,12 @@ static u64 hash_filter_ntuple(struct ch_filter_specification *fs,
ntuple |= (u64)(fs->val.tos) << tp->tos_shift;
if (tp->vnic_shift >= 0) {
- if ((adap->params.tp.ingress_config & VNIC_F) &&
- fs->mask.pfvf_vld)
+ if ((adap->params.tp.ingress_config & USE_ENC_IDX_F) &&
+ fs->mask.encap_vld)
+ ntuple |= (u64)((fs->val.encap_vld << 16) |
+ (fs->val.ovlan)) << tp->vnic_shift;
+ else if ((adap->params.tp.ingress_config & VNIC_F) &&
+ fs->mask.pfvf_vld)
ntuple |= (u64)((fs->val.pfvf_vld << 16) |
(fs->val.pf << 13) |
(fs->val.vf)) << tp->vnic_shift;
@@ -1049,6 +1073,7 @@ static int cxgb4_set_hash_filter(struct net_device *dev,
struct filter_ctx *ctx)
{
struct adapter *adapter = netdev2adap(dev);
+ struct port_info *pi = netdev_priv(dev);
struct tid_info *t = &adapter->tids;
struct filter_entry *f;
struct sk_buff *skb;
@@ -1115,13 +1140,34 @@ static int cxgb4_set_hash_filter(struct net_device *dev,
f->fs.mask.ovlan = (fs->mask.pf << 13) | fs->mask.vf;
f->fs.val.ovlan_vld = fs->val.pfvf_vld;
f->fs.mask.ovlan_vld = fs->mask.pfvf_vld;
+ } else if (iconf & USE_ENC_IDX_F) {
+ if (f->fs.val.encap_vld) {
+ struct port_info *pi = netdev_priv(f->dev);
+ u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
+
+ /* allocate MPS TCAM entry */
+ ret = t4_alloc_encap_mac_filt(adapter, pi->viid,
+ match_all_mac,
+ match_all_mac,
+ f->fs.val.vni,
+ f->fs.mask.vni,
+ 0, 1, 1);
+ if (ret < 0)
+ goto free_atid;
+
+ atomic_inc(&adapter->mps_encap[ret].refcnt);
+ f->fs.val.ovlan = ret;
+ f->fs.mask.ovlan = 0xffff;
+ f->fs.val.ovlan_vld = 1;
+ f->fs.mask.ovlan_vld = 1;
+ }
}
size = sizeof(struct cpl_t6_act_open_req);
if (f->fs.type) {
ret = cxgb4_clip_get(f->dev, (const u32 *)&f->fs.val.lip, 1);
if (ret)
- goto free_atid;
+ goto free_mps;
skb = alloc_skb(size, GFP_KERNEL);
if (!skb) {
@@ -1136,7 +1182,7 @@ static int cxgb4_set_hash_filter(struct net_device *dev,
skb = alloc_skb(size, GFP_KERNEL);
if (!skb) {
ret = -ENOMEM;
- goto free_atid;
+ goto free_mps;
}
mk_act_open_req(f, skb,
@@ -1152,6 +1198,10 @@ static int cxgb4_set_hash_filter(struct net_device *dev,
free_clip:
cxgb4_clip_release(f->dev, (const u32 *)&f->fs.val.lip, 1);
+free_mps:
+ if (f->fs.val.encap_vld && f->fs.val.ovlan_vld)
+ t4_free_encap_mac_filt(adapter, pi->viid, f->fs.val.ovlan, 1);
+
free_atid:
cxgb4_free_atid(t, atid);
@@ -1333,6 +1383,27 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
f->fs.mask.ovlan = (fs->mask.pf << 13) | fs->mask.vf;
f->fs.val.ovlan_vld = fs->val.pfvf_vld;
f->fs.mask.ovlan_vld = fs->mask.pfvf_vld;
+ } else if (iconf & USE_ENC_IDX_F) {
+ if (f->fs.val.encap_vld) {
+ struct port_info *pi = netdev_priv(f->dev);
+ u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
+
+ /* allocate MPS TCAM entry */
+ ret = t4_alloc_encap_mac_filt(adapter, pi->viid,
+ match_all_mac,
+ match_all_mac,
+ f->fs.val.vni,
+ f->fs.mask.vni,
+ 0, 1, 1);
+ if (ret < 0)
+ goto free_clip;
+
+ atomic_inc(&adapter->mps_encap[ret].refcnt);
+ f->fs.val.ovlan = ret;
+ f->fs.mask.ovlan = 0x1ff;
+ f->fs.val.ovlan_vld = 1;
+ f->fs.mask.ovlan_vld = 1;
+ }
}
/* Attempt to set the filter. If we don't succeed, we clear
@@ -1349,6 +1420,13 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
}
return ret;
+
+free_clip:
+ if (is_t6(adapter->params.chip) && f->fs.type)
+ cxgb4_clip_release(f->dev, (const u32 *)&f->fs.val.lip, 1);
+ cxgb4_clear_ftid(&adapter->tids, filter_id,
+ fs->type ? PF_INET6 : PF_INET, chip_ver);
+ return ret;
}
static int cxgb4_del_hash_filter(struct net_device *dev, int filter_id,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 005283c7cdfe..0efae2030e71 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -301,14 +301,14 @@ void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
}
}
-void t4_os_portmod_changed(const struct adapter *adap, int port_id)
+void t4_os_portmod_changed(struct adapter *adap, int port_id)
{
static const char *mod_str[] = {
NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
};
- const struct net_device *dev = adap->port[port_id];
- const struct port_info *pi = netdev_priv(dev);
+ struct net_device *dev = adap->port[port_id];
+ struct port_info *pi = netdev_priv(dev);
if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
netdev_info(dev, "port module unplugged\n");
@@ -325,6 +325,11 @@ void t4_os_portmod_changed(const struct adapter *adap, int port_id)
else
netdev_info(dev, "%s: unknown module type %d inserted\n",
dev->name, pi->mod_type);
+
+ /* If the interface is running, then we'll need any "sticky" Link
+ * Parameters redone with a new Transceiver Module.
+ */
+ pi->link_cfg.redo_l1cfg = netif_running(dev);
}
int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
@@ -460,7 +465,7 @@ static int link_start(struct net_device *dev)
&pi->link_cfg);
if (ret == 0) {
local_bh_disable();
- ret = t4_enable_vi_params(pi->adapter, mb, pi->viid, true,
+ ret = t4_enable_pi_params(pi->adapter, mb, pi, true,
true, CXGB4_DCB_ENABLED);
local_bh_enable();
}
@@ -2339,7 +2344,8 @@ static int cxgb_close(struct net_device *dev)
netif_tx_stop_all_queues(dev);
netif_carrier_off(dev);
- ret = t4_enable_vi(adapter, adapter->pf, pi->viid, false, false);
+ ret = t4_enable_pi_params(adapter, adapter->pf, pi,
+ false, false, false);
#ifdef CONFIG_CHELSIO_T4_DCB
cxgb4_dcb_reset(dev);
dcb_tx_queue_prio_enable(dev, false);
@@ -2886,13 +2892,13 @@ static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
}
/* Convert from Mbps to Kbps */
- req_rate = rate << 10;
+ req_rate = rate * 1000;
/* Max rate is 100 Gbps */
- if (req_rate >= SCHED_MAX_RATE_KBPS) {
+ if (req_rate > SCHED_MAX_RATE_KBPS) {
dev_err(adap->pdev_dev,
"Invalid rate %u Mbps, Max rate is %u Mbps\n",
- rate, SCHED_MAX_RATE_KBPS >> 10);
+ rate, SCHED_MAX_RATE_KBPS / 1000);
return -ERANGE;
}
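
The Mbps-to-Kbps fix above corrects a binary/decimal mix-up: << 10 multiplies by 1024, not 1000. Assuming SCHED_MAX_RATE_KBPS is 100 Gbps expressed in Kbps (per the comment), a worked example:

u32 rate = 100000;		/* requested rate: 100 Gbps, in Mbps */

u32 wrong = rate << 10;		/* 102400000 Kbps: 2.4% high, fails
				 * the SCHED_MAX_RATE_KBPS bound */
u32 right = rate * 1000;	/* 100000000 Kbps: exactly the max,
				 * accepted by the new '>' check */
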
@@ -3081,7 +3087,7 @@ static void cxgb_del_udp_tunnel(struct net_device *netdev,
match_all_mac, match_all_mac,
adapter->rawf_start +
pi->port_id,
- 1, pi->port_id, true);
+ 1, pi->port_id, false);
if (ret < 0) {
netdev_info(netdev, "Failed to free mac filter entry, for port %d\n",
i);
@@ -3169,7 +3175,7 @@ static void cxgb_add_udp_tunnel(struct net_device *netdev,
match_all_mac,
adapter->rawf_start +
pi->port_id,
- 1, pi->port_id, true);
+ 1, pi->port_id, false);
if (ret < 0) {
netdev_info(netdev, "Failed to allocate a mac filter entry, not adding port %d\n",
be16_to_cpu(ti->port));
@@ -4135,6 +4141,10 @@ static int adap_init0(struct adapter *adap)
* card
*/
card_fw = kvzalloc(sizeof(*card_fw), GFP_KERNEL);
+ if (!card_fw) {
+ ret = -ENOMEM;
+ goto bye;
+ }
/* Get FW from /lib/firmware/ */
ret = request_firmware(&fw, fw_info->fw_mod_name,
@@ -4276,6 +4286,20 @@ static int adap_init0(struct adapter *adap)
adap->tids.nftids = val[4] - val[3] + 1;
adap->sge.ingr_start = val[5];
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
+ /* Read the raw mps entries. In T6, the last 2 tcam entries
+ * are reserved for raw mac addresses (rawf = 2, one per port).
+ */
+ params[0] = FW_PARAM_PFVF(RAWF_START);
+ params[1] = FW_PARAM_PFVF(RAWF_END);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
+ params, val);
+ if (ret == 0) {
+ adap->rawf_start = val[0];
+ adap->rawf_cnt = val[1] - val[0] + 1;
+ }
+ }
+
/* qids (ingress/egress) returned from firmware can be anywhere
* in the range from EQ(IQFLINT)_START to EQ(IQFLINT)_END.
* Hence driver needs to allocate memory for this range to
@@ -5181,6 +5205,7 @@ static void free_some_resources(struct adapter *adapter)
{
unsigned int i;
+ kvfree(adapter->mps_encap);
kvfree(adapter->smt);
kvfree(adapter->l2t);
kvfree(adapter->srq);
@@ -5216,14 +5241,11 @@ static void free_some_resources(struct adapter *adapter)
NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
#define SEGMENT_SIZE 128
-static int get_chip_type(struct pci_dev *pdev, u32 pl_rev)
+static int t4_get_chip_type(struct adapter *adap, int ver)
{
- u16 device_id;
+ u32 pl_rev = REV_G(t4_read_reg(adap, PL_REV_A));
- /* Retrieve adapter's device ID */
- pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
-
- switch (device_id >> 12) {
+ switch (ver) {
case CHELSIO_T4:
return CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
case CHELSIO_T5:
@@ -5231,8 +5253,7 @@ static int get_chip_type(struct pci_dev *pdev, u32 pl_rev)
case CHELSIO_T6:
return CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
default:
- dev_err(&pdev->dev, "Device %d is not supported\n",
- device_id);
+ break;
}
return -EINVAL;
}
@@ -5261,13 +5282,9 @@ static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
u32 pcie_fw;
pcie_fw = readl(adap->regs + PCIE_FW_A);
- /* Check if cxgb4 is the MASTER and fw is initialized */
- if (num_vfs &&
- (!(pcie_fw & PCIE_FW_INIT_F) ||
- !(pcie_fw & PCIE_FW_MASTER_VLD_F) ||
- PCIE_FW_MASTER_G(pcie_fw) != CXGB4_UNIFIED_PF)) {
- dev_warn(&pdev->dev,
- "cxgb4 driver needs to be MASTER to support SRIOV\n");
+ /* Check if fw is initialized */
+ if (!(pcie_fw & PCIE_FW_INIT_F)) {
+ dev_warn(&pdev->dev, "Device not initialized\n");
return -EOPNOTSUPP;
}
@@ -5406,15 +5423,18 @@ static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- int func, i, err, s_qpp, qpp, num_seg;
+ struct net_device *netdev;
+ struct adapter *adapter;
+ static int adap_idx = 1;
+ int s_qpp, qpp, num_seg;
struct port_info *pi;
bool highdma = false;
- struct adapter *adapter = NULL;
- struct net_device *netdev;
- void __iomem *regs;
- u32 whoami, pl_rev;
enum chip_type chip;
- static int adap_idx = 1;
+ void __iomem *regs;
+ int func, chip_ver;
+ u16 device_id;
+ int i, err;
+ u32 whoami;
printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
@@ -5450,11 +5470,17 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_free_adapter;
/* We control everything through one PF */
- whoami = readl(regs + PL_WHOAMI_A);
- pl_rev = REV_G(readl(regs + PL_REV_A));
- chip = get_chip_type(pdev, pl_rev);
- func = CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5 ?
- SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
+ whoami = t4_read_reg(adapter, PL_WHOAMI_A);
+ pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
+ chip = t4_get_chip_type(adapter, CHELSIO_PCI_ID_VER(device_id));
+ if (chip < 0) {
+ dev_err(&pdev->dev, "Device %d is not supported\n", device_id);
+ err = chip;
+ goto out_free_adapter;
+ }
+ chip_ver = CHELSIO_CHIP_VERSION(chip);
+ func = chip_ver <= CHELSIO_T5 ?
+ SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
adapter->pdev = pdev;
adapter->pdev_dev = &pdev->dev;
@@ -5543,6 +5569,16 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto out_free_adapter;
+ if (is_kdump_kernel()) {
+ /* Collect hardware state and append to /proc/vmcore */
+ err = cxgb4_cudbg_vmcore_add_dump(adapter);
+ if (err) {
+ dev_warn(adapter->pdev_dev,
+ "Fail collecting vmcore device dump, err: %d. Continuing\n",
+ err);
+ err = 0;
+ }
+ }
if (!is_t4(adapter->params.chip)) {
s_qpp = (QUEUESPERPAGEPF0_S +
@@ -5610,8 +5646,15 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_TC;
- if (CHELSIO_CHIP_VERSION(chip) > CHELSIO_T5)
+ if (chip_ver > CHELSIO_T5) {
+ netdev->hw_enc_features |= NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_RXCSUM |
+ NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_TSO | NETIF_F_TSO6;
+
netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
+ }
if (highdma)
netdev->hw_features |= NETIF_F_HIGHDMA;
@@ -5676,8 +5719,14 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->params.offload = 0;
}
+ adapter->mps_encap = kvzalloc(sizeof(struct mps_encap_entry) *
+ adapter->params.arch.mps_tcam_size,
+ GFP_KERNEL);
+ if (!adapter->mps_encap)
+ dev_warn(&pdev->dev, "could not allocate MPS Encap entries, continuing\n");
+
#if IS_ENABLED(CONFIG_IPV6)
- if ((CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) &&
+ if (chip_ver <= CHELSIO_T5 &&
(!(t4_read_reg(adapter, LE_DB_CONFIG_A) & ASLIPCOMPEN_F))) {
/* CLIP functionality is not present in hardware,
* hence disable all offload features
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
index 36563364bae7..3ddd2c4acf68 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -194,6 +194,23 @@ static void cxgb4_process_flow_match(struct net_device *dev,
fs->mask.tos = mask->tos;
}
+ if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
+ struct flow_dissector_key_keyid *key, *mask;
+
+ key = skb_flow_dissector_target(cls->dissector,
+ FLOW_DISSECTOR_KEY_ENC_KEYID,
+ cls->key);
+ mask = skb_flow_dissector_target(cls->dissector,
+ FLOW_DISSECTOR_KEY_ENC_KEYID,
+ cls->mask);
+ fs->val.vni = be32_to_cpu(key->keyid);
+ fs->mask.vni = be32_to_cpu(mask->keyid);
+ if (fs->mask.vni) {
+ fs->val.encap_vld = 1;
+ fs->mask.encap_vld = 1;
+ }
+ }
+
if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
struct flow_dissector_key_vlan *key, *mask;
u16 vlan_tci, vlan_tci_mask;
@@ -247,6 +264,7 @@ static int cxgb4_validate_flow_match(struct net_device *dev,
BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_PORTS) |
+ BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
BIT(FLOW_DISSECTOR_KEY_VLAN) |
BIT(FLOW_DISSECTOR_KEY_IP))) {
netdev_warn(dev, "Unsupported key used: 0x%x\n",
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index 1817a0307d26..77c2c538b1fd 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -491,7 +491,7 @@ u64 cxgb4_select_ntuple(struct net_device *dev,
if (tp->protocol_shift >= 0)
ntuple |= (u64)IPPROTO_TCP << tp->protocol_shift;
- if (tp->vnic_shift >= 0) {
+ if (tp->vnic_shift >= 0 && (tp->ingress_config & VNIC_F)) {
u32 viid = cxgb4_port_viid(dev);
u32 vf = FW_VIID_VIN_G(viid);
u32 pf = FW_VIID_PFN_G(viid);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 1a28df137e1f..7a271feec5e7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -694,7 +694,7 @@ static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
{
size_t len = nelem * elem_size + stat_size;
void *s = NULL;
- void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL);
+ void *p = dma_zalloc_coherent(dev, len, phys, GFP_KERNEL);
if (!p)
return NULL;
@@ -708,7 +708,6 @@ static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
}
if (metadata)
*(void **)metadata = s;
- memset(p, 0, len);
return p;
}
@@ -1072,12 +1071,27 @@ static void *inline_tx_skb_header(const struct sk_buff *skb,
static u64 hwcsum(enum chip_type chip, const struct sk_buff *skb)
{
int csum_type;
- const struct iphdr *iph = ip_hdr(skb);
+ bool inner_hdr_csum = false;
+ u16 proto, ver;
- if (iph->version == 4) {
- if (iph->protocol == IPPROTO_TCP)
+ if (skb->encapsulation &&
+ (CHELSIO_CHIP_VERSION(chip) > CHELSIO_T5))
+ inner_hdr_csum = true;
+
+ if (inner_hdr_csum) {
+ ver = inner_ip_hdr(skb)->version;
+ proto = (ver == 4) ? inner_ip_hdr(skb)->protocol :
+ inner_ipv6_hdr(skb)->nexthdr;
+ } else {
+ ver = ip_hdr(skb)->version;
+ proto = (ver == 4) ? ip_hdr(skb)->protocol :
+ ipv6_hdr(skb)->nexthdr;
+ }
+
+ if (ver == 4) {
+ if (proto == IPPROTO_TCP)
csum_type = TX_CSUM_TCPIP;
- else if (iph->protocol == IPPROTO_UDP)
+ else if (proto == IPPROTO_UDP)
csum_type = TX_CSUM_UDPIP;
else {
nocsum: /*
@@ -1090,19 +1104,29 @@ nocsum: /*
/*
* this doesn't work with extension headers
*/
- const struct ipv6hdr *ip6h = (const struct ipv6hdr *)iph;
-
- if (ip6h->nexthdr == IPPROTO_TCP)
+ if (proto == IPPROTO_TCP)
csum_type = TX_CSUM_TCPIP6;
- else if (ip6h->nexthdr == IPPROTO_UDP)
+ else if (proto == IPPROTO_UDP)
csum_type = TX_CSUM_UDPIP6;
else
goto nocsum;
}
if (likely(csum_type >= TX_CSUM_TCPIP)) {
- u64 hdr_len = TXPKT_IPHDR_LEN_V(skb_network_header_len(skb));
- int eth_hdr_len = skb_network_offset(skb) - ETH_HLEN;
+ int eth_hdr_len, l4_len;
+ u64 hdr_len;
+
+ if (inner_hdr_csum) {
+ /* This allows checksum offload for all encapsulated
+ * packets like GRE, etc.
+ */
+ l4_len = skb_inner_network_header_len(skb);
+ eth_hdr_len = skb_inner_network_offset(skb) - ETH_HLEN;
+ } else {
+ l4_len = skb_network_header_len(skb);
+ eth_hdr_len = skb_network_offset(skb) - ETH_HLEN;
+ }
+ hdr_len = TXPKT_IPHDR_LEN_V(l4_len);
if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5)
hdr_len |= TXPKT_ETHHDR_LEN_V(eth_hdr_len);
@@ -1273,7 +1297,7 @@ static inline void t6_fill_tnl_lso(struct sk_buff *skb,
netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
u32 wr_mid, ctrl0, op;
- u64 cntrl, *end;
+ u64 cntrl, *end, *sgl;
int qidx, credits;
unsigned int flits, ndesc;
struct adapter *adap;
@@ -1386,8 +1410,9 @@ out_free: dev_kfree_skb_any(skb);
end = (u64 *)wr + flits;
len = immediate ? skb->len : 0;
+ len += sizeof(*cpl);
if (ssi->gso_size) {
- struct cpl_tx_pkt_lso *lso = (void *)wr;
+ struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
int l3hdr_len = skb_network_header_len(skb);
int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
@@ -1417,20 +1442,19 @@ out_free: dev_kfree_skb_any(skb);
if (skb->ip_summed == CHECKSUM_PARTIAL)
cntrl = hwcsum(adap->params.chip, skb);
} else {
- lso->c.lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
- LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F |
- LSO_IPV6_V(v6) |
- LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
- LSO_IPHDR_LEN_V(l3hdr_len / 4) |
- LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
- lso->c.ipid_ofst = htons(0);
- lso->c.mss = htons(ssi->gso_size);
- lso->c.seqno_offset = htonl(0);
+ lso->lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
+ LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F |
+ LSO_IPV6_V(v6) |
+ LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
+ LSO_IPHDR_LEN_V(l3hdr_len / 4) |
+ LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
+ lso->ipid_ofst = htons(0);
+ lso->mss = htons(ssi->gso_size);
+ lso->seqno_offset = htonl(0);
if (is_t4(adap->params.chip))
- lso->c.len = htonl(skb->len);
+ lso->len = htonl(skb->len);
else
- lso->c.len =
- htonl(LSO_T5_XFER_SIZE_V(skb->len));
+ lso->len = htonl(LSO_T5_XFER_SIZE_V(skb->len));
cpl = (void *)(lso + 1);
if (CHELSIO_CHIP_VERSION(adap->params.chip)
@@ -1443,10 +1467,22 @@ out_free: dev_kfree_skb_any(skb);
TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
TXPKT_IPHDR_LEN_V(l3hdr_len);
}
+ sgl = (u64 *)(cpl + 1); /* sgl starts here */
+ if (unlikely((u8 *)sgl >= (u8 *)q->q.stat)) {
+ /* If the current position is already at the end of the
+ * txq, wrap it back to the start of the queue and update
+ * the end pointer as well.
+ */
+ if (sgl == (u64 *)q->q.stat) {
+ int left = (u8 *)end - (u8 *)q->q.stat;
+
+ end = (void *)q->q.desc + left;
+ sgl = (void *)q->q.desc;
+ }
+ }
q->tso++;
q->tx_cso += ssi->gso_segs;
} else {
- len += sizeof(*cpl);
if (ptp_enabled)
op = FW_PTP_TX_PKT_WR;
else
@@ -1454,6 +1490,7 @@ out_free: dev_kfree_skb_any(skb);
wr->op_immdlen = htonl(FW_WR_OP_V(op) |
FW_WR_IMMDLEN_V(len));
cpl = (void *)(wr + 1);
+ sgl = (u64 *)(cpl + 1);
if (skb->ip_summed == CHECKSUM_PARTIAL) {
cntrl = hwcsum(adap->params.chip, skb) |
TXPKT_IPCSUM_DIS_F;
@@ -1487,20 +1524,19 @@ out_free: dev_kfree_skb_any(skb);
cpl->ctrl1 = cpu_to_be64(cntrl);
if (immediate) {
- cxgb4_inline_tx_skb(skb, &q->q, cpl + 1);
+ cxgb4_inline_tx_skb(skb, &q->q, sgl);
dev_consume_skb_any(skb);
} else {
int last_desc;
- cxgb4_write_sgl(skb, &q->q, (struct ulptx_sgl *)(cpl + 1),
- end, 0, addr);
+ cxgb4_write_sgl(skb, &q->q, (void *)sgl, end, 0, addr);
skb_orphan(skb);
last_desc = q->q.pidx + ndesc - 1;
if (last_desc >= q->q.size)
last_desc -= q->q.size;
q->q.sdesc[last_desc].skb = skb;
- q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)(cpl + 1);
+ q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)sgl;
}
txq_advance(&q->q, ndesc);
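The SGL wrap handling added above deserves a picture. A sketch (illustrative, not part of the patch) of the geometry: the ring runs from q->q.desc to q->q.stat, with the status page sitting just past the last descriptor:

	/*
	 *   desc[0] ... desc[size-1] | status page (q->q.stat)
	 *   ^ rebased sgl              ^ old sgl == q->q.stat
	 *
	 * left = (u8 *)end - (u8 *)q->q.stat is the number of SGL bytes
	 * that would have spilled past the status page; after the wrap,
	 * sgl points at q->q.desc and end at q->q.desc + left.
	 */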
@@ -2259,7 +2295,7 @@ static void cxgb4_sgetim_to_hwtstamp(struct adapter *adap,
}
static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
- const struct cpl_rx_pkt *pkt)
+ const struct cpl_rx_pkt *pkt, unsigned long tnl_hdr_len)
{
struct adapter *adapter = rxq->rspq.adap;
struct sge *s = &adapter->sge;
@@ -2275,6 +2311,8 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
}
copy_frags(skb, gl, s->pktshift);
+ if (tnl_hdr_len)
+ skb->csum_level = 1;
skb->len = gl->tot_len - s->pktshift;
skb->data_len = skb->len;
skb->truesize += skb->data_len;
@@ -2406,7 +2444,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
struct sge *s = &q->adap->sge;
int cpl_trace_pkt = is_t4(q->adap->params.chip) ?
CPL_TRACE_PKT : CPL_TRACE_PKT_T5;
- u16 err_vec;
+ u16 err_vec, tnl_hdr_len = 0;
struct port_info *pi;
int ret = 0;
@@ -2415,16 +2453,19 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
pkt = (const struct cpl_rx_pkt *)rsp;
/* Compressed error vector is enabled for T6 only */
- if (q->adap->params.tp.rx_pkt_encap)
+ if (q->adap->params.tp.rx_pkt_encap) {
err_vec = T6_COMPR_RXERR_VEC_G(be16_to_cpu(pkt->err_vec));
- else
+ tnl_hdr_len = T6_RX_TNLHDR_LEN_G(ntohs(pkt->err_vec));
+ } else {
err_vec = be16_to_cpu(pkt->err_vec);
+ }
csum_ok = pkt->csum_calc && !err_vec &&
(q->netdev->features & NETIF_F_RXCSUM);
- if ((pkt->l2info & htonl(RXF_TCP_F)) &&
+ if (((pkt->l2info & htonl(RXF_TCP_F)) ||
+ tnl_hdr_len) &&
(q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
- do_gro(rxq, si, pkt);
+ do_gro(rxq, si, pkt, tnl_hdr_len);
return 0;
}
@@ -2471,7 +2512,13 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
} else if (pkt->l2info & htonl(RXF_IP_F)) {
__sum16 c = (__force __sum16)pkt->csum;
skb->csum = csum_unfold(c);
- skb->ip_summed = CHECKSUM_COMPLETE;
+
+ if (tnl_hdr_len) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->csum_level = 1;
+ } else {
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ }
rxq->stats.rx_cso++;
}
} else {
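To summarize the new RX checksum decision (a sketch, not part of the patch): when T6 reports a non-zero tunnel header length, the outer checksum and one inner checksum have both been verified by hardware, so the frame is marked accordingly instead of carrying a raw ones'-complement sum:

	if (tnl_hdr_len) {
		/* csum_level = 1: one encapsulated checksum was also
		 * verified beyond the outer one.
		 */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->csum_level = 1;
	} else {
		/* raw ones'-complement sum delivered in skb->csum */
		skb->ip_summed = CHECKSUM_COMPLETE;
	}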
diff --git a/drivers/net/ethernet/chelsio/cxgb4/srq.c b/drivers/net/ethernet/chelsio/cxgb4/srq.c
index 6228a5708307..82b70a565e24 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/srq.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/srq.c
@@ -84,8 +84,7 @@ int cxgb4_get_srq_entry(struct net_device *dev,
if (!skb)
return -ENOMEM;
req = (struct cpl_srq_table_req *)
- __skb_put(skb, sizeof(*req));
- memset(req, 0, sizeof(*req));
+ __skb_put_zero(skb, sizeof(*req));
INIT_TP_WR(req, 0);
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SRQ_TABLE_REQ,
TID_TID_V(srq_idx) |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_chip_type.h b/drivers/net/ethernet/chelsio/cxgb4/t4_chip_type.h
index 54b718111e3f..721c77577ec5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_chip_type.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_chip_type.h
@@ -34,6 +34,8 @@
#ifndef __T4_CHIP_TYPE_H__
#define __T4_CHIP_TYPE_H__
+#define CHELSIO_PCI_ID_VER(__DeviceID) ((__DeviceID) >> 12)
+
#define CHELSIO_T4 0x4
#define CHELSIO_T5 0x5
#define CHELSIO_T6 0x6
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 7cb3ef466cc7..974a868a4824 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -3941,8 +3941,9 @@ static fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16)
CAP16_TO_CAP32(FC_RX);
CAP16_TO_CAP32(FC_TX);
CAP16_TO_CAP32(ANEG);
- CAP16_TO_CAP32(MDIX);
+ CAP16_TO_CAP32(FORCE_PAUSE);
CAP16_TO_CAP32(MDIAUTO);
+ CAP16_TO_CAP32(MDISTRAIGHT);
CAP16_TO_CAP32(FEC_RS);
CAP16_TO_CAP32(FEC_BASER_RS);
CAP16_TO_CAP32(802_3_PAUSE);
@@ -3982,8 +3983,9 @@ static fw_port_cap16_t fwcaps32_to_caps16(fw_port_cap32_t caps32)
CAP32_TO_CAP16(802_3_PAUSE);
CAP32_TO_CAP16(802_3_ASM_DIR);
CAP32_TO_CAP16(ANEG);
- CAP32_TO_CAP16(MDIX);
+ CAP32_TO_CAP16(FORCE_PAUSE);
CAP32_TO_CAP16(MDIAUTO);
+ CAP32_TO_CAP16(MDISTRAIGHT);
CAP32_TO_CAP16(FEC_RS);
CAP32_TO_CAP16(FEC_BASER_RS);
@@ -4014,6 +4016,8 @@ static inline fw_port_cap32_t cc_to_fwcap_pause(enum cc_pause cc_pause)
fw_pause |= FW_PORT_CAP32_FC_RX;
if (cc_pause & PAUSE_TX)
fw_pause |= FW_PORT_CAP32_FC_TX;
+ if (!(cc_pause & PAUSE_AUTONEG))
+ fw_pause |= FW_PORT_CAP32_FORCE_PAUSE;
return fw_pause;
}
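The net effect of the FORCE_PAUSE addition in cc_to_fwcap_pause() can be tabulated (illustrative, not part of the patch):

	/*
	 * requested cc_pause flags              resulting fw_pause bits
	 * PAUSE_RX | PAUSE_TX | PAUSE_AUTONEG   FC_RX | FC_TX
	 * PAUSE_RX            | PAUSE_AUTONEG   FC_RX
	 * PAUSE_RX | PAUSE_TX                   FC_RX | FC_TX | FORCE_PAUSE
	 * (none)                                FORCE_PAUSE
	 *
	 * i.e. FORCE_PAUSE is set exactly when the user did not request
	 * pause auto-negotiation, telling the firmware to apply the pause
	 * settings regardless of what the link partner advertises.
	 */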
@@ -4058,14 +4062,17 @@ static inline fw_port_cap32_t cc_to_fwcap_fec(enum cc_fec cc_fec)
* - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
* otherwise do it later based on the outcome of auto-negotiation.
*/
-int t4_link_l1cfg(struct adapter *adapter, unsigned int mbox,
- unsigned int port, struct link_config *lc)
+int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox,
+ unsigned int port, struct link_config *lc,
+ bool sleep_ok, int timeout)
{
unsigned int fw_caps = adapter->params.fw_caps_support;
- struct fw_port_cmd cmd;
- unsigned int fw_mdi = FW_PORT_CAP32_MDI_V(FW_PORT_CAP32_MDI_AUTO);
fw_port_cap32_t fw_fc, cc_fec, fw_fec, rcap;
+ struct fw_port_cmd cmd;
+ unsigned int fw_mdi;
+ int ret;
+ fw_mdi = (FW_PORT_CAP32_MDI_V(FW_PORT_CAP32_MDI_AUTO) & lc->pcaps);
/* Convert driver coding of Pause Frame Flow Control settings into the
* Firmware's API.
*/
@@ -4087,7 +4094,7 @@ int t4_link_l1cfg(struct adapter *adapter, unsigned int mbox,
/* Figure out what our Requested Port Capabilities are going to be.
*/
if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) {
- rcap = (lc->pcaps & ADVERT_MASK) | fw_fc | fw_fec;
+ rcap = lc->acaps | fw_fc | fw_fec;
lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
lc->fec = cc_fec;
} else if (lc->autoneg == AUTONEG_DISABLE) {
@@ -4098,6 +4105,17 @@ int t4_link_l1cfg(struct adapter *adapter, unsigned int mbox,
rcap = lc->acaps | fw_fc | fw_fec | fw_mdi;
}
+ /* Note that older Firmware doesn't have FW_PORT_CAP32_FORCE_PAUSE, so
+ * we need to exclude this from this check in order to maintain
+ * compatibility ...
+ */
+ if ((rcap & ~lc->pcaps) & ~FW_PORT_CAP32_FORCE_PAUSE) {
+ dev_err(adapter->pdev_dev,
+ "Requested Port Capabilities %#x exceed Physical Port Capabilities %#x\n",
+ rcap, lc->pcaps);
+ return -EINVAL;
+ }
+
/* And send that on to the Firmware ...
*/
memset(&cmd, 0, sizeof(cmd));
@@ -4108,12 +4126,21 @@ int t4_link_l1cfg(struct adapter *adapter, unsigned int mbox,
cpu_to_be32(FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16
? FW_PORT_ACTION_L1_CFG
: FW_PORT_ACTION_L1_CFG32) |
- FW_LEN16(cmd));
+ FW_LEN16(cmd));
if (fw_caps == FW_CAPS16)
cmd.u.l1cfg.rcap = cpu_to_be32(fwcaps32_to_caps16(rcap));
else
cmd.u.l1cfg32.rcap32 = cpu_to_be32(rcap);
- return t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
+
+ ret = t4_wr_mbox_meat_timeout(adapter, mbox, &cmd, sizeof(cmd), NULL,
+ sleep_ok, timeout);
+ if (ret) {
+ dev_err(adapter->pdev_dev,
+ "Requested Port Capabilities %#x rejected, error %d\n",
+ rcap, -ret);
+ return ret;
+ }
+ return ret;
}
/**
@@ -7513,6 +7540,43 @@ int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
}
/**
+ * t4_free_encap_mac_filt - frees MPS entry at given index
+ * @adap: the adapter
+ * @viid: the VI id
+ * @idx: index of MPS entry to be freed
+ * @sleep_ok: call is allowed to sleep
+ *
+ * Frees the MPS entry at the supplied index
+ *
+ * Returns a negative error number or zero on success
+ */
+int t4_free_encap_mac_filt(struct adapter *adap, unsigned int viid,
+ int idx, bool sleep_ok)
+{
+ struct fw_vi_mac_exact *p;
+ u8 addr[] = {0, 0, 0, 0, 0, 0};
+ struct fw_vi_mac_cmd c;
+ int ret = 0;
+ u32 exact;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+ FW_CMD_EXEC_V(0) |
+ FW_VI_MAC_CMD_VIID_V(viid));
+ exact = FW_VI_MAC_CMD_ENTRY_TYPE_V(FW_VI_MAC_TYPE_EXACTMAC);
+ c.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) |
+ exact |
+ FW_CMD_LEN16_V(1));
+ p = c.u.exact;
+ p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
+ FW_VI_MAC_CMD_IDX_V(idx));
+ memcpy(p->macaddr, addr, sizeof(p->macaddr));
+ ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
+ return ret;
+}
+
+/**
* t4_free_raw_mac_filt - Frees a raw mac entry in mps tcam
* @adap: the adapter
* @viid: the VI id
@@ -7563,6 +7627,55 @@ int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid,
}
/**
+ * t4_alloc_encap_mac_filt - Adds a mac entry in mps tcam with VNI support
+ * @adap: the adapter
+ * @viid: the VI id
+ * @mac: the MAC address
+ * @mask: the mask
+ * @vni: the VNI id for the tunnel protocol
+ * @vni_mask: mask for the VNI id
+ * @dip_hit: to enable DIP match for the MPS entry
+ * @lookup_type: MAC address for inner (1) or outer (0) header
+ * @sleep_ok: call is allowed to sleep
+ *
+ * Allocates an MPS entry with specified MAC address and VNI value.
+ *
+ * Returns a negative error number or the allocated index for this mac.
+ */
+int t4_alloc_encap_mac_filt(struct adapter *adap, unsigned int viid,
+ const u8 *addr, const u8 *mask, unsigned int vni,
+ unsigned int vni_mask, u8 dip_hit, u8 lookup_type,
+ bool sleep_ok)
+{
+ struct fw_vi_mac_cmd c;
+ struct fw_vi_mac_vni *p = c.u.exact_vni;
+ int ret = 0;
+ u32 val;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+ FW_VI_MAC_CMD_VIID_V(viid));
+ val = FW_CMD_LEN16_V(1) |
+ FW_VI_MAC_CMD_ENTRY_TYPE_V(FW_VI_MAC_TYPE_EXACTMAC_VNI);
+ c.freemacs_to_len16 = cpu_to_be32(val);
+ p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
+ FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_ADD_MAC));
+ memcpy(p->macaddr, addr, sizeof(p->macaddr));
+ memcpy(p->macaddr_mask, mask, sizeof(p->macaddr_mask));
+
+ p->lookup_type_to_vni =
+ cpu_to_be32(FW_VI_MAC_CMD_VNI_V(vni) |
+ FW_VI_MAC_CMD_DIP_HIT_V(dip_hit) |
+ FW_VI_MAC_CMD_LOOKUP_TYPE_V(lookup_type));
+ p->vni_mask_pkd = cpu_to_be32(FW_VI_MAC_CMD_VNI_MASK_V(vni_mask));
+ ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
+ if (ret == 0)
+ ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx));
+ return ret;
+}
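A hypothetical caller sketch for the two new MPS helpers (not part of the patch; adap, viid and dmac are assumed to come from the caller): install an entry matching the inner MAC of tunnelled frames carrying VNI 100, then release it:

	static const u8 dmac_mask[ETH_ALEN] = {
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff
	};
	int idx;

	idx = t4_alloc_encap_mac_filt(adap, viid, dmac, dmac_mask,
				      100, 0xffffff,	/* VNI, full VNI mask */
				      1,		/* dip_hit */
				      1,		/* match inner header */
				      true);
	if (idx >= 0)
		t4_free_encap_mac_filt(adap, viid, idx, true);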
+
+/**
* t4_alloc_raw_mac_filt - Adds a mac entry in mps tcam
* @adap: the adapter
* @viid: the VI id
@@ -7909,6 +8022,34 @@ int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
}
/**
+ * t4_enable_pi_params - enable/disable a Port's Virtual Interface
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pi: the Port Information structure
+ * @rx_en: 1=enable Rx, 0=disable Rx
+ * @tx_en: 1=enable Tx, 0=disable Tx
+ * @dcb_en: 1=enable delivery of Data Center Bridging messages.
+ *
+ * Enables/disables a Port's Virtual Interface. Note that setting DCB
+ * Enable only makes sense when enabling a Virtual Interface ...
+ * If the Virtual Interface enable/disable operation is successful,
+ * we notify the OS-specific code of a potential Link Status change
+ * via the OS Contract API t4_os_link_changed().
+ */
+int t4_enable_pi_params(struct adapter *adap, unsigned int mbox,
+ struct port_info *pi,
+ bool rx_en, bool tx_en, bool dcb_en)
+{
+ int ret = t4_enable_vi_params(adap, mbox, pi->viid,
+ rx_en, tx_en, dcb_en);
+ if (ret)
+ return ret;
+ t4_os_link_changed(adap, pi->port_id,
+ rx_en && tx_en && pi->link_cfg.link_ok);
+ return 0;
+}
+
+/**
* t4_identify_port - identify a VI's port by blinking its LED
* @adap: the adapter
* @mbox: mailbox to use for the FW command
@@ -8249,6 +8390,9 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
fc = fwcap_to_cc_pause(linkattr);
speed = fwcap_to_speed(linkattr);
+ lc->new_module = false;
+ lc->redo_l1cfg = false;
+
if (mod_type != pi->mod_type) {
/* With the newer SFP28 and QSFP28 Transceiver Module Types,
* various fundamental Port Capabilities which used to be
@@ -8283,6 +8427,8 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
pi->port_type = port_type;
pi->mod_type = mod_type;
+
+ lc->new_module = t4_is_inserted_mod_type(mod_type);
t4_os_portmod_changed(adapter, pi->port_id);
}
@@ -8301,7 +8447,9 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
lc->lpacaps = lpacaps;
lc->acaps = acaps & ADVERT_MASK;
- if (lc->acaps & FW_PORT_CAP32_ANEG) {
+ if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) {
+ lc->autoneg = AUTONEG_DISABLE;
+ } else if (lc->acaps & FW_PORT_CAP32_ANEG) {
lc->autoneg = AUTONEG_ENABLE;
} else {
/* When Autoneg is disabled, user needs to set
@@ -8315,6 +8463,26 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
t4_os_link_changed(adapter, pi->port_id, link_ok);
}
+
+ if (lc->new_module && lc->redo_l1cfg) {
+ struct link_config old_lc;
+ int ret;
+
+ /* Save the current L1 Configuration and restore it if an
+ * error occurs. We probably should fix the l1_cfg*()
+ * routines not to change the link_config when an error
+ * occurs ...
+ */
+ old_lc = *lc;
+ ret = t4_link_l1cfg_ns(adapter, adapter->mbox, pi->lport, lc);
+ if (ret) {
+ *lc = old_lc;
+ dev_warn(adapter->pdev_dev,
+ "Attempt to update new Transceiver Module settings failed\n");
+ }
+ }
+ lc->new_module = false;
+ lc->redo_l1cfg = false;
}
/**
@@ -8486,6 +8654,13 @@ static void init_link_config(struct link_config *lc, fw_port_cap32_t pcaps,
lc->requested_fec = FEC_AUTO;
lc->fec = fwcap_to_cc_fec(lc->def_acaps);
+ /* If the Port is capable of Auto-Negotiation, initialize it as
+ * "enabled" and copy over all of the Physical Port Capabilities
+ * to the Advertised Port Capabilities. Otherwise mark it as
+ * Auto-Negotiate disabled and select the highest supported speed
+ * for the link. Note parallel structure in t4_link_l1cfg_core()
+ * and t4_handle_get_port_info().
+ */
if (lc->pcaps & FW_PORT_CAP32_ANEG) {
lc->acaps = lc->pcaps & ADVERT_MASK;
lc->autoneg = AUTONEG_ENABLE;
@@ -8493,6 +8668,7 @@ static void init_link_config(struct link_config *lc, fw_port_cap32_t pcaps,
} else {
lc->acaps = 0;
lc->autoneg = AUTONEG_DISABLE;
+ lc->speed_caps = fwcap_to_fwspeed(acaps);
}
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index fe2029e993a2..09e38f0733bd 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -1233,6 +1233,11 @@ struct cpl_rx_pkt {
#define T6_COMPR_RXERR_SUM_V(x) ((x) << T6_COMPR_RXERR_SUM_S)
#define T6_COMPR_RXERR_SUM_F T6_COMPR_RXERR_SUM_V(1U)
+#define T6_RX_TNLHDR_LEN_S 8
+#define T6_RX_TNLHDR_LEN_M 0xFF
+#define T6_RX_TNLHDR_LEN_V(x) ((x) << T6_RX_TNLHDR_LEN_S)
+#define T6_RX_TNLHDR_LEN_G(x) (((x) >> T6_RX_TNLHDR_LEN_S) & T6_RX_TNLHDR_LEN_M)
+
struct cpl_trace_pkt {
u8 opcode;
u8 intf;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index 51b18035d691..c7f8d0441278 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -145,6 +145,9 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x5016), /* T580-OCP-SO */
CH_PCI_ID_TABLE_FENTRY(0x5017), /* T520-OCP-SO */
CH_PCI_ID_TABLE_FENTRY(0x5018), /* T540-BT */
+ CH_PCI_ID_TABLE_FENTRY(0x5019), /* T540-LP-BT */
+ CH_PCI_ID_TABLE_FENTRY(0x501a), /* T540-SO-BT */
+ CH_PCI_ID_TABLE_FENTRY(0x501b), /* T540-SO-CR */
CH_PCI_ID_TABLE_FENTRY(0x5080), /* Custom T540-cr */
CH_PCI_ID_TABLE_FENTRY(0x5081), /* Custom T540-LL-cr */
CH_PCI_ID_TABLE_FENTRY(0x5082), /* Custom T504-cr */
@@ -184,6 +187,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x50aa), /* Custom T580-CR */
CH_PCI_ID_TABLE_FENTRY(0x50ab), /* Custom T520-CR */
CH_PCI_ID_TABLE_FENTRY(0x50ac), /* Custom T540-BT */
+ CH_PCI_ID_TABLE_FENTRY(0x50ad), /* Custom T520-CR */
/* T6 adapters:
*/
@@ -208,6 +212,8 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x6085), /* Custom T6240-SO */
CH_PCI_ID_TABLE_FENTRY(0x6086), /* Custom T6225-SO-CR */
CH_PCI_ID_TABLE_FENTRY(0x6087), /* Custom T6225-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x6088), /* Custom T62100-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x6089), /* Custom T62100-KR */
CH_PCI_DEVICE_ID_TABLE_DEFINE_END;
#endif /* __T4_PCI_ID_TBL_H__ */
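As an aside on the table above (illustrative, not part of the patch): the chip generation lives in the top nibble of each device ID, which is exactly what the new CHELSIO_PCI_ID_VER() macro in t4_chip_type.h extracts:

	/*
	 * CHELSIO_PCI_ID_VER(0x5019) == 0x5   -> T5 part (T540-LP-BT)
	 * CHELSIO_PCI_ID_VER(0x6088) == 0x6   -> T6 part (custom T62100-CR)
	 */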
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index 276fdf214b75..6b55aa2eb2a5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -1598,6 +1598,10 @@
#define VNIC_V(x) ((x) << VNIC_S)
#define VNIC_F VNIC_V(1U)
+#define USE_ENC_IDX_S 13
+#define USE_ENC_IDX_V(x) ((x) << USE_ENC_IDX_S)
+#define USE_ENC_IDX_F USE_ENC_IDX_V(1U)
+
#define CSUM_HAS_PSEUDO_HDR_S 10
#define CSUM_HAS_PSEUDO_HDR_V(x) ((x) << CSUM_HAS_PSEUDO_HDR_S)
#define CSUM_HAS_PSEUDO_HDR_F CSUM_HAS_PSEUDO_HDR_V(1U)
@@ -2995,6 +2999,7 @@
#define LE_DB_HASH_TID_BASE_A 0x19c30
#define LE_DB_HASH_TBL_BASE_ADDR_A 0x19c30
#define LE_DB_INT_CAUSE_A 0x19c3c
+#define LE_DB_CLCAM_TID_BASE_A 0x19df4
#define LE_DB_TID_HASHBASE_A 0x19df8
#define T6_LE_DB_HASH_TID_BASE_A 0x19df8
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index e3d4751f21ac..f1967cf6d43c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -1305,6 +1305,8 @@ enum fw_params_param_pfvf {
FW_PARAMS_PARAM_PFVF_HPFILTER_END = 0x33,
FW_PARAMS_PARAM_PFVF_TLS_START = 0x34,
FW_PARAMS_PARAM_PFVF_TLS_END = 0x35,
+ FW_PARAMS_PARAM_PFVF_RAWF_START = 0x36,
+ FW_PARAMS_PARAM_PFVF_RAWF_END = 0x37,
FW_PARAMS_PARAM_PFVF_NCRYPTO_LOOKASIDE = 0x39,
FW_PARAMS_PARAM_PFVF_PORT_CAPS32 = 0x3A,
};
@@ -2156,6 +2158,14 @@ struct fw_vi_mac_cmd {
__be64 data0m_pkd;
__be32 data1m[2];
} raw;
+ struct fw_vi_mac_vni {
+ __be16 valid_to_idx;
+ __u8 macaddr[6];
+ __be16 r7;
+ __u8 macaddr_mask[6];
+ __be32 lookup_type_to_vni;
+ __be32 vni_mask_pkd;
+ } exact_vni[2];
} u;
};
@@ -2203,6 +2213,32 @@ struct fw_vi_mac_cmd {
#define FW_VI_MAC_CMD_RAW_IDX_G(x) \
(((x) >> FW_VI_MAC_CMD_RAW_IDX_S) & FW_VI_MAC_CMD_RAW_IDX_M)
+#define FW_VI_MAC_CMD_LOOKUP_TYPE_S 31
+#define FW_VI_MAC_CMD_LOOKUP_TYPE_M 0x1
+#define FW_VI_MAC_CMD_LOOKUP_TYPE_V(x) ((x) << FW_VI_MAC_CMD_LOOKUP_TYPE_S)
+#define FW_VI_MAC_CMD_LOOKUP_TYPE_G(x) \
+ (((x) >> FW_VI_MAC_CMD_LOOKUP_TYPE_S) & FW_VI_MAC_CMD_LOOKUP_TYPE_M)
+#define FW_VI_MAC_CMD_LOOKUP_TYPE_F FW_VI_MAC_CMD_LOOKUP_TYPE_V(1U)
+
+#define FW_VI_MAC_CMD_DIP_HIT_S 30
+#define FW_VI_MAC_CMD_DIP_HIT_M 0x1
+#define FW_VI_MAC_CMD_DIP_HIT_V(x) ((x) << FW_VI_MAC_CMD_DIP_HIT_S)
+#define FW_VI_MAC_CMD_DIP_HIT_G(x) \
+ (((x) >> FW_VI_MAC_CMD_DIP_HIT_S) & FW_VI_MAC_CMD_DIP_HIT_M)
+#define FW_VI_MAC_CMD_DIP_HIT_F FW_VI_MAC_CMD_DIP_HIT_V(1U)
+
+#define FW_VI_MAC_CMD_VNI_S 0
+#define FW_VI_MAC_CMD_VNI_M 0xffffff
+#define FW_VI_MAC_CMD_VNI_V(x) ((x) << FW_VI_MAC_CMD_VNI_S)
+#define FW_VI_MAC_CMD_VNI_G(x) \
+ (((x) >> FW_VI_MAC_CMD_VNI_S) & FW_VI_MAC_CMD_VNI_M)
+
+#define FW_VI_MAC_CMD_VNI_MASK_S 0
+#define FW_VI_MAC_CMD_VNI_MASK_M 0xffffff
+#define FW_VI_MAC_CMD_VNI_MASK_V(x) ((x) << FW_VI_MAC_CMD_VNI_MASK_S)
+#define FW_VI_MAC_CMD_VNI_MASK_G(x) \
+ (((x) >> FW_VI_MAC_CMD_VNI_MASK_S) & FW_VI_MAC_CMD_VNI_MASK_M)
+
#define FW_RXMODE_MTU_NO_CHG 65535
struct fw_vi_rxmode_cmd {
@@ -2435,11 +2471,11 @@ enum fw_port_cap {
FW_PORT_CAP_FC_RX = 0x0040,
FW_PORT_CAP_FC_TX = 0x0080,
FW_PORT_CAP_ANEG = 0x0100,
- FW_PORT_CAP_MDIX = 0x0200,
- FW_PORT_CAP_MDIAUTO = 0x0400,
+ FW_PORT_CAP_MDIAUTO = 0x0200,
+ FW_PORT_CAP_MDISTRAIGHT = 0x0400,
FW_PORT_CAP_FEC_RS = 0x0800,
FW_PORT_CAP_FEC_BASER_RS = 0x1000,
- FW_PORT_CAP_FEC_RESERVED = 0x2000,
+ FW_PORT_CAP_FORCE_PAUSE = 0x2000,
FW_PORT_CAP_802_3_PAUSE = 0x4000,
FW_PORT_CAP_802_3_ASM_DIR = 0x8000,
};
@@ -2479,14 +2515,15 @@ enum fw_port_mdi {
#define FW_PORT_CAP32_802_3_PAUSE 0x00040000UL
#define FW_PORT_CAP32_802_3_ASM_DIR 0x00080000UL
#define FW_PORT_CAP32_ANEG 0x00100000UL
-#define FW_PORT_CAP32_MDIX 0x00200000UL
-#define FW_PORT_CAP32_MDIAUTO 0x00400000UL
+#define FW_PORT_CAP32_MDIAUTO 0x00200000UL
+#define FW_PORT_CAP32_MDISTRAIGHT 0x00400000UL
#define FW_PORT_CAP32_FEC_RS 0x00800000UL
#define FW_PORT_CAP32_FEC_BASER_RS 0x01000000UL
#define FW_PORT_CAP32_FEC_RESERVED1 0x02000000UL
#define FW_PORT_CAP32_FEC_RESERVED2 0x04000000UL
#define FW_PORT_CAP32_FEC_RESERVED3 0x08000000UL
-#define FW_PORT_CAP32_RESERVED2 0xf0000000UL
+#define FW_PORT_CAP32_FORCE_PAUSE 0x10000000UL
+#define FW_PORT_CAP32_RESERVED2 0xe0000000UL
#define FW_PORT_CAP32_SPEED_S 0
#define FW_PORT_CAP32_SPEED_M 0xfff
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
index 123e2c1b65f5..4eb15ceddca3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
@@ -36,8 +36,8 @@
#define __T4FW_VERSION_H__
#define T4FW_VERSION_MAJOR 0x01
-#define T4FW_VERSION_MINOR 0x10
-#define T4FW_VERSION_MICRO 0x3F
+#define T4FW_VERSION_MINOR 0x13
+#define T4FW_VERSION_MICRO 0x01
#define T4FW_VERSION_BUILD 0x00
#define T4FW_MIN_VERSION_MAJOR 0x01
@@ -45,8 +45,8 @@
#define T4FW_MIN_VERSION_MICRO 0x00
#define T5FW_VERSION_MAJOR 0x01
-#define T5FW_VERSION_MINOR 0x10
-#define T5FW_VERSION_MICRO 0x3F
+#define T5FW_VERSION_MINOR 0x13
+#define T5FW_VERSION_MICRO 0x01
#define T5FW_VERSION_BUILD 0x00
#define T5FW_MIN_VERSION_MAJOR 0x00
@@ -54,8 +54,8 @@
#define T5FW_MIN_VERSION_MICRO 0x00
#define T6FW_VERSION_MAJOR 0x01
-#define T6FW_VERSION_MINOR 0x10
-#define T6FW_VERSION_MICRO 0x3F
+#define T6FW_VERSION_MINOR 0x13
+#define T6FW_VERSION_MICRO 0x01
#define T6FW_VERSION_BUILD 0x00
#define T6FW_MIN_VERSION_MAJOR 0x00
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 9a81b52307a9..ff84791a0ff8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -274,7 +274,7 @@ static int link_start(struct net_device *dev)
* is enabled on a port.
*/
if (ret == 0)
- ret = t4vf_enable_vi(pi->adapter, pi->viid, true, true);
+ ret = t4vf_enable_pi(pi->adapter, pi, true, true);
/* The Virtual Interfaces are connected to an internal switch on the
* chip which allows VIs attached to the same port to talk to each
@@ -822,8 +822,7 @@ static int cxgb4vf_stop(struct net_device *dev)
netif_tx_stop_all_queues(dev);
netif_carrier_off(dev);
- t4vf_enable_vi(adapter, pi->viid, false, false);
- pi->link_cfg.link_ok = 0;
+ t4vf_enable_pi(adapter, pi, false, false);
clear_bit(pi->port_id, &adapter->open_device_map);
if (adapter->open_device_map == 0)
@@ -1419,6 +1418,22 @@ static int cxgb4vf_get_link_ksettings(struct net_device *dev,
base->duplex = DUPLEX_UNKNOWN;
}
+ if (pi->link_cfg.fc & PAUSE_RX) {
+ if (pi->link_cfg.fc & PAUSE_TX) {
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ advertising,
+ Pause);
+ } else {
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ advertising,
+ Asym_Pause);
+ }
+ } else if (pi->link_cfg.fc & PAUSE_TX) {
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ advertising,
+ Asym_Pause);
+ }
+
base->autoneg = pi->link_cfg.autoneg;
if (pi->link_cfg.pcaps & FW_PORT_CAP32_ANEG)
ethtool_link_ksettings_add_link_mode(link_ksettings,
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index dfce5df7538e..3007e1ac1e61 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -756,7 +756,7 @@ static void *alloc_ring(struct device *dev, size_t nelem, size_t hwsize,
* Allocate the hardware ring and PCI DMA bus address space for said.
*/
size_t hwlen = nelem * hwsize + stat_size;
- void *hwring = dma_alloc_coherent(dev, hwlen, busaddrp, GFP_KERNEL);
+ void *hwring = dma_zalloc_coherent(dev, hwlen, busaddrp, GFP_KERNEL);
if (!hwring)
return NULL;
@@ -776,11 +776,6 @@ static void *alloc_ring(struct device *dev, size_t nelem, size_t hwsize,
*(void **)swringp = swring;
}
- /*
- * Zero out the hardware ring and return its address as our function
- * value.
- */
- memset(hwring, 0, hwlen);
return hwring;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
index 712e8f0c71b4..ccca67cf4487 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
@@ -391,7 +391,10 @@ int t4vf_config_rss_range(struct adapter *, unsigned int, int, int,
int t4vf_alloc_vi(struct adapter *, int);
int t4vf_free_vi(struct adapter *, int);
-int t4vf_enable_vi(struct adapter *, unsigned int, bool, bool);
+int t4vf_enable_vi(struct adapter *adapter, unsigned int viid, bool rx_en,
+ bool tx_en);
+int t4vf_enable_pi(struct adapter *adapter, struct port_info *pi, bool rx_en,
+ bool tx_en);
int t4vf_identify_port(struct adapter *, unsigned int, unsigned int);
int t4vf_set_rxmode(struct adapter *, unsigned int, int, int, int, int, int,
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index 798695bf8678..5b8c08cf523f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -341,8 +341,8 @@ static fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16)
CAP16_TO_CAP32(FC_RX);
CAP16_TO_CAP32(FC_TX);
CAP16_TO_CAP32(ANEG);
- CAP16_TO_CAP32(MDIX);
CAP16_TO_CAP32(MDIAUTO);
+ CAP16_TO_CAP32(MDISTRAIGHT);
CAP16_TO_CAP32(FEC_RS);
CAP16_TO_CAP32(FEC_BASER_RS);
CAP16_TO_CAP32(802_3_PAUSE);
@@ -405,6 +405,36 @@ static unsigned int fwcap_to_speed(fw_port_cap32_t caps)
return 0;
}
+/**
+ * fwcap_to_fwspeed - return highest speed in Port Capabilities
+ * @acaps: advertised Port Capabilities
+ *
+ * Get the highest speed for the port from the advertised Port
+ * Capabilities. It will be either the highest speed from the list of
+ * speeds or whatever the user has set using ethtool.
+ */
+static fw_port_cap32_t fwcap_to_fwspeed(fw_port_cap32_t acaps)
+{
+ #define TEST_SPEED_RETURN(__caps_speed) \
+ do { \
+ if (acaps & FW_PORT_CAP32_SPEED_##__caps_speed) \
+ return FW_PORT_CAP32_SPEED_##__caps_speed; \
+ } while (0)
+
+ TEST_SPEED_RETURN(400G);
+ TEST_SPEED_RETURN(200G);
+ TEST_SPEED_RETURN(100G);
+ TEST_SPEED_RETURN(50G);
+ TEST_SPEED_RETURN(40G);
+ TEST_SPEED_RETURN(25G);
+ TEST_SPEED_RETURN(10G);
+ TEST_SPEED_RETURN(1G);
+ TEST_SPEED_RETURN(100M);
+
+ #undef TEST_SPEED_RETURN
+ return 0;
+}
+
/*
* init_link_config - initialize a link's SW state
* @lc: structure holding the link state
@@ -431,6 +461,13 @@ static void init_link_config(struct link_config *lc,
lc->requested_fec = FEC_AUTO;
lc->fec = lc->auto_fec;
+ /* If the Port is capable of Auto-Negotiation, initialize it as
+ * "enabled" and copy over all of the Physical Port Capabilities
+ * to the Advertised Port Capabilities. Otherwise mark it as
+ * Auto-Negotiate disabled and select the highest supported speed
+ * for the link. Note parallel structure in t4_link_l1cfg_core()
+ * and t4_handle_get_port_info().
+ */
if (lc->pcaps & FW_PORT_CAP32_ANEG) {
lc->acaps = acaps & ADVERT_MASK;
lc->autoneg = AUTONEG_ENABLE;
@@ -438,6 +475,7 @@ static void init_link_config(struct link_config *lc,
} else {
lc->acaps = 0;
lc->autoneg = AUTONEG_DISABLE;
+ lc->speed_caps = fwcap_to_fwspeed(acaps);
}
}
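A worked example for fwcap_to_fwspeed() (illustrative, not part of the patch): a fixed-speed port whose capabilities advertise 25G, 10G and 1G resolves to the single highest speed, which init_link_config() then stores in lc->speed_caps:

	fw_port_cap32_t acaps = FW_PORT_CAP32_SPEED_25G |
				FW_PORT_CAP32_SPEED_10G |
				FW_PORT_CAP32_SPEED_1G;

	/* fwcap_to_fwspeed(acaps) == FW_PORT_CAP32_SPEED_25G */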
@@ -1362,6 +1400,30 @@ int t4vf_enable_vi(struct adapter *adapter, unsigned int viid,
}
/**
+ * t4vf_enable_pi - enable/disable a Port's virtual interface
+ * @adapter: the adapter
+ * @pi: the Port Information structure
+ * @rx_en: 1=enable Rx, 0=disable Rx
+ * @tx_en: 1=enable Tx, 0=disable Tx
+ *
+ * Enables/disables a Port's virtual interface. If the Virtual
+ * Interface enable/disable operation is successful, we notify the
+ * OS-specific code of a potential Link Status change via the OS Contract
+ * API t4vf_os_link_changed().
+ */
+int t4vf_enable_pi(struct adapter *adapter, struct port_info *pi,
+ bool rx_en, bool tx_en)
+{
+ int ret = t4vf_enable_vi(adapter, pi->viid, rx_en, tx_en);
+
+ if (ret)
+ return ret;
+ t4vf_os_link_changed(adapter, pi->pidx,
+ rx_en && tx_en && pi->link_cfg.link_ok);
+ return 0;
+}
+
+/**
* t4vf_identify_port - identify a VI's port by blinking its LED
* @adapter: the adapter
* @viid: the Virtual Interface ID
@@ -1955,7 +2017,14 @@ static void t4vf_handle_get_port_info(struct port_info *pi,
lc->lpacaps = lpacaps;
lc->acaps = acaps & ADVERT_MASK;
- if (lc->acaps & FW_PORT_CAP32_ANEG) {
+ /* If we're not physically capable of Auto-Negotiation, note
+ * this as Auto-Negotiation disabled. Otherwise, we track
+ * what Auto-Negotiation settings we have. Note parallel
+ * structure in init_link_config().
+ */
+ if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) {
+ lc->autoneg = AUTONEG_DISABLE;
+ } else if (lc->acaps & FW_PORT_CAP32_ANEG) {
lc->autoneg = AUTONEG_ENABLE;
} else {
/* When Autoneg is disabled, user needs to set
diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h
index 4b5aacc09cab..240ba9d4c399 100644
--- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h
+++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h
@@ -90,8 +90,7 @@ cxgb_mk_tid_release(struct sk_buff *skb, u32 len, u32 tid, u16 chan)
{
struct cpl_tid_release *req;
- req = __skb_put(skb, len);
- memset(req, 0, len);
+ req = __skb_put_zero(skb, len);
INIT_TP_WR(req, tid);
OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
@@ -104,8 +103,7 @@ cxgb_mk_close_con_req(struct sk_buff *skb, u32 len, u32 tid, u16 chan,
{
struct cpl_close_con_req *req;
- req = __skb_put(skb, len);
- memset(req, 0, len);
+ req = __skb_put_zero(skb, len);
INIT_TP_WR(req, tid);
OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
@@ -119,8 +117,7 @@ cxgb_mk_abort_req(struct sk_buff *skb, u32 len, u32 tid, u16 chan,
{
struct cpl_abort_req *req;
- req = __skb_put(skb, len);
- memset(req, 0, len);
+ req = __skb_put_zero(skb, len);
INIT_TP_WR(req, tid);
OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
@@ -134,8 +131,7 @@ cxgb_mk_abort_rpl(struct sk_buff *skb, u32 len, u32 tid, u16 chan)
{
struct cpl_abort_rpl *rpl;
- rpl = __skb_put(skb, len);
- memset(rpl, 0, len);
+ rpl = __skb_put_zero(skb, len);
INIT_TP_WR(rpl, tid);
OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
@@ -149,8 +145,7 @@ cxgb_mk_rx_data_ack(struct sk_buff *skb, u32 len, u32 tid, u16 chan,
{
struct cpl_rx_data_ack *req;
- req = __skb_put(skb, len);
- memset(req, 0, len);
+ req = __skb_put_zero(skb, len);
INIT_TP_WR(req, tid);
OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK, tid));
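All five conversions above are the same mechanical substitution. A minimal equivalence sketch (not part of the patch):

	req = __skb_put(skb, len);
	memset(req, 0, len);

	/* ... collapses to ... */
	req = __skb_put_zero(skb, len);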
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index 869006c2002d..f42f7a6e1559 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -476,18 +476,28 @@ static int enic_grxclsrule(struct enic *enic, struct ethtool_rxnfc *cmd)
static int enic_get_rx_flow_hash(struct enic *enic, struct ethtool_rxnfc *cmd)
{
+ u8 rss_hash_type = 0;
cmd->data = 0;
+ spin_lock_bh(&enic->devcmd_lock);
+ (void)vnic_dev_capable_rss_hash_type(enic->vdev, &rss_hash_type);
+ spin_unlock_bh(&enic->devcmd_lock);
switch (cmd->flow_type) {
case TCP_V6_FLOW:
case TCP_V4_FLOW:
- cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- /* Fall through */
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3 |
+ RXH_IP_SRC | RXH_IP_DST;
+ break;
case UDP_V6_FLOW:
+ cmd->data |= RXH_IP_SRC | RXH_IP_DST;
+ if (rss_hash_type & NIC_CFG_RSS_HASH_TYPE_UDP_IPV6)
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
case UDP_V4_FLOW:
- if (vnic_dev_capable_udp_rss(enic->vdev))
+ cmd->data |= RXH_IP_SRC | RXH_IP_DST;
+ if (rss_hash_type & NIC_CFG_RSS_HASH_TYPE_UDP_IPV4)
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- /* Fall through */
+ break;
case SCTP_V4_FLOW:
case AH_ESP_V4_FLOW:
case AH_V4_FLOW:
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 8a8b12b720ef..30d2eaa18c04 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -2320,16 +2320,24 @@ static int enic_set_rss_nic_cfg(struct enic *enic)
{
struct device *dev = enic_get_dev(enic);
const u8 rss_default_cpu = 0;
- u8 rss_hash_type = NIC_CFG_RSS_HASH_TYPE_IPV4 |
- NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 |
- NIC_CFG_RSS_HASH_TYPE_IPV6 |
- NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
const u8 rss_hash_bits = 7;
const u8 rss_base_cpu = 0;
+ u8 rss_hash_type;
+ int res;
u8 rss_enable = ENIC_SETTING(enic, RSS) && (enic->rq_count > 1);
- if (vnic_dev_capable_udp_rss(enic->vdev))
- rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_UDP;
+ spin_lock_bh(&enic->devcmd_lock);
+ res = vnic_dev_capable_rss_hash_type(enic->vdev, &rss_hash_type);
+ spin_unlock_bh(&enic->devcmd_lock);
+ if (res) {
+ /* defaults for old adapters */
+ rss_hash_type = NIC_CFG_RSS_HASH_TYPE_IPV4 |
+ NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 |
+ NIC_CFG_RSS_HASH_TYPE_IPV6 |
+ NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
+ }
+
if (rss_enable) {
if (!enic_set_rsskey(enic)) {
if (enic_set_rsscpu(enic, rss_hash_bits)) {
diff --git a/drivers/net/ethernet/cisco/enic/enic_res.c b/drivers/net/ethernet/cisco/enic/enic_res.c
index 9c96911fb2c8..40b20817ddd5 100644
--- a/drivers/net/ethernet/cisco/enic/enic_res.c
+++ b/drivers/net/ethernet/cisco/enic/enic_res.c
@@ -149,6 +149,7 @@ int enic_set_nic_cfg(struct enic *enic, u8 rss_default_cpu, u8 rss_hash_type,
u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable, u8 tso_ipid_split_en,
u8 ig_vlan_strip_en)
{
+ enum vnic_devcmd_cmd cmd = CMD_NIC_CFG;
u64 a0, a1;
u32 nic_cfg;
int wait = 1000;
@@ -160,7 +161,11 @@ int enic_set_nic_cfg(struct enic *enic, u8 rss_default_cpu, u8 rss_hash_type,
a0 = nic_cfg;
a1 = 0;
- return vnic_dev_cmd(enic->vdev, CMD_NIC_CFG, &a0, &a1, wait);
+ if (rss_hash_type & (NIC_CFG_RSS_HASH_TYPE_UDP_IPV4 |
+ NIC_CFG_RSS_HASH_TYPE_UDP_IPV6))
+ cmd = CMD_NIC_CFG_CHK;
+
+ return vnic_dev_cmd(enic->vdev, cmd, &a0, &a1, wait);
}
int enic_set_rss_key(struct enic *enic, dma_addr_t key_pa, u64 len)
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c
index 76cdd4c9d11f..e9db811df59c 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c
@@ -1282,19 +1282,23 @@ int vnic_dev_get_supported_feature_ver(struct vnic_dev *vdev, u8 feature,
return ret;
}
-bool vnic_dev_capable_udp_rss(struct vnic_dev *vdev)
+int vnic_dev_capable_rss_hash_type(struct vnic_dev *vdev, u8 *rss_hash_type)
{
u64 a0 = CMD_NIC_CFG, a1 = 0;
- u64 rss_hash_type;
int wait = 1000;
int err;
err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);
- if (err || !a0)
- return false;
+ /* rss_hash_type is valid only when a0 is 1. An adapter which does
+ * not support CMD_CAPABILITY for rss_hash_type returns a0 = 0.
+ */
+ if (err || (a0 != 1))
+ return -EOPNOTSUPP;
+
+ a1 = (a1 >> NIC_CFG_RSS_HASH_TYPE_SHIFT) &
+ NIC_CFG_RSS_HASH_TYPE_MASK_FIELD;
- rss_hash_type = (a1 >> NIC_CFG_RSS_HASH_TYPE_SHIFT) &
- NIC_CFG_RSS_HASH_TYPE_MASK_FIELD;
+ *rss_hash_type = (u8)a1;
- return (rss_hash_type & NIC_CFG_RSS_HASH_TYPE_UDP);
+ return 0;
}
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.h b/drivers/net/ethernet/cisco/enic/vnic_dev.h
index 59d4cc8fbb85..714fc1ed79e3 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.h
@@ -184,6 +184,6 @@ int vnic_dev_overlay_offload_cfg(struct vnic_dev *vdev, u8 overlay,
u16 vxlan_udp_port_number);
int vnic_dev_get_supported_feature_ver(struct vnic_dev *vdev, u8 feature,
u64 *supported_versions, u64 *a1);
-bool vnic_dev_capable_udp_rss(struct vnic_dev *vdev);
+int vnic_dev_capable_rss_hash_type(struct vnic_dev *vdev, u8 *rss_hash_type);
#endif /* _VNIC_DEV_H_ */
diff --git a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
index 41de4ba622a1..fef5a0a0663d 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
@@ -148,8 +148,26 @@ enum vnic_devcmd_cmd {
/* del VLAN id in (u16)a0 */
CMD_VLAN_DEL = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 15),
- /* nic_cfg in (u32)a0 */
+ /* nic_cfg (no wait, always succeeds)
+ * in: (u32)a0
+ *
+ * Capability query:
+ * out: (u64) a0 = 1 if a1 is valid
+ * (u64) a1 = (NIC_CFG bits supported) | (flags << 32)
+ *
+ * flags are CMD_NIC_CFG_CAPF_xxx
+ */
CMD_NIC_CFG = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 16),
+ /* nic_cfg_chk (will return error if flags are invalid)
+ * in: (u32)a0
+ *
+ * Capability query:
+ * out: (u64) a0 = 1 if a1 is valid
+ * (u64) a1 = (NIC_CFG bits supported) | (flags << 32)
+ *
+ * flags are CMD_NIC_CFG_CAPF_xxx
+ */
+ CMD_NIC_CFG_CHK = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 16),
/* union vnic_rss_key in mem: (u64)a0=paddr, (u16)a1=len */
CMD_RSS_KEY = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 17),
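A sketch of how a caller decodes the capability-query encoding documented above (illustrative, modelled on vnic_dev_capable_rss_hash_type(); vdev and the wait value are assumptions):

	u64 a0 = CMD_NIC_CFG, a1 = 0;
	int err;

	err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, 1000);
	if (!err && a0 == 1) {
		u32 nic_cfg_bits = (u32)a1;		/* supported NIC_CFG bits */
		u32 capf_flags = (u32)(a1 >> 32);	/* CMD_NIC_CFG_CAPF_xxx */
		/* ... */
	}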
diff --git a/drivers/net/ethernet/cisco/enic/vnic_nic.h b/drivers/net/ethernet/cisco/enic/vnic_nic.h
index 5a93db0d7afc..84ff8ca17fcb 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_nic.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_nic.h
@@ -41,13 +41,14 @@
#define NIC_CFG_IG_VLAN_STRIP_EN_MASK_FIELD 1UL
#define NIC_CFG_IG_VLAN_STRIP_EN_SHIFT 24
+#define NIC_CFG_RSS_HASH_TYPE_UDP_IPV4 (1 << 0)
#define NIC_CFG_RSS_HASH_TYPE_IPV4 (1 << 1)
#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 (1 << 2)
#define NIC_CFG_RSS_HASH_TYPE_IPV6 (1 << 3)
#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV6 (1 << 4)
#define NIC_CFG_RSS_HASH_TYPE_IPV6_EX (1 << 5)
#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV6_EX (1 << 6)
-#define NIC_CFG_RSS_HASH_TYPE_UDP (1 << 7)
+#define NIC_CFG_RSS_HASH_TYPE_UDP_IPV6 (1 << 7)
static inline void vnic_set_nic_cfg(u32 *nic_cfg,
u8 rss_default_cpu, u8 rss_hash_type,
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index bd3f6e4d1341..ff9eb45f67f8 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -539,7 +539,7 @@ static int gmac_setup_txqs(struct net_device *netdev)
}
if (port->txq_dma_base & ~DMA_Q_BASE_MASK) {
- dev_warn(geth->dev, "TX queue base it not aligned\n");
+ dev_warn(geth->dev, "TX queue base is not aligned\n");
kfree(skb_tab);
return -ENOMEM;
}
@@ -680,7 +680,7 @@ static int gmac_setup_rxq(struct net_device *netdev)
if (!port->rxq_ring)
return -ENOMEM;
if (port->rxq_dma_base & ~NONTOE_QHDR0_BASE_MASK) {
- dev_warn(geth->dev, "RX queue base it not aligned\n");
+ dev_warn(geth->dev, "RX queue base is not aligned\n");
return -ENOMEM;
}
@@ -905,7 +905,7 @@ static int geth_setup_freeq(struct gemini_ethernet *geth)
if (!geth->freeq_ring)
return -ENOMEM;
if (geth->freeq_dma_base & ~DMA_Q_BASE_MASK) {
- dev_warn(geth->dev, "queue ring base it not aligned\n");
+ dev_warn(geth->dev, "queue ring base is not aligned\n");
goto err_freeq;
}
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 8bb0db990c8f..00a57273b753 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -1246,8 +1246,7 @@ error:
mdiobus_unregister(priv->mdio);
mdiobus_free(priv->mdio);
free2:
- if (priv->clk)
- clk_disable_unprepare(priv->clk);
+ clk_disable_unprepare(priv->clk);
free:
free_netdev(netdev);
out:
@@ -1271,8 +1270,7 @@ static int ethoc_remove(struct platform_device *pdev)
mdiobus_unregister(priv->mdio);
mdiobus_free(priv->mdio);
}
- if (priv->clk)
- clk_disable_unprepare(priv->clk);
+ clk_disable_unprepare(priv->clk);
unregister_netdev(netdev);
free_netdev(netdev);
}
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index 6e490fd2345d..a580a3dcbe59 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -22,7 +22,7 @@ if NET_VENDOR_FREESCALE
config FEC
tristate "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
depends on (M523x || M527x || M5272 || M528x || M520x || M532x || \
- ARCH_MXC || SOC_IMX28)
+ ARCH_MXC || SOC_IMX28 || COMPILE_TEST)
default ARCH_MXC || SOC_IMX28 if ARM
select PHYLIB
imply PTP_1588_CLOCK
diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile
index ed8ad0fefbda..0914a3ea4405 100644
--- a/drivers/net/ethernet/freescale/Makefile
+++ b/drivers/net/ethernet/freescale/Makefile
@@ -14,7 +14,6 @@ obj-$(CONFIG_FS_ENET) += fs_enet/
obj-$(CONFIG_FSL_PQ_MDIO) += fsl_pq_mdio.o
obj-$(CONFIG_FSL_XGMAC_MDIO) += xgmac_mdio.o
obj-$(CONFIG_GIANFAR) += gianfar_driver.o
-obj-$(CONFIG_PTP_1588_CLOCK_GIANFAR) += gianfar_ptp.o
gianfar_driver-objs := gianfar.o \
gianfar_ethtool.o
obj-$(CONFIG_UCC_GETH) += ucc_geth_driver.o
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index e7381f8ef89d..4778b663653e 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -21,7 +21,7 @@
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
- defined(CONFIG_ARM64)
+ defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST)
/*
* Just figures, Motorola would have to change the offsets for
* registers in the same peripheral device on different models
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 9d3eed46830d..c729665107f5 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -2053,13 +2053,9 @@ static int fec_enet_mii_init(struct platform_device *pdev)
fep->mii_bus->parent = &pdev->dev;
node = of_get_child_by_name(pdev->dev.of_node, "mdio");
- if (node) {
- err = of_mdiobus_register(fep->mii_bus, node);
+ err = of_mdiobus_register(fep->mii_bus, node);
+ if (node)
of_node_put(node);
- } else {
- err = mdiobus_register(fep->mii_bus);
- }
-
if (err)
goto err_out_free_mdiobus;
@@ -2112,7 +2108,7 @@ static int fec_enet_get_regs_len(struct net_device *ndev)
/* List of registers that can be safety be read to dump them with ethtool */
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
- defined(CONFIG_ARM64)
+ defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST)
static u32 fec_enet_register_offset[] = {
FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL,
@@ -3518,7 +3514,7 @@ fec_probe(struct platform_device *pdev)
goto failed_init;
for (i = 0; i < irq_cnt; i++) {
- sprintf(irq_name, "int%d", i);
+ snprintf(irq_name, sizeof(irq_name), "int%d", i);
irq = platform_get_irq_byname(pdev, irq_name);
if (irq < 0)
irq = platform_get_irq(pdev, i);
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index 43d973215040..36c2d7d6ee1b 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -454,12 +454,6 @@ static int fec_ptp_enable(struct ptp_clock_info *ptp,
return -EOPNOTSUPP;
}
-/**
- * fec_ptp_hwtstamp_ioctl - control hardware time stamping
- * @ndev: pointer to net_device
- * @ifreq: ioctl data
- * @cmd: particular ioctl requested
- */
int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr)
{
struct fec_enet_private *fep = netdev_priv(ndev);
diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c
index 6552d68ea6e1..ce6e24c74978 100644
--- a/drivers/net/ethernet/freescale/fman/fman_port.c
+++ b/drivers/net/ethernet/freescale/fman/fman_port.c
@@ -1391,12 +1391,10 @@ int fman_port_config(struct fman_port *port, struct fman_port_params *params)
/* FM_WRONG_RESET_VALUES_ERRATA_FMAN_A005127 Errata
* workaround
*/
- if (port->rev_info.major >= 6) {
- u32 reg;
+ u32 reg;
- reg = 0x00001013;
- iowrite32be(reg, &port->bmi_regs->tx.fmbm_tfp);
- }
+ reg = 0x00001013;
+ iowrite32be(reg, &port->bmi_regs->tx.fmbm_tfp);
}
return 0;
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 5aa814799d70..8e42c0246611 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -1372,7 +1372,4 @@ struct filer_table {
struct gfar_filer_entry fe[MAX_FILER_CACHE_IDX + 20];
};
-/* The gianfar_ptp module will set this variable */
-extern int gfar_phc_index;
-
#endif /* __GIANFAR_H */
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index a93e0199c369..8cb98cae0a6f 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -41,6 +41,8 @@
#include <linux/phy.h>
#include <linux/sort.h>
#include <linux/if_vlan.h>
+#include <linux/of_platform.h>
+#include <linux/fsl/ptp_qoriq.h>
#include "gianfar.h"
@@ -1509,24 +1511,35 @@ static int gfar_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
return ret;
}
-int gfar_phc_index = -1;
-EXPORT_SYMBOL(gfar_phc_index);
-
static int gfar_get_ts_info(struct net_device *dev,
struct ethtool_ts_info *info)
{
struct gfar_private *priv = netdev_priv(dev);
+ struct platform_device *ptp_dev;
+ struct device_node *ptp_node;
+ struct qoriq_ptp *ptp = NULL;
+
+ info->phc_index = -1;
if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) {
info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
SOF_TIMESTAMPING_SOFTWARE;
- info->phc_index = -1;
return 0;
}
+
+ ptp_node = of_find_compatible_node(NULL, NULL, "fsl,etsec-ptp");
+ if (ptp_node) {
+ ptp_dev = of_find_device_by_node(ptp_node);
+ if (ptp_dev)
+ ptp = platform_get_drvdata(ptp_dev);
+ }
+
+ if (ptp)
+ info->phc_index = ptp->phc_index;
+
info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
- info->phc_index = gfar_phc_index;
info->tx_types = (1 << HWTSTAMP_TX_OFF) |
(1 << HWTSTAMP_TX_ON);
info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
deleted file mode 100644
index 9f8d4f8e57e3..000000000000
--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
+++ /dev/null
@@ -1,572 +0,0 @@
-/*
- * PTP 1588 clock using the eTSEC
- *
- * Copyright (C) 2010 OMICRON electronics GmbH
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/device.h>
-#include <linux/hrtimer.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_platform.h>
-#include <linux/timex.h>
-#include <linux/io.h>
-
-#include <linux/ptp_clock_kernel.h>
-
-#include "gianfar.h"
-
-/*
- * gianfar ptp registers
- * Generated by regen.tcl on Thu May 13 01:38:57 PM CEST 2010
- */
-struct gianfar_ptp_registers {
- u32 tmr_ctrl; /* Timer control register */
- u32 tmr_tevent; /* Timestamp event register */
- u32 tmr_temask; /* Timer event mask register */
- u32 tmr_pevent; /* Timestamp event register */
- u32 tmr_pemask; /* Timer event mask register */
- u32 tmr_stat; /* Timestamp status register */
- u32 tmr_cnt_h; /* Timer counter high register */
- u32 tmr_cnt_l; /* Timer counter low register */
- u32 tmr_add; /* Timer drift compensation addend register */
- u32 tmr_acc; /* Timer accumulator register */
- u32 tmr_prsc; /* Timer prescale */
- u8 res1[4];
- u32 tmroff_h; /* Timer offset high */
- u32 tmroff_l; /* Timer offset low */
- u8 res2[8];
- u32 tmr_alarm1_h; /* Timer alarm 1 high register */
- u32 tmr_alarm1_l; /* Timer alarm 1 high register */
- u32 tmr_alarm2_h; /* Timer alarm 2 high register */
- u32 tmr_alarm2_l; /* Timer alarm 2 high register */
- u8 res3[48];
- u32 tmr_fiper1; /* Timer fixed period interval */
- u32 tmr_fiper2; /* Timer fixed period interval */
- u32 tmr_fiper3; /* Timer fixed period interval */
- u8 res4[20];
- u32 tmr_etts1_h; /* Timestamp of general purpose external trigger */
- u32 tmr_etts1_l; /* Timestamp of general purpose external trigger */
- u32 tmr_etts2_h; /* Timestamp of general purpose external trigger */
- u32 tmr_etts2_l; /* Timestamp of general purpose external trigger */
-};
-
-/* Bit definitions for the TMR_CTRL register */
-#define ALM1P (1<<31) /* Alarm1 output polarity */
-#define ALM2P (1<<30) /* Alarm2 output polarity */
-#define FIPERST (1<<28) /* FIPER start indication */
-#define PP1L (1<<27) /* Fiper1 pulse loopback mode enabled. */
-#define PP2L (1<<26) /* Fiper2 pulse loopback mode enabled. */
-#define TCLK_PERIOD_SHIFT (16) /* 1588 timer reference clock period. */
-#define TCLK_PERIOD_MASK (0x3ff)
-#define RTPE (1<<15) /* Record Tx Timestamp to PAL Enable. */
-#define FRD (1<<14) /* FIPER Realignment Disable */
-#define ESFDP (1<<11) /* External Tx/Rx SFD Polarity. */
-#define ESFDE (1<<10) /* External Tx/Rx SFD Enable. */
-#define ETEP2 (1<<9) /* External trigger 2 edge polarity */
-#define ETEP1 (1<<8) /* External trigger 1 edge polarity */
-#define COPH (1<<7) /* Generated clock output phase. */
-#define CIPH (1<<6) /* External oscillator input clock phase */
-#define TMSR (1<<5) /* Timer soft reset. */
-#define BYP (1<<3) /* Bypass drift compensated clock */
-#define TE (1<<2) /* 1588 timer enable. */
-#define CKSEL_SHIFT (0) /* 1588 Timer reference clock source */
-#define CKSEL_MASK (0x3)
-
-/* Bit definitions for the TMR_TEVENT register */
-#define ETS2 (1<<25) /* External trigger 2 timestamp sampled */
-#define ETS1 (1<<24) /* External trigger 1 timestamp sampled */
-#define ALM2 (1<<17) /* Current time = alarm time register 2 */
-#define ALM1 (1<<16) /* Current time = alarm time register 1 */
-#define PP1 (1<<7) /* periodic pulse generated on FIPER1 */
-#define PP2 (1<<6) /* periodic pulse generated on FIPER2 */
-#define PP3 (1<<5) /* periodic pulse generated on FIPER3 */
-
-/* Bit definitions for the TMR_TEMASK register */
-#define ETS2EN (1<<25) /* External trigger 2 timestamp enable */
-#define ETS1EN (1<<24) /* External trigger 1 timestamp enable */
-#define ALM2EN (1<<17) /* Timer ALM2 event enable */
-#define ALM1EN (1<<16) /* Timer ALM1 event enable */
-#define PP1EN (1<<7) /* Periodic pulse event 1 enable */
-#define PP2EN (1<<6) /* Periodic pulse event 2 enable */
-
-/* Bit definitions for the TMR_PEVENT register */
-#define TXP2 (1<<9) /* PTP transmitted timestamp im TXTS2 */
-#define TXP1 (1<<8) /* PTP transmitted timestamp in TXTS1 */
-#define RXP (1<<0) /* PTP frame has been received */
-
-/* Bit definitions for the TMR_PEMASK register */
-#define TXP2EN (1<<9) /* Transmit PTP packet event 2 enable */
-#define TXP1EN (1<<8) /* Transmit PTP packet event 1 enable */
-#define RXPEN (1<<0) /* Receive PTP packet event enable */
-
-/* Bit definitions for the TMR_STAT register */
-#define STAT_VEC_SHIFT (0) /* Timer general purpose status vector */
-#define STAT_VEC_MASK (0x3f)
-
-/* Bit definitions for the TMR_PRSC register */
-#define PRSC_OCK_SHIFT (0) /* Output clock division/prescale factor. */
-#define PRSC_OCK_MASK (0xffff)
-
-
-#define DRIVER "gianfar_ptp"
-#define DEFAULT_CKSEL 1
-#define N_EXT_TS 2
-#define REG_SIZE sizeof(struct gianfar_ptp_registers)
-
-struct etsects {
- struct gianfar_ptp_registers __iomem *regs;
- spinlock_t lock; /* protects regs */
- struct ptp_clock *clock;
- struct ptp_clock_info caps;
- struct resource *rsrc;
- int irq;
- u64 alarm_interval; /* for periodic alarm */
- u64 alarm_value;
- u32 tclk_period; /* nanoseconds */
- u32 tmr_prsc;
- u32 tmr_add;
- u32 cksel;
- u32 tmr_fiper1;
- u32 tmr_fiper2;
-};
-
-/*
- * Register access functions
- */
-
-/* Caller must hold etsects->lock. */
-static u64 tmr_cnt_read(struct etsects *etsects)
-{
- u64 ns;
- u32 lo, hi;
-
- lo = gfar_read(&etsects->regs->tmr_cnt_l);
- hi = gfar_read(&etsects->regs->tmr_cnt_h);
- ns = ((u64) hi) << 32;
- ns |= lo;
- return ns;
-}
-
-/* Caller must hold etsects->lock. */
-static void tmr_cnt_write(struct etsects *etsects, u64 ns)
-{
- u32 hi = ns >> 32;
- u32 lo = ns & 0xffffffff;
-
- gfar_write(&etsects->regs->tmr_cnt_l, lo);
- gfar_write(&etsects->regs->tmr_cnt_h, hi);
-}
-
-/* Caller must hold etsects->lock. */
-static void set_alarm(struct etsects *etsects)
-{
- u64 ns;
- u32 lo, hi;
-
- ns = tmr_cnt_read(etsects) + 1500000000ULL;
- ns = div_u64(ns, 1000000000UL) * 1000000000ULL;
- ns -= etsects->tclk_period;
- hi = ns >> 32;
- lo = ns & 0xffffffff;
- gfar_write(&etsects->regs->tmr_alarm1_l, lo);
- gfar_write(&etsects->regs->tmr_alarm1_h, hi);
-}
-
-/* Caller must hold etsects->lock. */
-static void set_fipers(struct etsects *etsects)
-{
- set_alarm(etsects);
- gfar_write(&etsects->regs->tmr_fiper1, etsects->tmr_fiper1);
- gfar_write(&etsects->regs->tmr_fiper2, etsects->tmr_fiper2);
-}
-
-/*
- * Interrupt service routine
- */
-
-static irqreturn_t isr(int irq, void *priv)
-{
- struct etsects *etsects = priv;
- struct ptp_clock_event event;
- u64 ns;
- u32 ack = 0, lo, hi, mask, val;
-
- val = gfar_read(&etsects->regs->tmr_tevent);
-
- if (val & ETS1) {
- ack |= ETS1;
- hi = gfar_read(&etsects->regs->tmr_etts1_h);
- lo = gfar_read(&etsects->regs->tmr_etts1_l);
- event.type = PTP_CLOCK_EXTTS;
- event.index = 0;
- event.timestamp = ((u64) hi) << 32;
- event.timestamp |= lo;
- ptp_clock_event(etsects->clock, &event);
- }
-
- if (val & ETS2) {
- ack |= ETS2;
- hi = gfar_read(&etsects->regs->tmr_etts2_h);
- lo = gfar_read(&etsects->regs->tmr_etts2_l);
- event.type = PTP_CLOCK_EXTTS;
- event.index = 1;
- event.timestamp = ((u64) hi) << 32;
- event.timestamp |= lo;
- ptp_clock_event(etsects->clock, &event);
- }
-
- if (val & ALM2) {
- ack |= ALM2;
- if (etsects->alarm_value) {
- event.type = PTP_CLOCK_ALARM;
- event.index = 0;
- event.timestamp = etsects->alarm_value;
- ptp_clock_event(etsects->clock, &event);
- }
- if (etsects->alarm_interval) {
- ns = etsects->alarm_value + etsects->alarm_interval;
- hi = ns >> 32;
- lo = ns & 0xffffffff;
- spin_lock(&etsects->lock);
- gfar_write(&etsects->regs->tmr_alarm2_l, lo);
- gfar_write(&etsects->regs->tmr_alarm2_h, hi);
- spin_unlock(&etsects->lock);
- etsects->alarm_value = ns;
- } else {
- gfar_write(&etsects->regs->tmr_tevent, ALM2);
- spin_lock(&etsects->lock);
- mask = gfar_read(&etsects->regs->tmr_temask);
- mask &= ~ALM2EN;
- gfar_write(&etsects->regs->tmr_temask, mask);
- spin_unlock(&etsects->lock);
- etsects->alarm_value = 0;
- etsects->alarm_interval = 0;
- }
- }
-
- if (val & PP1) {
- ack |= PP1;
- event.type = PTP_CLOCK_PPS;
- ptp_clock_event(etsects->clock, &event);
- }
-
- if (ack) {
- gfar_write(&etsects->regs->tmr_tevent, ack);
- return IRQ_HANDLED;
- } else
- return IRQ_NONE;
-}
-
-/*
- * PTP clock operations
- */
-
-static int ptp_gianfar_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
-{
- u64 adj, diff;
- u32 tmr_add;
- int neg_adj = 0;
- struct etsects *etsects = container_of(ptp, struct etsects, caps);
-
- if (scaled_ppm < 0) {
- neg_adj = 1;
- scaled_ppm = -scaled_ppm;
- }
- tmr_add = etsects->tmr_add;
- adj = tmr_add;
-
- /* calculate diff as adj*(scaled_ppm/65536)/1000000
- * and round() to the nearest integer
- */
- adj *= scaled_ppm;
- diff = div_u64(adj, 8000000);
- diff = (diff >> 13) + ((diff >> 12) & 1);
-
- tmr_add = neg_adj ? tmr_add - diff : tmr_add + diff;
-
- gfar_write(&etsects->regs->tmr_add, tmr_add);
-
- return 0;
-}
-
-static int ptp_gianfar_adjtime(struct ptp_clock_info *ptp, s64 delta)
-{
- s64 now;
- unsigned long flags;
- struct etsects *etsects = container_of(ptp, struct etsects, caps);
-
- spin_lock_irqsave(&etsects->lock, flags);
-
- now = tmr_cnt_read(etsects);
- now += delta;
- tmr_cnt_write(etsects, now);
- set_fipers(etsects);
-
- spin_unlock_irqrestore(&etsects->lock, flags);
-
- return 0;
-}
-
-static int ptp_gianfar_gettime(struct ptp_clock_info *ptp,
- struct timespec64 *ts)
-{
- u64 ns;
- unsigned long flags;
- struct etsects *etsects = container_of(ptp, struct etsects, caps);
-
- spin_lock_irqsave(&etsects->lock, flags);
-
- ns = tmr_cnt_read(etsects);
-
- spin_unlock_irqrestore(&etsects->lock, flags);
-
- *ts = ns_to_timespec64(ns);
-
- return 0;
-}
-
-static int ptp_gianfar_settime(struct ptp_clock_info *ptp,
- const struct timespec64 *ts)
-{
- u64 ns;
- unsigned long flags;
- struct etsects *etsects = container_of(ptp, struct etsects, caps);
-
- ns = timespec64_to_ns(ts);
-
- spin_lock_irqsave(&etsects->lock, flags);
-
- tmr_cnt_write(etsects, ns);
- set_fipers(etsects);
-
- spin_unlock_irqrestore(&etsects->lock, flags);
-
- return 0;
-}
-
-static int ptp_gianfar_enable(struct ptp_clock_info *ptp,
- struct ptp_clock_request *rq, int on)
-{
- struct etsects *etsects = container_of(ptp, struct etsects, caps);
- unsigned long flags;
- u32 bit, mask;
-
- switch (rq->type) {
- case PTP_CLK_REQ_EXTTS:
- switch (rq->extts.index) {
- case 0:
- bit = ETS1EN;
- break;
- case 1:
- bit = ETS2EN;
- break;
- default:
- return -EINVAL;
- }
- spin_lock_irqsave(&etsects->lock, flags);
- mask = gfar_read(&etsects->regs->tmr_temask);
- if (on)
- mask |= bit;
- else
- mask &= ~bit;
- gfar_write(&etsects->regs->tmr_temask, mask);
- spin_unlock_irqrestore(&etsects->lock, flags);
- return 0;
-
- case PTP_CLK_REQ_PPS:
- spin_lock_irqsave(&etsects->lock, flags);
- mask = gfar_read(&etsects->regs->tmr_temask);
- if (on)
- mask |= PP1EN;
- else
- mask &= ~PP1EN;
- gfar_write(&etsects->regs->tmr_temask, mask);
- spin_unlock_irqrestore(&etsects->lock, flags);
- return 0;
-
- default:
- break;
- }
-
- return -EOPNOTSUPP;
-}
-
-static const struct ptp_clock_info ptp_gianfar_caps = {
- .owner = THIS_MODULE,
- .name = "gianfar clock",
- .max_adj = 512000,
- .n_alarm = 0,
- .n_ext_ts = N_EXT_TS,
- .n_per_out = 0,
- .n_pins = 0,
- .pps = 1,
- .adjfine = ptp_gianfar_adjfine,
- .adjtime = ptp_gianfar_adjtime,
- .gettime64 = ptp_gianfar_gettime,
- .settime64 = ptp_gianfar_settime,
- .enable = ptp_gianfar_enable,
-};
-
-static int gianfar_ptp_probe(struct platform_device *dev)
-{
- struct device_node *node = dev->dev.of_node;
- struct etsects *etsects;
- struct timespec64 now;
- int err = -ENOMEM;
- u32 tmr_ctrl;
- unsigned long flags;
-
- etsects = kzalloc(sizeof(*etsects), GFP_KERNEL);
- if (!etsects)
- goto no_memory;
-
- err = -ENODEV;
-
- etsects->caps = ptp_gianfar_caps;
-
- if (of_property_read_u32(node, "fsl,cksel", &etsects->cksel))
- etsects->cksel = DEFAULT_CKSEL;
-
- if (of_property_read_u32(node,
- "fsl,tclk-period", &etsects->tclk_period) ||
- of_property_read_u32(node,
- "fsl,tmr-prsc", &etsects->tmr_prsc) ||
- of_property_read_u32(node,
- "fsl,tmr-add", &etsects->tmr_add) ||
- of_property_read_u32(node,
- "fsl,tmr-fiper1", &etsects->tmr_fiper1) ||
- of_property_read_u32(node,
- "fsl,tmr-fiper2", &etsects->tmr_fiper2) ||
- of_property_read_u32(node,
- "fsl,max-adj", &etsects->caps.max_adj)) {
- pr_err("device tree node missing required elements\n");
- goto no_node;
- }
-
- etsects->irq = platform_get_irq(dev, 0);
-
- if (etsects->irq < 0) {
- pr_err("irq not in device tree\n");
- goto no_node;
- }
- if (request_irq(etsects->irq, isr, 0, DRIVER, etsects)) {
- pr_err("request_irq failed\n");
- goto no_node;
- }
-
- etsects->rsrc = platform_get_resource(dev, IORESOURCE_MEM, 0);
- if (!etsects->rsrc) {
- pr_err("no resource\n");
- goto no_resource;
- }
- if (request_resource(&iomem_resource, etsects->rsrc)) {
- pr_err("resource busy\n");
- goto no_resource;
- }
-
- spin_lock_init(&etsects->lock);
-
- etsects->regs = ioremap(etsects->rsrc->start,
- resource_size(etsects->rsrc));
- if (!etsects->regs) {
- pr_err("ioremap ptp registers failed\n");
- goto no_ioremap;
- }
- getnstimeofday64(&now);
- ptp_gianfar_settime(&etsects->caps, &now);
-
- tmr_ctrl =
- (etsects->tclk_period & TCLK_PERIOD_MASK) << TCLK_PERIOD_SHIFT |
- (etsects->cksel & CKSEL_MASK) << CKSEL_SHIFT;
-
- spin_lock_irqsave(&etsects->lock, flags);
-
- gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl);
- gfar_write(&etsects->regs->tmr_add, etsects->tmr_add);
- gfar_write(&etsects->regs->tmr_prsc, etsects->tmr_prsc);
- gfar_write(&etsects->regs->tmr_fiper1, etsects->tmr_fiper1);
- gfar_write(&etsects->regs->tmr_fiper2, etsects->tmr_fiper2);
- set_alarm(etsects);
- gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl|FIPERST|RTPE|TE|FRD);
-
- spin_unlock_irqrestore(&etsects->lock, flags);
-
- etsects->clock = ptp_clock_register(&etsects->caps, &dev->dev);
- if (IS_ERR(etsects->clock)) {
- err = PTR_ERR(etsects->clock);
- goto no_clock;
- }
- gfar_phc_index = ptp_clock_index(etsects->clock);
-
- platform_set_drvdata(dev, etsects);
-
- return 0;
-
-no_clock:
- iounmap(etsects->regs);
-no_ioremap:
- release_resource(etsects->rsrc);
-no_resource:
- free_irq(etsects->irq, etsects);
-no_node:
- kfree(etsects);
-no_memory:
- return err;
-}
-
-static int gianfar_ptp_remove(struct platform_device *dev)
-{
- struct etsects *etsects = platform_get_drvdata(dev);
-
- gfar_write(&etsects->regs->tmr_temask, 0);
- gfar_write(&etsects->regs->tmr_ctrl, 0);
-
- gfar_phc_index = -1;
- ptp_clock_unregister(etsects->clock);
- iounmap(etsects->regs);
- release_resource(etsects->rsrc);
- free_irq(etsects->irq, etsects);
- kfree(etsects);
-
- return 0;
-}
-
-static const struct of_device_id match_table[] = {
- { .compatible = "fsl,etsec-ptp" },
- {},
-};
-MODULE_DEVICE_TABLE(of, match_table);
-
-static struct platform_driver gianfar_ptp_driver = {
- .driver = {
- .name = "gianfar_ptp",
- .of_match_table = match_table,
- },
- .probe = gianfar_ptp_probe,
- .remove = gianfar_ptp_remove,
-};
-
-module_platform_driver(gianfar_ptp_driver);
-
-MODULE_AUTHOR("Richard Cochran <richardcochran@gmail.com>");
-MODULE_DESCRIPTION("PTP clock using the eTSEC");
-MODULE_LICENSE("GPL");
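For reference, the scaled-ppm arithmetic in the removed ptp_gianfar_adjfine() computes round(tmr_add * scaled_ppm / (65536 * 1000000)) by first dividing by 8000000 and then shifting right by 13 (8000000 * 2^13 == 65536 * 1000000), with bit 12 supplying round-to-nearest. A minimal standalone sketch, for non-negative scaled_ppm only (the driver handles the sign separately):

#include <stdint.h>
#include <stdio.h>

static uint64_t adjfine_diff(uint32_t tmr_add, uint64_t scaled_ppm)
{
	uint64_t adj = (uint64_t)tmr_add * scaled_ppm;
	uint64_t diff = adj / 8000000;	/* 8000000 * 8192 == 65536000000 */

	/* final >>13 completes the divide; bit 12 rounds to nearest */
	return (diff >> 13) + ((diff >> 12) & 1);
}

int main(void)
{
	/* 1 ppm == 65536 scaled ppm: expect about tmr_add / 1000000 (2147) */
	printf("%llu\n", (unsigned long long)adjfine_diff(0x80000000U, 65536));
	return 0;
}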
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index e0bc79ea3d88..85e1d14514fc 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -1648,6 +1648,15 @@ int hns_dsaf_rm_mac_addr(
mac_entry->addr);
}
+static void hns_dsaf_setup_mc_mask(struct dsaf_device *dsaf_dev,
+ u8 port_num, u8 *mask, u8 *addr)
+{
+ if (MAC_IS_BROADCAST(addr))
+ memset(mask, 0xff, ETH_ALEN);
+ else
+ memcpy(mask, dsaf_dev->mac_cb[port_num]->mc_mask, ETH_ALEN);
+}
+
static void hns_dsaf_mc_mask_bit_clear(char *dst, const char *src)
{
u16 *a = (u16 *)dst;
@@ -1676,7 +1685,6 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev,
struct dsaf_drv_tbl_tcam_key tmp_mac_key;
struct dsaf_tbl_tcam_data tcam_data;
u8 mc_addr[ETH_ALEN];
- u8 *mc_mask;
int mskid;
 /* check mac addr */
@@ -1687,9 +1695,12 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev,
}
ether_addr_copy(mc_addr, mac_entry->addr);
- mc_mask = dsaf_dev->mac_cb[mac_entry->in_port_num]->mc_mask;
if (!AE_IS_VER1(dsaf_dev->dsaf_ver)) {
+ u8 mc_mask[ETH_ALEN];
+
/* prepare for key data setting */
+ hns_dsaf_setup_mc_mask(dsaf_dev, mac_entry->in_port_num,
+ mc_mask, mac_entry->addr);
hns_dsaf_mc_mask_bit_clear(mc_addr, mc_mask);
/* config key mask */
@@ -1844,7 +1855,6 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev,
struct dsaf_drv_tbl_tcam_key mask_key, tmp_mac_key;
struct dsaf_tbl_tcam_data *pmask_key = NULL;
u8 mc_addr[ETH_ALEN];
- u8 *mc_mask;
if (!(void *)mac_entry) {
dev_err(dsaf_dev->dev,
@@ -1861,14 +1871,17 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev,
/* always mask vlan_id field */
ether_addr_copy(mc_addr, mac_entry->addr);
- mc_mask = dsaf_dev->mac_cb[mac_entry->in_port_num]->mc_mask;
if (!AE_IS_VER1(dsaf_dev->dsaf_ver)) {
+ u8 mc_mask[ETH_ALEN];
+
/* prepare for key data setting */
+ hns_dsaf_setup_mc_mask(dsaf_dev, mac_entry->in_port_num,
+ mc_mask, mac_entry->addr);
hns_dsaf_mc_mask_bit_clear(mc_addr, mc_mask);
/* config key mask */
- hns_dsaf_set_mac_key(dsaf_dev, &mask_key, 0x00, 0xff, mc_addr);
+ hns_dsaf_set_mac_key(dsaf_dev, &mask_key, 0x00, 0xff, mc_mask);
mask_key.high.val = le32_to_cpu(mask_key.high.val);
mask_key.low.val = le32_to_cpu(mask_key.low.val);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
index 519e2bd6aa60..be9dc08ccf67 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -47,6 +47,8 @@ enum hclge_mbx_mac_vlan_subcode {
HCLGE_MBX_MAC_VLAN_MC_ADD, /* add new MC mac addr */
HCLGE_MBX_MAC_VLAN_MC_REMOVE, /* remove MC mac addr */
HCLGE_MBX_MAC_VLAN_MC_FUNC_MTA_ENABLE, /* config func MTA enable */
+ HCLGE_MBX_MAC_VLAN_MTA_TYPE_READ, /* read func MTA type */
+ HCLGE_MBX_MAC_VLAN_MTA_STATUS_UPDATE, /* update MTA status */
};
/* below are per-VF vlan cfg subcodes */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
index 02145f2de820..9d79dad2c6aa 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
@@ -36,6 +36,49 @@ static bool hnae3_client_match(enum hnae3_client_type client_type,
return false;
}
+static void hnae3_set_client_init_flag(struct hnae3_client *client,
+ struct hnae3_ae_dev *ae_dev, int inited)
+{
+ switch (client->type) {
+ case HNAE3_CLIENT_KNIC:
+ hnae_set_bit(ae_dev->flag, HNAE3_KNIC_CLIENT_INITED_B, inited);
+ break;
+ case HNAE3_CLIENT_UNIC:
+ hnae_set_bit(ae_dev->flag, HNAE3_UNIC_CLIENT_INITED_B, inited);
+ break;
+ case HNAE3_CLIENT_ROCE:
+ hnae_set_bit(ae_dev->flag, HNAE3_ROCE_CLIENT_INITED_B, inited);
+ break;
+ default:
+ break;
+ }
+}
+
+static int hnae3_get_client_init_flag(struct hnae3_client *client,
+ struct hnae3_ae_dev *ae_dev)
+{
+ int inited = 0;
+
+ switch (client->type) {
+ case HNAE3_CLIENT_KNIC:
+ inited = hnae_get_bit(ae_dev->flag,
+ HNAE3_KNIC_CLIENT_INITED_B);
+ break;
+ case HNAE3_CLIENT_UNIC:
+ inited = hnae_get_bit(ae_dev->flag,
+ HNAE3_UNIC_CLIENT_INITED_B);
+ break;
+ case HNAE3_CLIENT_ROCE:
+ inited = hnae_get_bit(ae_dev->flag,
+ HNAE3_ROCE_CLIENT_INITED_B);
+ break;
+ default:
+ break;
+ }
+
+ return inited;
+}
+
static int hnae3_match_n_instantiate(struct hnae3_client *client,
struct hnae3_ae_dev *ae_dev, bool is_reg)
{
@@ -50,13 +93,22 @@ static int hnae3_match_n_instantiate(struct hnae3_client *client,
/* now, (un-)instantiate client by calling lower layer */
if (is_reg) {
ret = ae_dev->ops->init_client_instance(client, ae_dev);
- if (ret)
+ if (ret) {
dev_err(&ae_dev->pdev->dev,
"fail to instantiate client\n");
- return ret;
+ return ret;
+ }
+
+ hnae3_set_client_init_flag(client, ae_dev, 1);
+ return 0;
+ }
+
+ if (hnae3_get_client_init_flag(client, ae_dev)) {
+ ae_dev->ops->uninit_client_instance(client, ae_dev);
+
+ hnae3_set_client_init_flag(client, ae_dev, 0);
}
- ae_dev->ops->uninit_client_instance(client, ae_dev);
return 0;
}
@@ -89,7 +141,7 @@ int hnae3_register_client(struct hnae3_client *client)
exit:
mutex_unlock(&hnae3_common_lock);
- return ret;
+ return 0;
}
EXPORT_SYMBOL(hnae3_register_client);
@@ -112,7 +164,7 @@ EXPORT_SYMBOL(hnae3_unregister_client);
* @ae_algo: AE algorithm
* NOTE: the duplicated name will not be checked
*/
-int hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo)
+void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo)
{
const struct pci_device_id *id;
struct hnae3_ae_dev *ae_dev;
@@ -151,8 +203,6 @@ int hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo)
}
mutex_unlock(&hnae3_common_lock);
-
- return ret;
}
EXPORT_SYMBOL(hnae3_register_ae_algo);
@@ -168,6 +218,9 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo)
mutex_lock(&hnae3_common_lock);
/* Check if there are matched ae_dev */
list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {
+ if (!hnae_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))
+ continue;
+
id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev);
if (!id)
continue;
@@ -191,22 +244,14 @@ EXPORT_SYMBOL(hnae3_unregister_ae_algo);
* @ae_dev: the AE device
* NOTE: the duplicated name will not be checked
*/
-int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
+void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
{
const struct pci_device_id *id;
struct hnae3_ae_algo *ae_algo;
struct hnae3_client *client;
- int ret = 0, lock_acquired;
+ int ret = 0;
- /* we can get deadlocked if SRIOV is being enabled in context to probe
- * and probe gets called again in same context. This can happen when
- * pci_enable_sriov() is called to create VFs from PF probes context.
- * Therefore, for simplicity uniformly defering further probing in all
- * cases where we detect contention.
- */
- lock_acquired = mutex_trylock(&hnae3_common_lock);
- if (!lock_acquired)
- return -EPROBE_DEFER;
+ mutex_lock(&hnae3_common_lock);
list_add_tail(&ae_dev->node, &hnae3_ae_dev_list);
@@ -220,7 +265,6 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
if (!ae_dev->ops) {
dev_err(&ae_dev->pdev->dev, "ae_dev ops are null\n");
- ret = -EOPNOTSUPP;
goto out_err;
}
@@ -247,8 +291,6 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
out_err:
mutex_unlock(&hnae3_common_lock);
-
- return ret;
}
EXPORT_SYMBOL(hnae3_register_ae_dev);
@@ -264,6 +306,9 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev)
mutex_lock(&hnae3_common_lock);
/* Check if there are matched ae_algo */
list_for_each_entry(ae_algo, &hnae3_ae_algo_list, node) {
+ if (!hnae_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))
+ continue;
+
id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev);
if (!id)
continue;
@@ -283,3 +328,4 @@ EXPORT_SYMBOL(hnae3_unregister_ae_dev);
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("HNAE3(Hisilicon Network Acceleration Engine) Framework");
+MODULE_VERSION(HNAE3_MOD_VERSION);
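A minimal sketch, outside the hnae3 framework, of the per-client init-flag bookkeeping introduced above: a bit per client type is set only on successful init, and uninit runs only when that bit is set, so a client whose init failed or never ran is not torn down. Names and bit values mirror the HNAE3_*_CLIENT_INITED_B defines.

#include <stdbool.h>
#include <stdint.h>

enum { KNIC_INITED_B = 3, UNIC_INITED_B = 4, ROCE_INITED_B = 5 };

static void set_inited(uint32_t *flags, int bit, bool inited)
{
	if (inited)
		*flags |= 1U << bit;
	else
		*flags &= ~(1U << bit);
}

static bool was_inited(uint32_t flags, int bit)
{
	return flags & (1U << bit);
}

/* register path:   on successful init, set_inited(&dev_flag, bit, true);
 * unregister path: only if (was_inited(dev_flag, bit)) uninit, then clear.
 */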
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 37ec1b3286c6..8acb1d116a02 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -36,6 +36,8 @@
#include <linux/pci.h>
#include <linux/types.h>
+#define HNAE3_MOD_VERSION "1.0"
+
/* Device IDs */
#define HNAE3_DEV_ID_GE 0xA220
#define HNAE3_DEV_ID_25GE 0xA221
@@ -52,6 +54,9 @@
#define HNAE3_DEV_INITED_B 0x0
#define HNAE3_DEV_SUPPORT_ROCE_B 0x1
#define HNAE3_DEV_SUPPORT_DCB_B 0x2
+#define HNAE3_KNIC_CLIENT_INITED_B 0x3
+#define HNAE3_UNIC_CLIENT_INITED_B 0x4
+#define HNAE3_ROCE_CLIENT_INITED_B 0x5
#define HNAE3_DEV_SUPPORT_ROCE_DCB_BITS (BIT(HNAE3_DEV_SUPPORT_DCB_B) |\
BIT(HNAE3_DEV_SUPPORT_ROCE_B))
@@ -273,10 +278,6 @@ struct hnae3_ae_dev {
* Map rings to vector
* unmap_ring_from_vector()
* Unmap rings from vector
- * add_tunnel_udp()
- * Add tunnel information to hardware
- * del_tunnel_udp()
- * Delete tunnel information from hardware
* reset_queue()
* Reset queue
* get_fw_version()
@@ -315,7 +316,8 @@ struct hnae3_ae_ops {
int (*set_loopback)(struct hnae3_handle *handle,
enum hnae3_loop loop_mode, bool en);
- void (*set_promisc_mode)(struct hnae3_handle *handle, u32 en);
+ void (*set_promisc_mode)(struct hnae3_handle *handle, bool en_uc_pmc,
+ bool en_mc_pmc);
int (*set_mtu)(struct hnae3_handle *handle, int new_mtu);
void (*get_pauseparam)(struct hnae3_handle *handle,
@@ -351,6 +353,7 @@ struct hnae3_ae_ops {
const unsigned char *addr);
int (*rm_mc_addr)(struct hnae3_handle *handle,
const unsigned char *addr);
+ int (*update_mta_status)(struct hnae3_handle *handle);
void (*set_tso_stats)(struct hnae3_handle *handle, int enable);
void (*update_stats)(struct hnae3_handle *handle,
@@ -388,9 +391,6 @@ struct hnae3_ae_ops {
int vector_num,
struct hnae3_ring_chain_node *vr_chain);
- int (*add_tunnel_udp)(struct hnae3_handle *handle, u16 port_num);
- int (*del_tunnel_udp)(struct hnae3_handle *handle, u16 port_num);
-
void (*reset_queue)(struct hnae3_handle *handle, u16 queue_id);
u32 (*get_fw_version)(struct hnae3_handle *handle);
void (*get_mdix_mode)(struct hnae3_handle *handle,
@@ -521,11 +521,11 @@ struct hnae3_handle {
#define hnae_get_bit(origin, shift) \
hnae_get_field((origin), (0x1 << (shift)), (shift))
-int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev);
+void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev);
void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev);
void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo);
-int hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo);
+void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo);
void hnae3_unregister_client(struct hnae3_client *client);
int hnae3_register_client(struct hnae3_client *client);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 8c55965a66ac..f2b31d278bc9 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -25,6 +25,9 @@
#include "hnae3.h"
#include "hns3_enet.h"
+static void hns3_clear_all_ring(struct hnae3_handle *h);
+static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h);
+
static const char hns3_driver_name[] = "hns3";
const char hns3_driver_version[] = VERMAGIC_STRING;
static const char hns3_driver_string[] =
@@ -273,6 +276,10 @@ static int hns3_nic_net_up(struct net_device *netdev)
int i, j;
int ret;
+ ret = hns3_nic_reset_all_ring(h);
+ if (ret)
+ return ret;
+
/* get irq resource for all vectors */
ret = hns3_nic_init_irq(priv);
if (ret) {
@@ -333,17 +340,19 @@ static void hns3_nic_net_down(struct net_device *netdev)
if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
return;
+ /* disable vectors */
+ for (i = 0; i < priv->vector_num; i++)
+ hns3_vector_disable(&priv->tqp_vector[i]);
+
/* stop ae_dev */
ops = priv->ae_handle->ae_algo->ops;
if (ops->stop)
ops->stop(priv->ae_handle);
- /* disable vectors */
- for (i = 0; i < priv->vector_num; i++)
- hns3_vector_disable(&priv->tqp_vector[i]);
-
/* free irq resources */
hns3_nic_uninit_irq(priv);
+
+ hns3_clear_all_ring(priv->ae_handle);
}
static int hns3_nic_net_stop(struct net_device *netdev)
@@ -406,15 +415,21 @@ static void hns3_nic_set_rx_mode(struct net_device *netdev)
if (h->ae_algo->ops->set_promisc_mode) {
if (netdev->flags & IFF_PROMISC)
- h->ae_algo->ops->set_promisc_mode(h, 1);
+ h->ae_algo->ops->set_promisc_mode(h, true, true);
+ else if (netdev->flags & IFF_ALLMULTI)
+ h->ae_algo->ops->set_promisc_mode(h, false, true);
else
- h->ae_algo->ops->set_promisc_mode(h, 0);
+ h->ae_algo->ops->set_promisc_mode(h, false, false);
}
if (__dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync))
netdev_err(netdev, "sync uc address fail\n");
- if (netdev->flags & IFF_MULTICAST)
+ if (netdev->flags & IFF_MULTICAST) {
if (__dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync))
netdev_err(netdev, "sync mc address fail\n");
+
+ if (h->ae_algo->ops->update_mta_status)
+ h->ae_algo->ops->update_mta_status(h);
+ }
}
static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
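A minimal sketch of the rx-mode mapping above, with hypothetical names: IFF_PROMISC enables both unicast and multicast promiscuity, IFF_ALLMULTI enables only multicast, and neither flag leaves filtering on.

#include <stdbool.h>

struct pmc { bool en_uc; bool en_mc; };

static struct pmc rx_mode_to_pmc(bool promisc, bool allmulti)
{
	if (promisc)
		return (struct pmc){ true, true };	/* all traffic */
	if (allmulti)
		return (struct pmc){ false, true };	/* all multicast */
	return (struct pmc){ false, false };		/* filtered */
}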
@@ -502,7 +517,7 @@ static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
/* find outer header point */
l3.hdr = skb_network_header(skb);
- l4_hdr = skb_inner_transport_header(skb);
+ l4_hdr = skb_transport_header(skb);
if (skb->protocol == htons(ETH_P_IPV6)) {
exthdr = l3.hdr + sizeof(*l3.v6);
@@ -644,6 +659,32 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
}
}
+/* When skb->encapsulation is 0 and skb->ip_summed is CHECKSUM_PARTIAL
+ * for a UDP packet whose destination port is the IANA-assigned VXLAN
+ * port, the hardware is expected to do the checksum offload, but it
+ * does not do the checksum offload when the UDP destination port is
+ * 4789.
+ */
+static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
+{
+#define IANA_VXLAN_PORT 4789
+ union {
+ struct tcphdr *tcp;
+ struct udphdr *udp;
+ struct gre_base_hdr *gre;
+ unsigned char *hdr;
+ } l4;
+
+ l4.hdr = skb_transport_header(skb);
+
+ if (!(!skb->encapsulation && l4.udp->dest == htons(IANA_VXLAN_PORT)))
+ return false;
+
+ skb_checksum_help(skb);
+
+ return true;
+}
+
static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
u8 il4_proto, u32 *type_cs_vlan_tso,
u32 *ol_type_vlan_len_msec)
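A standalone sketch (userspace stand-in, htons taken from arpa/inet.h) of the predicate in hns3_tunnel_csum_bug() with the double negation unfolded: only a non-encapsulated UDP frame whose destination port is 4789 takes the software-checksum fallback.

#include <stdbool.h>
#include <stdint.h>
#include <arpa/inet.h>	/* htons */

static bool hits_vxlan_csum_bug(bool encapsulation, uint16_t udp_dest_be)
{
	/* equivalent to: !(!(!encap && dest == htons(4789))) */
	return !encapsulation && udp_dest_be == htons(4789);
}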
@@ -732,6 +773,9 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
HNS3_L4T_TCP);
break;
case IPPROTO_UDP:
+ if (hns3_tunnel_csum_bug(skb))
+ break;
+
hnae_set_field(*type_cs_vlan_tso,
HNS3_TXD_L4T_M,
HNS3_TXD_L4T_S,
@@ -1121,6 +1165,12 @@ static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
return -EADDRNOTAVAIL;
+ if (ether_addr_equal(netdev->dev_addr, mac_addr->sa_data)) {
+ netdev_info(netdev, "already using mac address %pM\n",
+ mac_addr->sa_data);
+ return 0;
+ }
+
ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data, false);
if (ret) {
netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret);
@@ -1244,93 +1294,6 @@ static void hns3_nic_get_stats64(struct net_device *netdev,
stats->tx_compressed = netdev->stats.tx_compressed;
}
-static void hns3_add_tunnel_port(struct net_device *netdev, u16 port,
- enum hns3_udp_tnl_type type)
-{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hns3_udp_tunnel *udp_tnl = &priv->udp_tnl[type];
- struct hnae3_handle *h = priv->ae_handle;
-
- if (udp_tnl->used && udp_tnl->dst_port == port) {
- udp_tnl->used++;
- return;
- }
-
- if (udp_tnl->used) {
- netdev_warn(netdev,
- "UDP tunnel [%d], port [%d] offload\n", type, port);
- return;
- }
-
- udp_tnl->dst_port = port;
- udp_tnl->used = 1;
- /* TBD send command to hardware to add port */
- if (h->ae_algo->ops->add_tunnel_udp)
- h->ae_algo->ops->add_tunnel_udp(h, port);
-}
-
-static void hns3_del_tunnel_port(struct net_device *netdev, u16 port,
- enum hns3_udp_tnl_type type)
-{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hns3_udp_tunnel *udp_tnl = &priv->udp_tnl[type];
- struct hnae3_handle *h = priv->ae_handle;
-
- if (!udp_tnl->used || udp_tnl->dst_port != port) {
- netdev_warn(netdev,
- "Invalid UDP tunnel port %d\n", port);
- return;
- }
-
- udp_tnl->used--;
- if (udp_tnl->used)
- return;
-
- udp_tnl->dst_port = 0;
- /* TBD send command to hardware to del port */
- if (h->ae_algo->ops->del_tunnel_udp)
- h->ae_algo->ops->del_tunnel_udp(h, port);
-}
-
-/* hns3_nic_udp_tunnel_add - Get notifiacetion about UDP tunnel ports
- * @netdev: This physical ports's netdev
- * @ti: Tunnel information
- */
-static void hns3_nic_udp_tunnel_add(struct net_device *netdev,
- struct udp_tunnel_info *ti)
-{
- u16 port_n = ntohs(ti->port);
-
- switch (ti->type) {
- case UDP_TUNNEL_TYPE_VXLAN:
- hns3_add_tunnel_port(netdev, port_n, HNS3_UDP_TNL_VXLAN);
- break;
- case UDP_TUNNEL_TYPE_GENEVE:
- hns3_add_tunnel_port(netdev, port_n, HNS3_UDP_TNL_GENEVE);
- break;
- default:
- netdev_err(netdev, "unsupported tunnel type %d\n", ti->type);
- break;
- }
-}
-
-static void hns3_nic_udp_tunnel_del(struct net_device *netdev,
- struct udp_tunnel_info *ti)
-{
- u16 port_n = ntohs(ti->port);
-
- switch (ti->type) {
- case UDP_TUNNEL_TYPE_VXLAN:
- hns3_del_tunnel_port(netdev, port_n, HNS3_UDP_TNL_VXLAN);
- break;
- case UDP_TUNNEL_TYPE_GENEVE:
- hns3_del_tunnel_port(netdev, port_n, HNS3_UDP_TNL_GENEVE);
- break;
- default:
- break;
- }
-}
-
static int hns3_setup_tc(struct net_device *netdev, void *type_data)
{
struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
@@ -1569,13 +1532,50 @@ static const struct net_device_ops hns3_nic_netdev_ops = {
.ndo_get_stats64 = hns3_nic_get_stats64,
.ndo_setup_tc = hns3_nic_setup_tc,
.ndo_set_rx_mode = hns3_nic_set_rx_mode,
- .ndo_udp_tunnel_add = hns3_nic_udp_tunnel_add,
- .ndo_udp_tunnel_del = hns3_nic_udp_tunnel_del,
.ndo_vlan_rx_add_vid = hns3_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = hns3_vlan_rx_kill_vid,
.ndo_set_vf_vlan = hns3_ndo_set_vf_vlan,
};
+static bool hns3_is_phys_func(struct pci_dev *pdev)
+{
+ u32 dev_id = pdev->device;
+
+ switch (dev_id) {
+ case HNAE3_DEV_ID_GE:
+ case HNAE3_DEV_ID_25GE:
+ case HNAE3_DEV_ID_25GE_RDMA:
+ case HNAE3_DEV_ID_25GE_RDMA_MACSEC:
+ case HNAE3_DEV_ID_50GE_RDMA:
+ case HNAE3_DEV_ID_50GE_RDMA_MACSEC:
+ case HNAE3_DEV_ID_100G_RDMA_MACSEC:
+ return true;
+ case HNAE3_DEV_ID_100G_VF:
+ case HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF:
+ return false;
+ default:
+ dev_warn(&pdev->dev, "unrecognized pci device-id %d",
+ dev_id);
+ }
+
+ return false;
+}
+
+static void hns3_disable_sriov(struct pci_dev *pdev)
+{
+ /* If our VFs are assigned we cannot shut down SR-IOV
+ * without causing issues, so just leave the hardware
+ * available but disabled
+ */
+ if (pci_vfs_assigned(pdev)) {
+ dev_warn(&pdev->dev,
+ "disabling driver while VFs are assigned\n");
+ return;
+ }
+
+ pci_disable_sriov(pdev);
+}
+
/* hns3_probe - Device initialization routine
* @pdev: PCI device information struct
* @ent: entry in hns3_pci_tbl
@@ -1603,7 +1603,9 @@ static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ae_dev->dev_type = HNAE3_DEV_KNIC;
pci_set_drvdata(pdev, ae_dev);
- return hnae3_register_ae_dev(ae_dev);
+ hnae3_register_ae_dev(ae_dev);
+
+ return 0;
}
/* hns3_remove - Device removal routine
@@ -1613,21 +1615,56 @@ static void hns3_remove(struct pci_dev *pdev)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+ if (hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))
+ hns3_disable_sriov(pdev);
+
hnae3_unregister_ae_dev(ae_dev);
}
+/**
+ * hns3_pci_sriov_configure
+ * @pdev: pointer to a pci_dev structure
+ * @num_vfs: number of VFs to allocate
+ *
+ * Enable or change the number of VFs. Called when the user updates the number
+ * of VFs in sysfs.
+ **/
+static int hns3_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ int ret;
+
+ if (!(hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))) {
+ dev_warn(&pdev->dev, "Cannot configure SRIOV\n");
+ return -EINVAL;
+ }
+
+ if (num_vfs) {
+ ret = pci_enable_sriov(pdev, num_vfs);
+ if (ret)
+ dev_err(&pdev->dev, "SRIOV enable failed %d\n", ret);
+ else
+ return num_vfs;
+ } else if (!pci_vfs_assigned(pdev)) {
+ pci_disable_sriov(pdev);
+ } else {
+ dev_warn(&pdev->dev,
+ "Unable to free VFs because some are assigned to VMs.\n");
+ }
+
+ return 0;
+}
+
static struct pci_driver hns3_driver = {
.name = hns3_driver_name,
.id_table = hns3_pci_tbl,
.probe = hns3_probe,
.remove = hns3_remove,
+ .sriov_configure = hns3_pci_sriov_configure,
};
/* set default feature to hns3 */
static void hns3_set_default_feature(struct net_device *netdev)
{
- struct hnae3_handle *h = hns3_get_handle(netdev);
-
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
@@ -1656,15 +1693,11 @@ static void hns3_set_default_feature(struct net_device *netdev)
NETIF_F_GSO_UDP_TUNNEL_CSUM;
netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_UDP_TUNNEL_CSUM;
-
- if (!(h->flags & HNAE3_SUPPORT_VF))
- netdev->hw_features |=
- NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX;
}
static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
@@ -1836,6 +1869,7 @@ static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
hns3_unmap_buffer(ring, &ring->desc_cb[i]);
ring->desc_cb[i] = *res_cb;
ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
+ ring->desc[i].rx.bd_base_info = 0;
}
static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
@@ -1843,6 +1877,7 @@ static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
ring->desc_cb[i].reuse_flag = 0;
ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma
+ ring->desc_cb[i].page_offset);
+ ring->desc[i].rx.bd_base_info = 0;
}
static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes,
@@ -1971,106 +2006,6 @@ hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleand_count)
writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
}
-/* hns3_nic_get_headlen - determine size of header for LRO/GRO
- * @data: pointer to the start of the headers
- * @max: total length of section to find headers in
- *
- * This function is meant to determine the length of headers that will
- * be recognized by hardware for LRO, GRO, and RSC offloads. The main
- * motivation of doing this is to only perform one pull for IPv4 TCP
- * packets so that we can do basic things like calculating the gso_size
- * based on the average data per packet.
- */
-static unsigned int hns3_nic_get_headlen(unsigned char *data, u32 flag,
- unsigned int max_size)
-{
- unsigned char *network;
- u8 hlen;
-
- /* This should never happen, but better safe than sorry */
- if (max_size < ETH_HLEN)
- return max_size;
-
- /* Initialize network frame pointer */
- network = data;
-
- /* Set first protocol and move network header forward */
- network += ETH_HLEN;
-
- /* Handle any vlan tag if present */
- if (hnae_get_field(flag, HNS3_RXD_VLAN_M, HNS3_RXD_VLAN_S)
- == HNS3_RX_FLAG_VLAN_PRESENT) {
- if ((typeof(max_size))(network - data) > (max_size - VLAN_HLEN))
- return max_size;
-
- network += VLAN_HLEN;
- }
-
- /* Handle L3 protocols */
- if (hnae_get_field(flag, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S)
- == HNS3_RX_FLAG_L3ID_IPV4) {
- if ((typeof(max_size))(network - data) >
- (max_size - sizeof(struct iphdr)))
- return max_size;
-
- /* Access ihl as a u8 to avoid unaligned access on ia64 */
- hlen = (network[0] & 0x0F) << 2;
-
- /* Verify hlen meets minimum size requirements */
- if (hlen < sizeof(struct iphdr))
- return network - data;
-
- /* Record next protocol if header is present */
- } else if (hnae_get_field(flag, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S)
- == HNS3_RX_FLAG_L3ID_IPV6) {
- if ((typeof(max_size))(network - data) >
- (max_size - sizeof(struct ipv6hdr)))
- return max_size;
-
- /* Record next protocol */
- hlen = sizeof(struct ipv6hdr);
- } else {
- return network - data;
- }
-
- /* Relocate pointer to start of L4 header */
- network += hlen;
-
- /* Finally sort out TCP/UDP */
- if (hnae_get_field(flag, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S)
- == HNS3_RX_FLAG_L4ID_TCP) {
- if ((typeof(max_size))(network - data) >
- (max_size - sizeof(struct tcphdr)))
- return max_size;
-
- /* Access doff as a u8 to avoid unaligned access on ia64 */
- hlen = (network[12] & 0xF0) >> 2;
-
- /* Verify hlen meets minimum size requirements */
- if (hlen < sizeof(struct tcphdr))
- return network - data;
-
- network += hlen;
- } else if (hnae_get_field(flag, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S)
- == HNS3_RX_FLAG_L4ID_UDP) {
- if ((typeof(max_size))(network - data) >
- (max_size - sizeof(struct udphdr)))
- return max_size;
-
- network += sizeof(struct udphdr);
- }
-
- /* If everything has gone correctly network should be the
- * data section of the packet and will be the end of the header.
- * If not then it probably represents the end of the last recognized
- * header.
- */
- if ((typeof(max_size))(network - data) < max_size)
- return network - data;
- else
- return max_size;
-}
-
static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
struct hns3_enet_ring *ring, int pull_len,
struct hns3_desc_cb *desc_cb)
@@ -2183,6 +2118,39 @@ static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb)
napi_gro_receive(&ring->tqp_vector->napi, skb);
}
+static u16 hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
+ struct hns3_desc *desc, u32 l234info)
+{
+ struct pci_dev *pdev = ring->tqp->handle->pdev;
+ u16 vlan_tag;
+
+ if (pdev->revision == 0x20) {
+ vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
+ if (!(vlan_tag & VLAN_VID_MASK))
+ vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
+
+ return vlan_tag;
+ }
+
+#define HNS3_STRP_OUTER_VLAN 0x1
+#define HNS3_STRP_INNER_VLAN 0x2
+
+ switch (hnae_get_field(l234info, HNS3_RXD_STRP_TAGP_M,
+ HNS3_RXD_STRP_TAGP_S)) {
+ case HNS3_STRP_OUTER_VLAN:
+ vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
+ break;
+ case HNS3_STRP_INNER_VLAN:
+ vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
+ break;
+ default:
+ vlan_tag = 0;
+ break;
+ }
+
+ return vlan_tag;
+}
+
static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
struct sk_buff **out_skb, int *out_bnum)
{
@@ -2202,9 +2170,8 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
prefetch(desc);
- length = le16_to_cpu(desc->rx.pkt_len);
+ length = le16_to_cpu(desc->rx.size);
bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
- l234info = le32_to_cpu(desc->rx.l234_info);
/* Check valid BD */
if (!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))
@@ -2238,22 +2205,6 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
prefetchw(skb->data);
- /* Based on hw strategy, the tag offloaded will be stored at
- * ot_vlan_tag in two layer tag case, and stored at vlan_tag
- * in one layer tag case.
- */
- if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
- u16 vlan_tag;
-
- vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
- if (!(vlan_tag & VLAN_VID_MASK))
- vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
- if (vlan_tag & VLAN_VID_MASK)
- __vlan_hwaccel_put_tag(skb,
- htons(ETH_P_8021Q),
- vlan_tag);
- }
-
bnum = 1;
if (length <= HNS3_RX_HEAD_SIZE) {
memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
@@ -2270,8 +2221,8 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
ring->stats.seg_pkt_cnt++;
u64_stats_update_end(&ring->syncp);
- pull_len = hns3_nic_get_headlen(va, l234info,
- HNS3_RX_HEAD_SIZE);
+ pull_len = eth_get_headlen(va, HNS3_RX_HEAD_SIZE);
+
memcpy(__skb_put(skb, pull_len), va,
ALIGN(pull_len, sizeof(long)));
@@ -2290,6 +2241,22 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
*out_bnum = bnum;
+ l234info = le32_to_cpu(desc->rx.l234_info);
+
+ /* Based on hw strategy, the offloaded tag is stored in
+ * ot_vlan_tag in the two-layer tag case, and in vlan_tag
+ * in the one-layer tag case.
+ */
+ if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
+ u16 vlan_tag;
+
+ vlan_tag = hns3_parse_vlan_tag(ring, desc, l234info);
+ if (vlan_tag & VLAN_VID_MASK)
+ __vlan_hwaccel_put_tag(skb,
+ htons(ETH_P_8021Q),
+ vlan_tag);
+ }
+
if (unlikely(!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))) {
netdev_err(netdev, "no valid bd,%016llx,%016llx\n",
((u64 *)desc)[0], ((u64 *)desc)[1]);
@@ -3022,8 +2989,6 @@ int hns3_init_all_ring(struct hns3_nic_priv *priv)
goto out_when_alloc_ring_memory;
}
- hns3_init_ring_hw(priv->ring_data[i].ring);
-
u64_stats_init(&priv->ring_data[i].ring->syncp);
}
@@ -3052,13 +3017,13 @@ int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
}
/* Set mac addr if it is configured. or leave it to the AE driver */
-static void hns3_init_mac_addr(struct net_device *netdev)
+static void hns3_init_mac_addr(struct net_device *netdev, bool init)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hnae3_handle *h = priv->ae_handle;
u8 mac_addr_temp[ETH_ALEN];
- if (h->ae_algo->ops->get_mac_addr) {
+ if (h->ae_algo->ops->get_mac_addr && init) {
h->ae_algo->ops->get_mac_addr(h, mac_addr_temp);
ether_addr_copy(netdev->dev_addr, mac_addr_temp);
}
@@ -3075,6 +3040,15 @@ static void hns3_init_mac_addr(struct net_device *netdev)
}
+static void hns3_uninit_mac_addr(struct net_device *netdev)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = priv->ae_handle;
+
+ if (h->ae_algo->ops->rm_uc_addr)
+ h->ae_algo->ops->rm_uc_addr(h, netdev->dev_addr);
+}
+
static void hns3_nic_set_priv_ops(struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
@@ -3112,7 +3086,7 @@ static int hns3_client_init(struct hnae3_handle *handle)
handle->kinfo.netdev = netdev;
handle->priv = (void *)priv;
- hns3_init_mac_addr(netdev);
+ hns3_init_mac_addr(netdev, true);
hns3_set_default_feature(netdev);
@@ -3185,6 +3159,8 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
if (netdev->reg_state != NETREG_UNINITIALIZED)
unregister_netdev(netdev);
+ hns3_force_clear_all_rx_ring(handle);
+
ret = hns3_nic_uninit_vector_data(priv);
if (ret)
netdev_err(netdev, "uninit vector error\n");
@@ -3201,6 +3177,8 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
priv->ring_data = NULL;
+ hns3_uninit_mac_addr(netdev);
+
free_netdev(netdev);
}
@@ -3298,9 +3276,76 @@ static void hns3_recover_hw_addr(struct net_device *ndev)
hns3_nic_mc_sync(ndev, ha->addr);
}
-static void hns3_drop_skb_data(struct hns3_enet_ring *ring, struct sk_buff *skb)
+static void hns3_clear_tx_ring(struct hns3_enet_ring *ring)
{
- dev_kfree_skb_any(skb);
+ while (ring->next_to_clean != ring->next_to_use) {
+ ring->desc[ring->next_to_clean].tx.bdtp_fe_sc_vld_ra_ri = 0;
+ hns3_free_buffer_detach(ring, ring->next_to_clean);
+ ring_ptr_move_fw(ring, next_to_clean);
+ }
+}
+
+static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
+{
+ struct hns3_desc_cb res_cbs;
+ int ret;
+
+ while (ring->next_to_use != ring->next_to_clean) {
+ /* When a buffer is not reused, its memory has been
+ * freed in hns3_handle_rx_bd or will be freed by the
+ * stack, so we need to replace the buffer here.
+ */
+ if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
+ ret = hns3_reserve_buffer_map(ring, &res_cbs);
+ if (ret) {
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.sw_err_cnt++;
+ u64_stats_update_end(&ring->syncp);
+ /* if allocating a new buffer fails, exit directly
+ * and clear again in the up flow.
+ */
+ netdev_warn(ring->tqp->handle->kinfo.netdev,
+ "reserve buffer map failed, ret = %d\n",
+ ret);
+ return ret;
+ }
+ hns3_replace_buffer(ring, ring->next_to_use,
+ &res_cbs);
+ }
+ ring_ptr_move_fw(ring, next_to_use);
+ }
+
+ return 0;
+}
+
+static void hns3_force_clear_rx_ring(struct hns3_enet_ring *ring)
+{
+ while (ring->next_to_use != ring->next_to_clean) {
+ /* When a buffer is not reused, its memory has been
+ * freed in hns3_handle_rx_bd or will be freed by the
+ * stack, so we only need to unmap the buffer here.
+ */
+ if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
+ hns3_unmap_buffer(ring,
+ &ring->desc_cb[ring->next_to_use]);
+ ring->desc_cb[ring->next_to_use].dma = 0;
+ }
+
+ ring_ptr_move_fw(ring, next_to_use);
+ }
+}
+
+static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h)
+{
+ struct net_device *ndev = h->kinfo.netdev;
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct hns3_enet_ring *ring;
+ u32 i;
+
+ for (i = 0; i < h->kinfo.num_tqps; i++) {
+ ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
+ hns3_force_clear_rx_ring(ring);
+ }
}
static void hns3_clear_all_ring(struct hnae3_handle *h)
@@ -3314,14 +3359,55 @@ static void hns3_clear_all_ring(struct hnae3_handle *h)
struct hns3_enet_ring *ring;
ring = priv->ring_data[i].ring;
- hns3_clean_tx_ring(ring, ring->desc_num);
+ hns3_clear_tx_ring(ring);
dev_queue = netdev_get_tx_queue(ndev,
priv->ring_data[i].queue_index);
netdev_tx_reset_queue(dev_queue);
ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
- hns3_clean_rx_ring(ring, ring->desc_num, hns3_drop_skb_data);
+ /* Continue to clear other rings even if clearing some
+ * rings fails.
+ */
+ hns3_clear_rx_ring(ring);
+ }
+}
+
+int hns3_nic_reset_all_ring(struct hnae3_handle *h)
+{
+ struct net_device *ndev = h->kinfo.netdev;
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct hns3_enet_ring *rx_ring;
+ int i, j;
+ int ret;
+
+ for (i = 0; i < h->kinfo.num_tqps; i++) {
+ h->ae_algo->ops->reset_queue(h, i);
+ hns3_init_ring_hw(priv->ring_data[i].ring);
+
+ /* We need to clear the tx ring here because the self test
+ * uses the ring and does not go through down before up.
+ */
+ hns3_clear_tx_ring(priv->ring_data[i].ring);
+ priv->ring_data[i].ring->next_to_clean = 0;
+ priv->ring_data[i].ring->next_to_use = 0;
+
+ rx_ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
+ hns3_init_ring_hw(rx_ring);
+ ret = hns3_clear_rx_ring(rx_ring);
+ if (ret)
+ return ret;
+
+ /* We cannot know the hardware head and tail when this
+ * function is called in the reset flow, so we reuse all desc.
+ */
+ for (j = 0; j < rx_ring->desc_num; j++)
+ hns3_reuse_buffer(rx_ring, j);
+
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
}
+
+ return 0;
}
static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
@@ -3359,7 +3445,7 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
struct hns3_nic_priv *priv = netdev_priv(netdev);
int ret;
- hns3_init_mac_addr(netdev);
+ hns3_init_mac_addr(netdev, false);
hns3_nic_set_rx_mode(netdev);
hns3_recover_hw_addr(netdev);
@@ -3393,7 +3479,7 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
struct hns3_nic_priv *priv = netdev_priv(netdev);
int ret;
- hns3_clear_all_ring(handle);
+ hns3_force_clear_all_rx_ring(handle);
ret = hns3_nic_uninit_vector_data(priv);
if (ret) {
@@ -3409,6 +3495,8 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
priv->ring_data = NULL;
+ hns3_uninit_mac_addr(netdev);
+
return ret;
}
@@ -3529,8 +3617,6 @@ int hns3_set_channels(struct net_device *netdev,
if (if_running)
hns3_nic_net_stop(netdev);
- hns3_clear_all_ring(h);
-
ret = hns3_nic_uninit_vector_data(priv);
if (ret) {
dev_err(&netdev->dev,
@@ -3600,6 +3686,8 @@ static int __init hns3_init_module(void)
client.ops = &client_ops;
+ INIT_LIST_HEAD(&client.node);
+
ret = hnae3_register_client(&client);
if (ret)
return ret;
@@ -3627,3 +3715,4 @@ MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_LICENSE("GPL");
MODULE_ALIAS("pci:hns-nic");
+MODULE_VERSION(HNS3_MOD_VERSION);
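A standalone sketch of the ring walk used by hns3_clear_tx_ring and hns3_clear_rx_ring above: next_to_clean chases next_to_use, advancing modulo the descriptor count as ring_ptr_move_fw does. The release callback is a stand-in for the driver's free/replace helpers.

struct ring { int next_to_use, next_to_clean, desc_num; };

static void clear_ring(struct ring *r,
		       void (*release)(struct ring *, int))
{
	while (r->next_to_clean != r->next_to_use) {
		release(r, r->next_to_clean);	/* detach one descriptor */
		r->next_to_clean = (r->next_to_clean + 1) % r->desc_num;
	}
}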
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index 98cdbd3a1163..3b083d5ae9ce 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -14,6 +14,8 @@
#include "hnae3.h"
+#define HNS3_MOD_VERSION "1.0"
+
extern const char hns3_driver_version[];
enum hns3_nic_state {
@@ -102,6 +104,9 @@ enum hns3_nic_state {
#define HNS3_RXD_L4ID_S 8
#define HNS3_RXD_L4ID_M (0xf << HNS3_RXD_L4ID_S)
#define HNS3_RXD_FRAG_B 12
+#define HNS3_RXD_STRP_TAGP_S 13
+#define HNS3_RXD_STRP_TAGP_M (0x3 << HNS3_RXD_STRP_TAGP_S)
+
#define HNS3_RXD_L2E_B 16
#define HNS3_RXD_L3E_B 17
#define HNS3_RXD_L4E_B 18
@@ -620,6 +625,7 @@ int hns3_set_channels(struct net_device *netdev,
bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget);
int hns3_init_all_ring(struct hns3_nic_priv *priv);
int hns3_uninit_all_ring(struct hns3_nic_priv *priv);
+int hns3_nic_reset_all_ring(struct hnae3_handle *h);
netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev);
int hns3_clean_rx_ring(
struct hns3_enet_ring *ring, int budget,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index eb3c34f3cf87..40c0425b4023 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -74,7 +74,7 @@ struct hns3_link_mode_mapping {
u32 ethtool_link_mode;
};
-static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop)
+static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en)
{
struct hnae3_handle *h = hns3_get_handle(ndev);
int ret;
@@ -85,11 +85,7 @@ static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop)
switch (loop) {
case HNAE3_MAC_INTER_LOOP_MAC:
- ret = h->ae_algo->ops->set_loopback(h, loop, true);
- break;
- case HNAE3_MAC_LOOP_NONE:
- ret = h->ae_algo->ops->set_loopback(h,
- HNAE3_MAC_INTER_LOOP_MAC, false);
+ ret = h->ae_algo->ops->set_loopback(h, loop, en);
break;
default:
ret = -ENOTSUPP;
@@ -99,10 +95,7 @@ static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop)
if (ret)
return ret;
- if (loop == HNAE3_MAC_LOOP_NONE)
- h->ae_algo->ops->set_promisc_mode(h, ndev->flags & IFF_PROMISC);
- else
- h->ae_algo->ops->set_promisc_mode(h, 1);
+ h->ae_algo->ops->set_promisc_mode(h, en, en);
return ret;
}
@@ -115,6 +108,10 @@ static int hns3_lp_up(struct net_device *ndev, enum hnae3_loop loop_mode)
if (!h->ae_algo->ops->start)
return -EOPNOTSUPP;
+ ret = hns3_nic_reset_all_ring(h);
+ if (ret)
+ return ret;
+
ret = h->ae_algo->ops->start(h);
if (ret) {
netdev_err(ndev,
@@ -122,13 +119,13 @@ static int hns3_lp_up(struct net_device *ndev, enum hnae3_loop loop_mode)
return ret;
}
- ret = hns3_lp_setup(ndev, loop_mode);
+ ret = hns3_lp_setup(ndev, loop_mode, true);
usleep_range(10000, 20000);
return ret;
}
-static int hns3_lp_down(struct net_device *ndev)
+static int hns3_lp_down(struct net_device *ndev, enum hnae3_loop loop_mode)
{
struct hnae3_handle *h = hns3_get_handle(ndev);
int ret;
@@ -136,7 +133,7 @@ static int hns3_lp_down(struct net_device *ndev)
if (!h->ae_algo->ops->stop)
return -EOPNOTSUPP;
- ret = hns3_lp_setup(ndev, HNAE3_MAC_LOOP_NONE);
+ ret = hns3_lp_setup(ndev, loop_mode, false);
if (ret) {
netdev_err(ndev, "lb_setup return error: %d\n", ret);
return ret;
@@ -332,7 +329,7 @@ static void hns3_self_test(struct net_device *ndev,
data[test_index] = hns3_lp_up(ndev, loop_type);
if (!data[test_index]) {
data[test_index] = hns3_lp_run_test(ndev, loop_type);
- hns3_lp_down(ndev);
+ hns3_lp_down(ndev, loop_type);
}
if (data[test_index])
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
index ff13d1876d9e..c36d64710fa6 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
@@ -31,6 +31,17 @@ static int hclge_ring_space(struct hclge_cmq_ring *ring)
return ring->desc_num - used - 1;
}
+static int is_valid_csq_clean_head(struct hclge_cmq_ring *ring, int h)
+{
+ int u = ring->next_to_use;
+ int c = ring->next_to_clean;
+
+ if (unlikely(h >= ring->desc_num))
+ return 0;
+
+ return u > c ? (h > c && h <= u) : (h > c || h <= u);
+}
+
static int hclge_alloc_cmd_desc(struct hclge_cmq_ring *ring)
{
int size = ring->desc_num * sizeof(struct hclge_desc);
@@ -141,6 +152,7 @@ static void hclge_cmd_init_regs(struct hclge_hw *hw)
static int hclge_cmd_csq_clean(struct hclge_hw *hw)
{
+ struct hclge_dev *hdev = (struct hclge_dev *)hw->back;
struct hclge_cmq_ring *csq = &hw->cmq.csq;
u16 ntc = csq->next_to_clean;
struct hclge_desc *desc;
@@ -149,6 +161,13 @@ static int hclge_cmd_csq_clean(struct hclge_hw *hw)
desc = &csq->desc[ntc];
head = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG);
+ rmb(); /* Make sure head is ready before touching any data */
+
+ if (!is_valid_csq_clean_head(csq, head)) {
+ dev_warn(&hdev->pdev->dev, "wrong head (%d, %d-%d)\n", head,
+ csq->next_to_use, csq->next_to_clean);
+ return 0;
+ }
while (head != ntc) {
memset(desc, 0, sizeof(*desc));
@@ -171,7 +190,11 @@ static int hclge_cmd_csq_done(struct hclge_hw *hw)
static bool hclge_is_special_opcode(u16 opcode)
{
- u16 spec_opcode[3] = {0x0030, 0x0031, 0x0032};
+ /* These commands have several descriptors and use the first
+ * one to hold the opcode and return value.
+ */
+ u16 spec_opcode[3] = {HCLGE_OPC_STATS_64_BIT,
+ HCLGE_OPC_STATS_32_BIT, HCLGE_OPC_STATS_MAC};
int i;
for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) {
@@ -362,9 +385,9 @@ int hclge_cmd_init(struct hclge_dev *hdev)
static void hclge_destroy_queue(struct hclge_cmq_ring *ring)
{
- spin_lock_bh(&ring->lock);
+ spin_lock(&ring->lock);
hclge_free_cmd_desc(ring);
- spin_unlock_bh(&ring->lock);
+ spin_unlock(&ring->lock);
}
void hclge_destroy_cmd_queue(struct hclge_hw *hw)
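A standalone sketch of the head-validity rule added in is_valid_csq_clean_head() above: with next_to_clean c and next_to_use u, a head h reported by hardware is acceptable only if it lies in the half-open interval (c, u], taken modulo the ring size when the ring has wrapped.

#include <assert.h>
#include <stdbool.h>

static bool valid_head(int h, int c, int u, int desc_num)
{
	if (h >= desc_num)
		return false;
	return u > c ? (h > c && h <= u) : (h > c || h <= u);
}

int main(void)
{
	assert(valid_head(5, 2, 8, 16));	/* no wrap: c < h <= u */
	assert(!valid_head(9, 2, 8, 16));	/* past next_to_use */
	assert(valid_head(1, 14, 3, 16));	/* wrapped: h <= u */
	assert(valid_head(15, 14, 3, 16));	/* wrapped: h > c */
	assert(!valid_head(10, 14, 3, 16));	/* in the free region */
	return 0;
}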
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index ee3cbac6dfaa..d9aaa76c76eb 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -115,7 +115,6 @@ enum hclge_opcode_type {
HCLGE_OPC_QUERY_LINK_STATUS = 0x0307,
HCLGE_OPC_CONFIG_MAX_FRM_SIZE = 0x0308,
HCLGE_OPC_CONFIG_SPEED_DUP = 0x0309,
- HCLGE_OPC_STATS_MAC_TRAFFIC = 0x0314,
/* MACSEC command */
/* PFC/Pause CMD*/
@@ -484,6 +483,8 @@ struct hclge_promisc_param {
u8 enable;
};
+#define HCLGE_PROMISC_TX_EN_B BIT(4)
+#define HCLGE_PROMISC_RX_EN_B BIT(5)
#define HCLGE_PROMISC_EN_B 1
#define HCLGE_PROMISC_EN_ALL 0x7
#define HCLGE_PROMISC_EN_UC 0x1
@@ -704,11 +705,14 @@ struct hclge_vlan_filter_vf_cfg_cmd {
u8 vf_bitmap[16];
};
-#define HCLGE_ACCEPT_TAG_B 0
-#define HCLGE_ACCEPT_UNTAG_B 1
+#define HCLGE_ACCEPT_TAG1_B 0
+#define HCLGE_ACCEPT_UNTAG1_B 1
#define HCLGE_PORT_INS_TAG1_EN_B 2
#define HCLGE_PORT_INS_TAG2_EN_B 3
#define HCLGE_CFG_NIC_ROCE_SEL_B 4
+#define HCLGE_ACCEPT_TAG2_B 5
+#define HCLGE_ACCEPT_UNTAG2_B 6
+
struct hclge_vport_vtag_tx_cfg_cmd {
u8 vport_vlan_cfg;
u8 vf_offset;
@@ -813,21 +817,13 @@ struct hclge_reset_cmd {
#define HCLGE_NIC_CMQ_DESC_NUM 1024
#define HCLGE_NIC_CMQ_DESC_NUM_S 3
-#define HCLGE_LED_PORT_SPEED_STATE_S 0
-#define HCLGE_LED_PORT_SPEED_STATE_M GENMASK(5, 0)
-#define HCLGE_LED_ACTIVITY_STATE_S 0
-#define HCLGE_LED_ACTIVITY_STATE_M GENMASK(1, 0)
-#define HCLGE_LED_LINK_STATE_S 0
-#define HCLGE_LED_LINK_STATE_M GENMASK(1, 0)
#define HCLGE_LED_LOCATE_STATE_S 0
#define HCLGE_LED_LOCATE_STATE_M GENMASK(1, 0)
struct hclge_set_led_state_cmd {
- u8 port_speed_led_config;
- u8 link_led_config;
- u8 activity_led_config;
+ u8 rsv1[3];
u8 locate_led_config;
- u8 rsv[20];
+ u8 rsv2[20];
};
int hclge_cmd_init(struct hclge_dev *hdev);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 2066dd734444..d318d35e598f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -39,7 +39,6 @@ static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
-static int hclge_update_led_status(struct hclge_dev *hdev);
static struct hnae3_ae_algo ae_algo;
@@ -304,8 +303,6 @@ static const struct hclge_comm_stats_str g_mac_stats_string[] = {
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
{"mac_tx_4096_8191_oct_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
- {"mac_tx_8192_12287_oct_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_12287_oct_pkt_num)},
{"mac_tx_8192_9216_oct_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
{"mac_tx_9217_12287_oct_pkt_num",
@@ -356,8 +353,6 @@ static const struct hclge_comm_stats_str g_mac_stats_string[] = {
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
{"mac_rx_4096_8191_oct_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
- {"mac_rx_8192_12287_oct_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_12287_oct_pkt_num)},
{"mac_rx_8192_9216_oct_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
{"mac_rx_9217_12287_oct_pkt_num",
@@ -508,38 +503,6 @@ static int hclge_32_bit_update_stats(struct hclge_dev *hdev)
return 0;
}
-static int hclge_mac_get_traffic_stats(struct hclge_dev *hdev)
-{
- struct hclge_mac_stats *mac_stats = &hdev->hw_stats.mac_stats;
- struct hclge_desc desc;
- __le64 *desc_data;
- int ret;
-
- /* for fiber port, need to query the total rx/tx packets statstics,
- * used for data transferring checking.
- */
- if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
- return 0;
-
- if (test_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
- return 0;
-
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_STATS_MAC_TRAFFIC, true);
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Get MAC total pkt stats fail, ret = %d\n", ret);
-
- return ret;
- }
-
- desc_data = (__le64 *)(&desc.data[0]);
- mac_stats->mac_tx_total_pkt_num += le64_to_cpu(*desc_data++);
- mac_stats->mac_rx_total_pkt_num += le64_to_cpu(*desc_data);
-
- return 0;
-}
-
static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21
@@ -1459,8 +1422,11 @@ static int hclge_alloc_vport(struct hclge_dev *hdev)
/* We need to alloc a vport for main NIC of PF */
num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
- if (hdev->num_tqps < num_vport)
- num_vport = hdev->num_tqps;
+ if (hdev->num_tqps < num_vport) {
+ dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
+ hdev->num_tqps, num_vport);
+ return -EINVAL;
+ }
/* Alloc the same number of TQPs for every vport */
tqp_per_vport = hdev->num_tqps / num_vport;
@@ -1474,21 +1440,8 @@ static int hclge_alloc_vport(struct hclge_dev *hdev)
hdev->vport = vport;
hdev->num_alloc_vport = num_vport;
-#ifdef CONFIG_PCI_IOV
- /* Enable SRIOV */
- if (hdev->num_req_vfs) {
- dev_info(&pdev->dev, "active VFs(%d) found, enabling SRIOV\n",
- hdev->num_req_vfs);
- ret = pci_enable_sriov(hdev->pdev, hdev->num_req_vfs);
- if (ret) {
- hdev->num_alloc_vfs = 0;
- dev_err(&pdev->dev, "SRIOV enable failed %d\n",
- ret);
- return ret;
- }
- }
- hdev->num_alloc_vfs = hdev->num_req_vfs;
-#endif
+ if (IS_ENABLED(CONFIG_PCI_IOV))
+ hdev->num_alloc_vfs = hdev->num_req_vfs;
for (i = 0; i < num_vport; i++) {
vport->back = hdev;
@@ -2335,8 +2288,10 @@ static int hclge_mac_init(struct hclge_dev *hdev)
struct net_device *netdev = handle->kinfo.netdev;
struct hclge_mac *mac = &hdev->hw.mac;
u8 mac_mask[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+ struct hclge_vport *vport;
int mtu;
int ret;
+ int i;
ret = hclge_cfg_mac_speed_dup(hdev, hdev->hw.mac.speed, HCLGE_MAC_FULL);
if (ret) {
@@ -2348,7 +2303,6 @@ static int hclge_mac_init(struct hclge_dev *hdev)
mac->link = 0;
/* Initialize the MTA table work mode */
- hdev->accept_mta_mc = true;
hdev->enable_mta = true;
hdev->mta_mac_sel_type = HCLGE_MAC_ADDR_47_36;
@@ -2361,11 +2315,17 @@ static int hclge_mac_init(struct hclge_dev *hdev)
return ret;
}
- ret = hclge_cfg_func_mta_filter(hdev, 0, hdev->accept_mta_mc);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "set mta filter mode fail ret=%d\n", ret);
- return ret;
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ vport = &hdev->vport[i];
+ vport->accept_mta_mc = false;
+
+ memset(vport->mta_shadow, 0, sizeof(vport->mta_shadow));
+ ret = hclge_cfg_func_mta_filter(hdev, vport->vport_id, false);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "set mta filter mode fail ret=%d\n", ret);
+ return ret;
+ }
}
ret = hclge_set_default_mac_vlan_mask(hdev, true, mac_mask);
@@ -2597,6 +2557,15 @@ static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
}
}
+static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
+{
+ hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
+ BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
+ BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
+ BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
+ hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
+}
+
static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
{
writel(enable ? 1 : 0, vector->addr);
@@ -2627,16 +2596,18 @@ static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
* mbx messages reported by this interrupt.
*/
hclge_mbx_task_schedule(hdev);
-
+ break;
default:
- dev_dbg(&hdev->pdev->dev,
- "received unknown or unhandled event of vector0\n");
+ dev_warn(&hdev->pdev->dev,
+ "received unknown or unhandled event of vector0\n");
break;
}
- /* we should clear the source of interrupt */
- hclge_clear_event_cause(hdev, event_cause, clearval);
- hclge_enable_vector(&hdev->misc_vector, true);
+ /* clear the source of interrupt if it is not caused by reset */
+ if (event_cause != HCLGE_VECTOR0_EVENT_RST) {
+ hclge_clear_event_cause(hdev, event_cause, clearval);
+ hclge_enable_vector(&hdev->misc_vector, true);
+ }
return IRQ_HANDLED;
}
@@ -2824,6 +2795,33 @@ static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
return rst_level;
}
+static void hclge_clear_reset_cause(struct hclge_dev *hdev)
+{
+ u32 clearval = 0;
+
+ switch (hdev->reset_type) {
+ case HNAE3_IMP_RESET:
+ clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
+ break;
+ case HNAE3_GLOBAL_RESET:
+ clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
+ break;
+ case HNAE3_CORE_RESET:
+ clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
+ break;
+ default:
+ dev_warn(&hdev->pdev->dev, "Unsupported reset event to clear:%d",
+ hdev->reset_type);
+ break;
+ }
+
+ if (!clearval)
+ return;
+
+ hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
+ hclge_enable_vector(&hdev->misc_vector, true);
+}
+
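The new hclge_clear_reset_cause pairs with the IRQ-handler change above: for reset events the handler leaves the cause latched, and the reset task clears only the bit for the reset it actually serviced before re-arming the vector. A minimal userspace sketch of that flow; bit positions and names are illustrative, not the driver's register layout:

    #include <stdio.h>

    enum reset_type { IMP_RESET, GLOBAL_RESET, CORE_RESET };

    #define IMP_BIT    (1u << 0)   /* illustrative bit positions */
    #define GLOBAL_BIT (1u << 1)
    #define CORE_BIT   (1u << 2)

    static unsigned int pending_cause = GLOBAL_BIT; /* latched at IRQ time */

    /* reset task: clear only the cause we handled, then re-arm the vector */
    static void clear_reset_cause(enum reset_type t)
    {
        unsigned int clearval = 0;

        switch (t) {
        case IMP_RESET:    clearval = IMP_BIT;    break;
        case GLOBAL_RESET: clearval = GLOBAL_BIT; break;
        case CORE_RESET:   clearval = CORE_BIT;   break;
        default:           return;                /* nothing to clear */
        }
        pending_cause &= ~clearval;  /* models the write-1-to-clear register */
        printf("vector re-enabled, cause now 0x%x\n", pending_cause);
    }

    int main(void) { clear_reset_cause(GLOBAL_RESET); return 0; }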
static void hclge_reset(struct hclge_dev *hdev)
{
/* perform reset of the stack & ae device for a client */
@@ -2836,6 +2834,8 @@ static void hclge_reset(struct hclge_dev *hdev)
hclge_reset_ae_dev(hdev->ae_dev);
hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
rtnl_unlock();
+
+ hclge_clear_reset_cause(hdev);
} else {
/* schedule again to check pending resets later */
set_bit(hdev->reset_type, &hdev->reset_pending);
@@ -2930,38 +2930,16 @@ static void hclge_service_task(struct work_struct *work)
struct hclge_dev *hdev =
container_of(work, struct hclge_dev, service_task);
- /* The total rx/tx packets statstics are wanted to be updated
- * per second. Both hclge_update_stats_for_all() and
- * hclge_mac_get_traffic_stats() can do it.
- */
if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
hclge_update_stats_for_all(hdev);
hdev->hw_stats.stats_timer = 0;
- } else {
- hclge_mac_get_traffic_stats(hdev);
}
hclge_update_speed_duplex(hdev);
hclge_update_link_status(hdev);
- hclge_update_led_status(hdev);
hclge_service_complete(hdev);
}
-static void hclge_disable_sriov(struct hclge_dev *hdev)
-{
- /* If our VFs are assigned we cannot shut down SR-IOV
- * without causing issues, so just leave the hardware
- * available but disabled
- */
- if (pci_vfs_assigned(hdev->pdev)) {
- dev_warn(&hdev->pdev->dev,
- "disabling driver while VFs are assigned\n");
- return;
- }
-
- pci_disable_sriov(hdev->pdev);
-}
-
struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
{
/* VF handle has no client */
@@ -3615,7 +3593,14 @@ int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
req = (struct hclge_promisc_cfg_cmd *)desc.data;
req->vf_id = param->vf_id;
- req->flag = (param->enable << HCLGE_PROMISC_EN_B);
+
+ /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported
+ * on pdev revision 0x20; newer revisions support them. Setting these
+ * two fields does not cause the firmware to return an error on
+ * revision 0x20.
+ */
+ req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
+ HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
@@ -3642,13 +3627,15 @@ void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
param->vf_id = vport_id;
}
-static void hclge_set_promisc_mode(struct hnae3_handle *handle, u32 en)
+static void hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
+ bool en_mc_pmc)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
struct hclge_promisc_param param;
- hclge_promisc_param_init(&param, en, en, true, vport->vport_id);
+ hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, true,
+ vport->vport_id);
hclge_cmd_set_promisc_mode(hdev, &param);
}
@@ -3683,48 +3670,50 @@ static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
"mac enable fail, ret =%d.\n", ret);
}
-static int hclge_set_loopback(struct hnae3_handle *handle,
- enum hnae3_loop loop_mode, bool en)
+static int hclge_set_mac_loopback(struct hclge_dev *hdev, bool en)
{
- struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_config_mac_mode_cmd *req;
- struct hclge_dev *hdev = vport->back;
struct hclge_desc desc;
u32 loop_en;
int ret;
- switch (loop_mode) {
- case HNAE3_MAC_INTER_LOOP_MAC:
- req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
- /* 1 Read out the MAC mode config at first */
- hclge_cmd_setup_basic_desc(&desc,
- HCLGE_OPC_CONFIG_MAC_MODE,
- true);
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "mac loopback get fail, ret =%d.\n",
- ret);
- return ret;
- }
+ req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
+ /* 1 Read out the MAC mode config at first */
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "mac loopback get fail, ret =%d.\n", ret);
+ return ret;
+ }
- /* 2 Then setup the loopback flag */
- loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
- if (en)
- hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 1);
- else
- hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
+ /* 2 Then setup the loopback flag */
+ loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
+ hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
- req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
+ req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
- /* 3 Config mac work mode with loopback flag
- * and its original configure parameters
- */
- hclge_cmd_reuse_desc(&desc, false);
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "mac loopback set fail, ret =%d.\n", ret);
+ /* 3 Config mac work mode with loopback flag
+ * and its original configure parameters
+ */
+ hclge_cmd_reuse_desc(&desc, false);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "mac loopback set fail, ret =%d.\n", ret);
+ return ret;
+}
+
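The extracted helper is a plain read-modify-write over one firmware descriptor: read the current MAC mode config, flip only the application-loopback bit, and write the same descriptor back so every other field is preserved. A generic sketch of the pattern, with an invented bit position standing in for HCLGE_MAC_APP_LP_B:

    #include <stdbool.h>
    #include <stdio.h>

    #define APP_LP_BIT (1u << 3)         /* illustrative bit position */

    static unsigned int mac_mode = 0x15; /* stands in for firmware state */

    static int set_mac_loopback(bool en)
    {
        unsigned int v = mac_mode;       /* 1: read out current config */

        if (en)                          /* 2: touch only the loopback bit */
            v |= APP_LP_BIT;
        else
            v &= ~APP_LP_BIT;

        mac_mode = v;                    /* 3: write back, other bits intact */
        printf("mac mode now 0x%x\n", mac_mode);
        return 0;
    }

    int main(void) { return set_mac_loopback(true); }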
+static int hclge_set_loopback(struct hnae3_handle *handle,
+ enum hnae3_loop loop_mode, bool en)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ switch (loop_mode) {
+ case HNAE3_MAC_INTER_LOOP_MAC:
+ ret = hclge_set_mac_loopback(hdev, en);
break;
default:
ret = -ENOTSUPP;
@@ -3783,13 +3772,11 @@ static int hclge_ae_start(struct hnae3_handle *handle)
hclge_cfg_mac_mode(hdev, true);
clear_bit(HCLGE_STATE_DOWN, &hdev->state);
mod_timer(&hdev->service_timer, jiffies + HZ);
+ hdev->hw.mac.link = 0;
/* reset tqp stats */
hclge_reset_tqp_stats(handle);
- if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
- return 0;
-
ret = hclge_mac_start_phy(hdev);
if (ret)
return ret;
@@ -3805,9 +3792,12 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
del_timer_sync(&hdev->service_timer);
cancel_work_sync(&hdev->service_task);
+ clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
- if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+ if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) {
+ hclge_mac_stop_phy(hdev);
return;
+ }
for (i = 0; i < vport->alloc_tqps; i++)
hclge_tqp_enable(hdev, i, 0, false);
@@ -3819,6 +3809,8 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
/* reset tqp stats */
hclge_reset_tqp_stats(handle);
+ del_timer_sync(&hdev->service_timer);
+ cancel_work_sync(&hdev->service_task);
hclge_update_link_status(hdev);
}
@@ -4029,9 +4021,88 @@ static int hclge_set_mta_table_item(struct hclge_vport *vport,
return ret;
}
+ if (enable)
+ set_bit(idx, vport->mta_shadow);
+ else
+ clear_bit(idx, vport->mta_shadow);
+
return 0;
}
+static int hclge_update_mta_status(struct hnae3_handle *handle)
+{
+ unsigned long mta_status[BITS_TO_LONGS(HCLGE_MTA_TBL_SIZE)];
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct net_device *netdev = handle->kinfo.netdev;
+ struct netdev_hw_addr *ha;
+ u16 tbl_idx;
+
+ memset(mta_status, 0, sizeof(mta_status));
+
+ /* update mta_status from mc addr list */
+ netdev_for_each_mc_addr(ha, netdev) {
+ tbl_idx = hclge_get_mac_addr_to_mta_index(vport, ha->addr);
+ set_bit(tbl_idx, mta_status);
+ }
+
+ return hclge_update_mta_status_common(vport, mta_status,
+ 0, HCLGE_MTA_TBL_SIZE, true);
+}
+
+int hclge_update_mta_status_common(struct hclge_vport *vport,
+ unsigned long *status,
+ u16 idx,
+ u16 count,
+ bool update_filter)
+{
+ struct hclge_dev *hdev = vport->back;
+ u16 update_max = idx + count;
+ u16 check_max;
+ int ret = 0;
+ bool used;
+ u16 i;
+
+ /* setup mta check range */
+ if (update_filter) {
+ i = 0;
+ check_max = HCLGE_MTA_TBL_SIZE;
+ } else {
+ i = idx;
+ check_max = update_max;
+ }
+
+ used = false;
+ /* check and update all mta items */
+ for (; i < check_max; i++) {
+ /* ignore unused item */
+ if (!test_bit(i, vport->mta_shadow))
+ continue;
+
+ /* if i is in the update range then update it */
+ if (i >= idx && i < update_max)
+ if (!test_bit(i - idx, status))
+ hclge_set_mta_table_item(vport, i, false);
+
+ if (!used && test_bit(i, vport->mta_shadow))
+ used = true;
+ }
+
+ /* no longer use mta, disable it */
+ if (vport->accept_mta_mc && update_filter && !used) {
+ ret = hclge_cfg_func_mta_filter(hdev,
+ vport->vport_id,
+ false);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "disable func mta filter fail ret=%d\n",
+ ret);
+ else
+ vport->accept_mta_mc = false;
+ }
+
+ return ret;
+}
+
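hclge_update_mta_status_common is a shadow-bitmap reconcile: walk the shadow of entries already programmed, drop any that the freshly computed status no longer wants, and disable the whole function filter once nothing remains in use. The same logic in a hedged sketch, shrunk to a 16-entry table instead of the real 4096:

    #include <stdbool.h>
    #include <stdio.h>

    #define TBL 16                      /* stand-in for HCLGE_MTA_TBL_SIZE */

    static bool shadow[TBL];            /* entries currently programmed */

    static void reconcile(const bool *status)
    {
        bool used = false;

        for (int i = 0; i < TBL; i++) {
            if (!shadow[i])
                continue;               /* ignore unused item */
            if (!status[i])
                shadow[i] = false;      /* hw table item cleared here */
            used |= shadow[i];
        }
        if (!used)
            printf("no MTA entries left, disable function filter\n");
    }

    int main(void)
    {
        bool status[TBL] = { [3] = true };  /* desired state */

        shadow[3] = shadow[7] = true;   /* previously programmed entries */
        reconcile(status);              /* drops index 7, keeps index 3 */
        return 0;
    }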
static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
struct hclge_mac_vlan_tbl_entry_cmd *req)
{
@@ -4299,9 +4370,25 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport,
status = hclge_add_mac_vlan_tbl(vport, &req, desc);
}
- /* Set MTA table for this MAC address */
- tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr);
- status = hclge_set_mta_table_item(vport, tbl_idx, true);
+ /* If mc mac vlan table is full, use MTA table */
+ if (status == -ENOSPC) {
+ if (!vport->accept_mta_mc) {
+ status = hclge_cfg_func_mta_filter(hdev,
+ vport->vport_id,
+ true);
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+ "set mta filter mode fail ret=%d\n",
+ status);
+ return status;
+ }
+ vport->accept_mta_mc = true;
+ }
+
+ /* Set MTA table for this MAC address */
+ tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr);
+ status = hclge_set_mta_table_item(vport, tbl_idx, true);
+ }
return status;
}
@@ -4321,7 +4408,6 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport,
struct hclge_mac_vlan_tbl_entry_cmd req;
enum hclge_cmd_status status;
struct hclge_desc desc[3];
- u16 tbl_idx;
/* mac addr check */
if (!is_multicast_ether_addr(addr)) {
@@ -4350,17 +4436,15 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport,
status = hclge_add_mac_vlan_tbl(vport, &req, desc);
} else {
- /* This mac addr do not exist, can't delete it */
- dev_err(&hdev->pdev->dev,
- "Rm multicast mac addr failed, ret = %d.\n",
- status);
- return -EIO;
+ /* This mac address may be in the mta table, but it cannot be
+ * deleted here because an mta entry represents an address
+ * range rather than a specific address. The delete action for
+ * all entries takes effect in update_mta_status, called by
+ * hns3_nic_set_rx_mode.
+ */
+ status = 0;
}
- /* Set MTB table for this MAC address */
- tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr);
- status = hclge_set_mta_table_item(vport, tbl_idx, false);
-
return status;
}
@@ -4540,8 +4624,9 @@ static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, enable);
}
-int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
- bool is_kill, u16 vlan, u8 qos, __be16 proto)
+static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
+ bool is_kill, u16 vlan, u8 qos,
+ __be16 proto)
{
#define HCLGE_MAX_VF_BYTES 16
struct hclge_vlan_filter_vf_cfg_cmd *req0;
@@ -4581,9 +4666,16 @@ int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
}
if (!is_kill) {
+#define HCLGE_VF_VLAN_NO_ENTRY 2
if (!req0->resp_code || req0->resp_code == 1)
return 0;
+ if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
+ dev_warn(&hdev->pdev->dev,
+ "vf vlan table is full, vf vlan filter is disabled\n");
+ return 0;
+ }
+
dev_err(&hdev->pdev->dev,
"Add vf vlan filter fail, ret =%d.\n",
req0->resp_code);
@@ -4599,12 +4691,9 @@ int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
return -EIO;
}
-static int hclge_set_port_vlan_filter(struct hnae3_handle *handle,
- __be16 proto, u16 vlan_id,
- bool is_kill)
+static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
+ u16 vlan_id, bool is_kill)
{
- struct hclge_vport *vport = hclge_get_vport(handle);
- struct hclge_dev *hdev = vport->back;
struct hclge_vlan_filter_pf_cfg_cmd *req;
struct hclge_desc desc;
u8 vlan_offset_byte_val;
@@ -4624,22 +4713,66 @@ static int hclge_set_port_vlan_filter(struct hnae3_handle *handle,
req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "port vlan command, send fail, ret =%d.\n", ret);
+ return ret;
+}
+
+static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
+ u16 vport_id, u16 vlan_id, u8 qos,
+ bool is_kill)
+{
+ u16 vport_idx, vport_num = 0;
+ int ret;
+
+ ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
+ 0, proto);
if (ret) {
dev_err(&hdev->pdev->dev,
- "port vlan command, send fail, ret =%d.\n",
- ret);
+ "Set %d vport vlan filter config fail, ret =%d.\n",
+ vport_id, ret);
return ret;
}
- ret = hclge_set_vf_vlan_common(hdev, 0, is_kill, vlan_id, 0, proto);
- if (ret) {
+ /* vlan 0 may be added twice when 8021q module is enabled */
+ if (!is_kill && !vlan_id &&
+ test_bit(vport_id, hdev->vlan_table[vlan_id]))
+ return 0;
+
+ if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
dev_err(&hdev->pdev->dev,
- "Set pf vlan filter config fail, ret =%d.\n",
- ret);
- return -EIO;
+ "Add port vlan failed, vport %d is already in vlan %d\n",
+ vport_id, vlan_id);
+ return -EINVAL;
}
- return 0;
+ if (is_kill &&
+ !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
+ dev_err(&hdev->pdev->dev,
+ "Delete port vlan failed, vport %d is not in vlan %d\n",
+ vport_id, vlan_id);
+ return -EINVAL;
+ }
+
+ for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], VLAN_N_VID)
+ vport_num++;
+
+ if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
+ ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
+ is_kill);
+
+ return ret;
+}
+
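The vlan_table bookkeeping in hclge_set_vlan_filter_hw is a reference count kept as a per-VLAN bitmap of vports: the port-level filter is programmed only when the first vport joins a VLAN and removed only when the last one leaves. A compact sketch of that rule, with made-up sizes:

    #include <stdbool.h>
    #include <stdio.h>

    #define VPORTS 8
    static bool vlan_table[10][VPORTS];     /* [vlan_id][vport] membership */

    static void set_vlan(int vport, int vlan, bool kill)
    {
        int members = 0;

        vlan_table[vlan][vport] = !kill;
        for (int v = 0; v < VPORTS; v++)
            members += vlan_table[vlan][v];

        /* program the port filter only on first add / last remove */
        if ((kill && members == 0) || (!kill && members == 1))
            printf("update port vlan filter: vlan %d %s\n",
                   vlan, kill ? "removed" : "added");
    }

    int main(void)
    {
        set_vlan(0, 5, false);  /* first member: programs the port filter */
        set_vlan(1, 5, false);  /* second member: no port-level change */
        set_vlan(0, 5, true);   /* one member still left: no change */
        set_vlan(1, 5, true);   /* last member gone: filter removed */
        return 0;
    }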
+int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
+ u16 vlan_id, bool is_kill)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ return hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, vlan_id,
+ 0, is_kill);
}
static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
@@ -4653,7 +4786,7 @@ static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
if (proto != htons(ETH_P_8021Q))
return -EPROTONOSUPPORT;
- return hclge_set_vf_vlan_common(hdev, vfid, false, vlan, qos, proto);
+ return hclge_set_vlan_filter_hw(hdev, proto, vfid, vlan, qos, false);
}
static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
@@ -4669,10 +4802,14 @@ static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
- hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG_B,
- vcfg->accept_tag ? 1 : 0);
- hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG_B,
- vcfg->accept_untag ? 1 : 0);
+ hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
+ vcfg->accept_tag1 ? 1 : 0);
+ hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
+ vcfg->accept_untag1 ? 1 : 0);
+ hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
+ vcfg->accept_tag2 ? 1 : 0);
+ hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
+ vcfg->accept_untag2 ? 1 : 0);
hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
vcfg->insert_tag1_en ? 1 : 0);
hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
@@ -4796,8 +4933,18 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev)
for (i = 0; i < hdev->num_alloc_vport; i++) {
vport = &hdev->vport[i];
- vport->txvlan_cfg.accept_tag = true;
- vport->txvlan_cfg.accept_untag = true;
+ vport->txvlan_cfg.accept_tag1 = true;
+ vport->txvlan_cfg.accept_untag1 = true;
+
+ /* accept_tag2 and accept_untag2 are not supported on
+ * pdev revision 0x20; newer revisions support them. Setting
+ * these two fields does not cause the firmware to return an
+ * error on revision 0x20. They are not user-configurable.
+ */
+ vport->txvlan_cfg.accept_tag2 = true;
+ vport->txvlan_cfg.accept_untag2 = true;
+
vport->txvlan_cfg.insert_tag1_en = false;
vport->txvlan_cfg.insert_tag2_en = false;
vport->txvlan_cfg.default_tag1 = 0;
@@ -4818,10 +4965,10 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev)
}
handle = &hdev->vport[0].nic;
- return hclge_set_port_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
+ return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
}
-static int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
+int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
struct hclge_vport *vport = hclge_get_vport(handle);
@@ -5166,12 +5313,6 @@ static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
struct phy_device *phydev = hdev->hw.mac.phydev;
u32 fc_autoneg;
- /* Only support flow control negotiation for netdev with
- * phy attached for now.
- */
- if (!phydev)
- return -EOPNOTSUPP;
-
fc_autoneg = hclge_get_autoneg(handle);
if (auto_neg != fc_autoneg) {
dev_info(&hdev->pdev->dev,
@@ -5190,6 +5331,12 @@ static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
if (!fc_autoneg)
return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
+ /* Only support flow control negotiation for netdev with
+ * phy attached for now.
+ */
+ if (!phydev)
+ return -EOPNOTSUPP;
+
return phy_start_aneg(phydev);
}
@@ -5282,7 +5429,7 @@ static int hclge_init_client_instance(struct hnae3_client *client,
vport->nic.client = client;
ret = client->ops->init_instance(&vport->nic);
if (ret)
- goto err;
+ return ret;
if (hdev->roce_client &&
hnae3_dev_roce_supported(hdev)) {
@@ -5290,11 +5437,11 @@ static int hclge_init_client_instance(struct hnae3_client *client,
ret = hclge_init_roce_base_info(vport);
if (ret)
- goto err;
+ return ret;
ret = rc->ops->init_instance(&vport->roce);
if (ret)
- goto err;
+ return ret;
}
break;
@@ -5304,7 +5451,7 @@ static int hclge_init_client_instance(struct hnae3_client *client,
ret = client->ops->init_instance(&vport->nic);
if (ret)
- goto err;
+ return ret;
break;
case HNAE3_CLIENT_ROCE:
@@ -5316,18 +5463,16 @@ static int hclge_init_client_instance(struct hnae3_client *client,
if (hdev->roce_client && hdev->nic_client) {
ret = hclge_init_roce_base_info(vport);
if (ret)
- goto err;
+ return ret;
ret = client->ops->init_instance(&vport->roce);
if (ret)
- goto err;
+ return ret;
}
}
}
return 0;
-err:
- return ret;
}
static void hclge_uninit_client_instance(struct hnae3_client *client,
@@ -5364,7 +5509,7 @@ static int hclge_pci_init(struct hclge_dev *hdev)
ret = pci_enable_device(pdev);
if (ret) {
dev_err(&pdev->dev, "failed to enable PCI device\n");
- goto err_no_drvdata;
+ return ret;
}
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
@@ -5402,8 +5547,6 @@ err_clr_master:
pci_release_regions(pdev);
err_disable_device:
pci_disable_device(pdev);
-err_no_drvdata:
- pci_set_drvdata(pdev, NULL);
return ret;
}
@@ -5412,6 +5555,7 @@ static void hclge_pci_uninit(struct hclge_dev *hdev)
{
struct pci_dev *pdev = hdev->pdev;
+ pcim_iounmap(pdev, hdev->hw.io_base);
pci_free_irq_vectors(pdev);
pci_clear_master(pdev);
pci_release_mem_regions(pdev);
@@ -5427,7 +5571,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
if (!hdev) {
ret = -ENOMEM;
- goto err_hclge_dev;
+ goto out;
}
hdev->pdev = pdev;
@@ -5440,38 +5584,38 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
ret = hclge_pci_init(hdev);
if (ret) {
dev_err(&pdev->dev, "PCI init failed\n");
- goto err_pci_init;
+ goto out;
}
/* Firmware command queue initialize */
ret = hclge_cmd_queue_init(hdev);
if (ret) {
dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
- return ret;
+ goto err_pci_uninit;
}
/* Firmware command initialize */
ret = hclge_cmd_init(hdev);
if (ret)
- goto err_cmd_init;
+ goto err_cmd_uninit;
ret = hclge_get_cap(hdev);
if (ret) {
dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
ret);
- return ret;
+ goto err_cmd_uninit;
}
ret = hclge_configure(hdev);
if (ret) {
dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
- return ret;
+ goto err_cmd_uninit;
}
ret = hclge_init_msi(hdev);
if (ret) {
dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
- return ret;
+ goto err_cmd_uninit;
}
ret = hclge_misc_irq_init(hdev);
@@ -5479,69 +5623,71 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
dev_err(&pdev->dev,
"Misc IRQ(vector0) init error, ret = %d.\n",
ret);
- return ret;
+ goto err_msi_uninit;
}
ret = hclge_alloc_tqps(hdev);
if (ret) {
dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
- return ret;
+ goto err_msi_irq_uninit;
}
ret = hclge_alloc_vport(hdev);
if (ret) {
dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
- return ret;
+ goto err_msi_irq_uninit;
}
ret = hclge_map_tqp(hdev);
if (ret) {
dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
- return ret;
+ goto err_msi_irq_uninit;
}
- ret = hclge_mac_mdio_config(hdev);
- if (ret) {
- dev_warn(&hdev->pdev->dev,
- "mdio config fail ret=%d\n", ret);
- return ret;
+ if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
+ ret = hclge_mac_mdio_config(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "mdio config fail ret=%d\n", ret);
+ goto err_msi_irq_uninit;
+ }
}
ret = hclge_mac_init(hdev);
if (ret) {
dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
- return ret;
+ goto err_mdiobus_unreg;
}
ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
if (ret) {
dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
- return ret;
+ goto err_mdiobus_unreg;
}
ret = hclge_init_vlan_config(hdev);
if (ret) {
dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
- return ret;
+ goto err_mdiobus_unreg;
}
ret = hclge_tm_schd_init(hdev);
if (ret) {
dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
- return ret;
+ goto err_mdiobus_unreg;
}
hclge_rss_init_cfg(hdev);
ret = hclge_rss_init_hw(hdev);
if (ret) {
dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
- return ret;
+ goto err_mdiobus_unreg;
}
ret = init_mgr_tbl(hdev);
if (ret) {
dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
- return ret;
+ goto err_mdiobus_unreg;
}
hclge_dcb_ops_set(hdev);
@@ -5551,6 +5697,8 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
+ hclge_clear_all_event_cause(hdev);
+
/* Enable MISC vector(vector0) */
hclge_enable_vector(&hdev->misc_vector, true);
@@ -5564,11 +5712,21 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
return 0;
-err_cmd_init:
+err_mdiobus_unreg:
+ if (hdev->hw.mac.phydev)
+ mdiobus_unregister(hdev->hw.mac.mdio_bus);
+err_msi_irq_uninit:
+ hclge_misc_irq_uninit(hdev);
+err_msi_uninit:
+ pci_free_irq_vectors(pdev);
+err_cmd_uninit:
+ hclge_destroy_cmd_queue(&hdev->hw);
+err_pci_uninit:
+ pcim_iounmap(pdev, hdev->hw.io_base);
+ pci_clear_master(pdev);
pci_release_regions(pdev);
-err_pci_init:
- pci_set_drvdata(pdev, NULL);
-err_hclge_dev:
+ pci_disable_device(pdev);
+out:
return ret;
}
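The hunk above converts hclge_init_ae_dev from bare early returns, which leaked partially initialized resources, to the kernel's cascading-goto unwind idiom: each successful step gains a label to jump to on a later failure, so teardown runs in exact reverse order of setup. A generic sketch of the shape, with invented step names:

    #include <stdio.h>

    static int step(const char *name, int fail)
    {
        printf("init %s\n", name);
        return fail ? -1 : 0;
    }

    static int init_all(void)
    {
        int ret;

        ret = step("cmd queue", 0);
        if (ret)
            goto out;
        ret = step("msi", 0);
        if (ret)
            goto err_cmd_uninit;
        ret = step("irq", 1);          /* simulate a failure at this step */
        if (ret)
            goto err_msi_uninit;
        return 0;

    err_msi_uninit:
        printf("undo msi\n");          /* unwinds in reverse order */
    err_cmd_uninit:
        printf("undo cmd queue\n");
    out:
        return ret;
    }

    int main(void) { return init_all() ? 1 : 0; }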
@@ -5586,6 +5744,7 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
set_bit(HCLGE_STATE_DOWN, &hdev->state);
hclge_stats_clear(hdev);
+ memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
ret = hclge_cmd_init(hdev);
if (ret) {
@@ -5642,9 +5801,6 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
return ret;
}
- /* Enable MISC vector(vector0) */
- hclge_enable_vector(&hdev->misc_vector, true);
-
dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
HCLGE_DRIVER_NAME);
@@ -5658,9 +5814,6 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
set_bit(HCLGE_STATE_DOWN, &hdev->state);
- if (IS_ENABLED(CONFIG_PCI_IOV))
- hclge_disable_sriov(hdev);
-
if (hdev->service_timer.function)
del_timer_sync(&hdev->service_timer);
if (hdev->service_task.func)
@@ -5675,6 +5828,8 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
/* Disable MISC vector(vector0) */
hclge_enable_vector(&hdev->misc_vector, false);
+ synchronize_irq(hdev->misc_vector.vector_irq);
+
hclge_destroy_cmd_queue(&hdev->hw);
hclge_misc_irq_uninit(hdev);
hclge_pci_uninit(hdev);
@@ -5985,9 +6140,7 @@ static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
"Get 64 bit register failed, ret = %d.\n", ret);
}
-static int hclge_set_led_status_sfp(struct hclge_dev *hdev, u8 speed_led_status,
- u8 act_led_status, u8 link_led_status,
- u8 locate_led_status)
+static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
{
struct hclge_set_led_state_cmd *req;
struct hclge_desc desc;
@@ -5996,12 +6149,6 @@ static int hclge_set_led_status_sfp(struct hclge_dev *hdev, u8 speed_led_status,
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
req = (struct hclge_set_led_state_cmd *)desc.data;
- hnae_set_field(req->port_speed_led_config, HCLGE_LED_PORT_SPEED_STATE_M,
- HCLGE_LED_PORT_SPEED_STATE_S, speed_led_status);
- hnae_set_field(req->link_led_config, HCLGE_LED_ACTIVITY_STATE_M,
- HCLGE_LED_ACTIVITY_STATE_S, act_led_status);
- hnae_set_field(req->activity_led_config, HCLGE_LED_LINK_STATE_M,
- HCLGE_LED_LINK_STATE_S, link_led_status);
hnae_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
HCLGE_LED_LOCATE_STATE_S, locate_led_status);
@@ -6022,105 +6169,17 @@ enum hclge_led_status {
static int hclge_set_led_id(struct hnae3_handle *handle,
enum ethtool_phys_id_state status)
{
-#define BLINK_FREQUENCY 2
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- struct phy_device *phydev = hdev->hw.mac.phydev;
- int ret = 0;
-
- if (phydev || hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
- return -EOPNOTSUPP;
switch (status) {
case ETHTOOL_ID_ACTIVE:
- ret = hclge_set_led_status_sfp(hdev,
- HCLGE_LED_NO_CHANGE,
- HCLGE_LED_NO_CHANGE,
- HCLGE_LED_NO_CHANGE,
- HCLGE_LED_ON);
- break;
+ return hclge_set_led_status(hdev, HCLGE_LED_ON);
case ETHTOOL_ID_INACTIVE:
- ret = hclge_set_led_status_sfp(hdev,
- HCLGE_LED_NO_CHANGE,
- HCLGE_LED_NO_CHANGE,
- HCLGE_LED_NO_CHANGE,
- HCLGE_LED_OFF);
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
-enum hclge_led_port_speed {
- HCLGE_SPEED_LED_FOR_1G,
- HCLGE_SPEED_LED_FOR_10G,
- HCLGE_SPEED_LED_FOR_25G,
- HCLGE_SPEED_LED_FOR_40G,
- HCLGE_SPEED_LED_FOR_50G,
- HCLGE_SPEED_LED_FOR_100G,
-};
-
-static u8 hclge_led_get_speed_status(u32 speed)
-{
- u8 speed_led;
-
- switch (speed) {
- case HCLGE_MAC_SPEED_1G:
- speed_led = HCLGE_SPEED_LED_FOR_1G;
- break;
- case HCLGE_MAC_SPEED_10G:
- speed_led = HCLGE_SPEED_LED_FOR_10G;
- break;
- case HCLGE_MAC_SPEED_25G:
- speed_led = HCLGE_SPEED_LED_FOR_25G;
- break;
- case HCLGE_MAC_SPEED_40G:
- speed_led = HCLGE_SPEED_LED_FOR_40G;
- break;
- case HCLGE_MAC_SPEED_50G:
- speed_led = HCLGE_SPEED_LED_FOR_50G;
- break;
- case HCLGE_MAC_SPEED_100G:
- speed_led = HCLGE_SPEED_LED_FOR_100G;
- break;
+ return hclge_set_led_status(hdev, HCLGE_LED_OFF);
default:
- speed_led = HCLGE_LED_NO_CHANGE;
+ return -EINVAL;
}
-
- return speed_led;
-}
-
-static int hclge_update_led_status(struct hclge_dev *hdev)
-{
- u8 port_speed_status, link_status, activity_status;
- u64 rx_pkts, tx_pkts;
-
- if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
- return 0;
-
- port_speed_status = hclge_led_get_speed_status(hdev->hw.mac.speed);
-
- rx_pkts = hdev->hw_stats.mac_stats.mac_rx_total_pkt_num;
- tx_pkts = hdev->hw_stats.mac_stats.mac_tx_total_pkt_num;
- if (rx_pkts != hdev->rx_pkts_for_led ||
- tx_pkts != hdev->tx_pkts_for_led)
- activity_status = HCLGE_LED_ON;
- else
- activity_status = HCLGE_LED_OFF;
- hdev->rx_pkts_for_led = rx_pkts;
- hdev->tx_pkts_for_led = tx_pkts;
-
- if (hdev->hw.mac.link)
- link_status = HCLGE_LED_ON;
- else
- link_status = HCLGE_LED_OFF;
-
- return hclge_set_led_status_sfp(hdev, port_speed_status,
- activity_status, link_status,
- HCLGE_LED_NO_CHANGE);
}
static void hclge_get_link_mode(struct hnae3_handle *handle,
@@ -6190,6 +6249,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.rm_uc_addr = hclge_rm_uc_addr,
.add_mc_addr = hclge_add_mc_addr,
.rm_mc_addr = hclge_rm_mc_addr,
+ .update_mta_status = hclge_update_mta_status,
.set_autoneg = hclge_set_autoneg,
.get_autoneg = hclge_get_autoneg,
.get_pauseparam = hclge_get_pauseparam,
@@ -6203,7 +6263,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.get_fw_version = hclge_get_fw_version,
.get_mdix_mode = hclge_get_mdix_mode,
.enable_vlan_filter = hclge_enable_vlan_filter,
- .set_vlan_filter = hclge_set_port_vlan_filter,
+ .set_vlan_filter = hclge_set_vlan_filter,
.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
.reset_event = hclge_reset_event,
@@ -6228,7 +6288,9 @@ static int hclge_init(void)
{
pr_info("%s is initializing\n", HCLGE_NAME);
- return hnae3_register_ae_algo(&ae_algo);
+ hnae3_register_ae_algo(&ae_algo);
+
+ return 0;
}
static void hclge_exit(void)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 0f4157e71282..7488534528cd 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -12,10 +12,12 @@
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/phy.h>
+#include <linux/if_vlan.h>
+
#include "hclge_cmd.h"
#include "hnae3.h"
-#define HCLGE_MOD_VERSION "v1.0"
+#define HCLGE_MOD_VERSION "1.0"
#define HCLGE_DRIVER_NAME "hclge"
#define HCLGE_INVALID_VPORT 0xffff
@@ -59,6 +61,8 @@
#define HCLGE_RSS_TC_SIZE_6 64
#define HCLGE_RSS_TC_SIZE_7 128
+#define HCLGE_MTA_TBL_SIZE 4096
+
#define HCLGE_TQP_RESET_TRY_TIMES 10
#define HCLGE_PHY_PAGE_MDIX 0
@@ -406,9 +410,9 @@ struct hclge_mac_stats {
u64 mac_tx_1519_2047_oct_pkt_num;
u64 mac_tx_2048_4095_oct_pkt_num;
u64 mac_tx_4096_8191_oct_pkt_num;
- u64 mac_tx_8192_12287_oct_pkt_num; /* valid for GE MAC only */
- u64 mac_tx_8192_9216_oct_pkt_num; /* valid for LGE & CGE MAC only */
- u64 mac_tx_9217_12287_oct_pkt_num; /* valid for LGE & CGE MAC */
+ u64 rsv0;
+ u64 mac_tx_8192_9216_oct_pkt_num;
+ u64 mac_tx_9217_12287_oct_pkt_num;
u64 mac_tx_12288_16383_oct_pkt_num;
u64 mac_tx_1519_max_good_oct_pkt_num;
u64 mac_tx_1519_max_bad_oct_pkt_num;
@@ -433,9 +437,9 @@ struct hclge_mac_stats {
u64 mac_rx_1519_2047_oct_pkt_num;
u64 mac_rx_2048_4095_oct_pkt_num;
u64 mac_rx_4096_8191_oct_pkt_num;
- u64 mac_rx_8192_12287_oct_pkt_num;/* valid for GE MAC only */
- u64 mac_rx_8192_9216_oct_pkt_num; /* valid for LGE & CGE MAC only */
- u64 mac_rx_9217_12287_oct_pkt_num; /* valid for LGE & CGE MAC only */
+ u64 rsv1;
+ u64 mac_rx_8192_9216_oct_pkt_num;
+ u64 mac_rx_9217_12287_oct_pkt_num;
u64 mac_rx_12288_16383_oct_pkt_num;
u64 mac_rx_1519_max_good_oct_pkt_num;
u64 mac_rx_1519_max_bad_oct_pkt_num;
@@ -471,6 +475,7 @@ struct hclge_vlan_type_cfg {
u16 tx_in_vlan_type;
};
+#define HCLGE_VPORT_NUM 256
struct hclge_dev {
struct pci_dev *pdev;
struct hnae3_ae_dev *ae_dev;
@@ -556,18 +561,18 @@ struct hclge_dev {
enum hclge_mta_dmac_sel_type mta_mac_sel_type;
bool enable_mta; /* Multicast filter enable */
- bool accept_mta_mc; /* Whether accept mta filter multicast */
struct hclge_vlan_type_cfg vlan_type_cfg;
- u64 rx_pkts_for_led;
- u64 tx_pkts_for_led;
+ unsigned long vlan_table[VLAN_N_VID][BITS_TO_LONGS(HCLGE_VPORT_NUM)];
};
/* VPort level vlan tag configuration for TX direction */
struct hclge_tx_vtag_cfg {
- bool accept_tag; /* Whether accept tagged packet from host */
- bool accept_untag; /* Whether accept untagged packet from host */
+ bool accept_tag1; /* Whether accept tag1 packet from host */
+ bool accept_untag1; /* Whether accept untag1 packet from host */
+ bool accept_tag2;
+ bool accept_untag2;
bool insert_tag1_en; /* Whether insert inner vlan tag */
bool insert_tag2_en; /* Whether insert outer vlan tag */
u16 default_tag1; /* The default inner vlan tag to insert */
@@ -616,6 +621,9 @@ struct hclge_vport {
struct hclge_dev *back; /* Back reference to associated dev */
struct hnae3_handle nic;
struct hnae3_handle roce;
+
+ bool accept_mta_mc; /* whether to accept mta filter multicast */
+ unsigned long mta_shadow[BITS_TO_LONGS(HCLGE_MTA_TBL_SIZE)];
};
void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
@@ -633,6 +641,12 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport,
int hclge_cfg_func_mta_filter(struct hclge_dev *hdev,
u8 func_id,
bool enable);
+int hclge_update_mta_status_common(struct hclge_vport *vport,
+ unsigned long *status,
+ u16 idx,
+ u16 count,
+ bool update_filter);
+
struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle);
int hclge_bind_ring_with_vector(struct hclge_vport *vport,
int vector_id, bool en,
@@ -646,8 +660,9 @@ static inline int hclge_get_queue_id(struct hnae3_queue *queue)
}
int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex);
-int hclge_set_vf_vlan_common(struct hclge_dev *vport, int vfid,
- bool is_kill, u16 vlan, u8 qos, __be16 proto);
+int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
+ u16 vlan_id, bool is_kill);
+int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable);
int hclge_buffer_alloc(struct hclge_dev *hdev);
int hclge_rss_init_hw(struct hclge_dev *hdev);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index a6f7ffa9c259..7541cb9b71ce 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -190,11 +190,12 @@ static int hclge_map_unmap_ring_to_vf_vector(struct hclge_vport *vport, bool en,
static int hclge_set_vf_promisc_mode(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *req)
{
- bool en = req->msg[1] ? true : false;
+ bool en_uc = req->msg[1] ? true : false;
+ bool en_mc = req->msg[2] ? true : false;
struct hclge_promisc_param param;
/* always enable broadcast promisc bit */
- hclge_promisc_param_init(&param, en, en, true, vport->vport_id);
+ hclge_promisc_param_init(&param, en_uc, en_mc, true, vport->vport_id);
return hclge_cmd_set_promisc_mode(vport->back, &param);
}
@@ -230,12 +231,51 @@ static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport,
return 0;
}
+static int hclge_set_vf_mc_mta_status(struct hclge_vport *vport,
+ u8 *msg, u8 idx, bool is_end)
+{
+#define HCLGE_MTA_STATUS_MSG_SIZE 13
+#define HCLGE_MTA_STATUS_MSG_BITS \
+ (HCLGE_MTA_STATUS_MSG_SIZE * BITS_PER_BYTE)
+#define HCLGE_MTA_STATUS_MSG_END_BITS \
+ (HCLGE_MTA_TBL_SIZE % HCLGE_MTA_STATUS_MSG_BITS)
+ unsigned long status[BITS_TO_LONGS(HCLGE_MTA_STATUS_MSG_BITS)];
+ u16 tbl_cnt;
+ u16 tbl_idx;
+ u8 msg_ofs;
+ u8 msg_bit;
+
+ tbl_cnt = is_end ? HCLGE_MTA_STATUS_MSG_END_BITS :
+ HCLGE_MTA_STATUS_MSG_BITS;
+
+ /* set msg field */
+ msg_ofs = 0;
+ msg_bit = 0;
+ memset(status, 0, sizeof(status));
+ for (tbl_idx = 0; tbl_idx < tbl_cnt; tbl_idx++) {
+ if (msg[msg_ofs] & BIT(msg_bit))
+ set_bit(tbl_idx, status);
+
+ msg_bit++;
+ if (msg_bit == BITS_PER_BYTE) {
+ msg_bit = 0;
+ msg_ofs++;
+ }
+ }
+
+ return hclge_update_mta_status_common(vport,
+ status, idx * HCLGE_MTA_STATUS_MSG_BITS,
+ tbl_cnt, is_end);
+}
+
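Per the message-format comment in the handler below, byte msg[2] carries the chunk index in its low seven bits and an end-of-table flag in bit 7. The decode is two masks, matching the 0x80 and 0x7F used in the code:

    #include <stdbool.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned char byte2 = 0x80 | 39;   /* example: final chunk, index 39 */
        bool is_end = (byte2 & 0x80) != 0; /* bit 7: last message flag */
        unsigned int idx = byte2 & 0x7F;   /* bits 6:0: message index */

        printf("chunk %u, is_end=%d\n", idx, is_end);
        return 0;
    }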
static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *mbx_req,
bool gen_resp)
{
const u8 *mac_addr = (const u8 *)(&mbx_req->msg[2]);
struct hclge_dev *hdev = vport->back;
+ u8 resp_len = 0;
+ u8 resp_data;
int status;
if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_ADD) {
@@ -247,6 +287,22 @@ static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,
bool enable = mbx_req->msg[2];
status = hclge_cfg_func_mta_filter(hdev, func_id, enable);
+ } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MTA_TYPE_READ) {
+ resp_data = hdev->mta_mac_sel_type;
+ resp_len = sizeof(u8);
+ gen_resp = true;
+ status = 0;
+ } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MTA_STATUS_UPDATE) {
+ /* mta status update msg format
+ * msg[2.6 : 2.0] msg index
+ * msg[2.7] msg is end
+ * msg[15 : 3] mta status bits[103 : 0]
+ */
+ bool is_end = (mbx_req->msg[2] & 0x80) ? true : false;
+
+ status = hclge_set_vf_mc_mta_status(vport, &mbx_req->msg[3],
+ mbx_req->msg[2] & 0x7F,
+ is_end);
} else {
dev_err(&hdev->pdev->dev,
"failed to set mcast mac addr, unknown subcode %d\n",
@@ -255,7 +311,8 @@ static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,
}
if (gen_resp)
- hclge_gen_resp_to_vf(vport, mbx_req, status, NULL, 0);
+ hclge_gen_resp_to_vf(vport, mbx_req, status,
+ &resp_data, resp_len);
return 0;
}
@@ -264,19 +321,23 @@ static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *mbx_req,
bool gen_resp)
{
- struct hclge_dev *hdev = vport->back;
int status = 0;
if (mbx_req->msg[1] == HCLGE_MBX_VLAN_FILTER) {
+ struct hnae3_handle *handle = &vport->nic;
u16 vlan, proto;
bool is_kill;
is_kill = !!mbx_req->msg[2];
memcpy(&vlan, &mbx_req->msg[3], sizeof(vlan));
memcpy(&proto, &mbx_req->msg[5], sizeof(proto));
- status = hclge_set_vf_vlan_common(hdev, vport->vport_id,
- is_kill, vlan, 0,
- cpu_to_be16(proto));
+ status = hclge_set_vlan_filter(handle, cpu_to_be16(proto),
+ vlan, is_kill);
+ } else if (mbx_req->msg[1] == HCLGE_MBX_VLAN_RX_OFF_CFG) {
+ struct hnae3_handle *handle = &vport->nic;
+ bool en = mbx_req->msg[2] ? true : false;
+
+ status = hclge_en_hw_strip_rxvtag(handle, en);
}
if (gen_resp)
@@ -378,6 +439,13 @@ static void hclge_reset_vf(struct hclge_vport *vport,
hclge_func_reset_cmd(hdev, mbx_req->mbx_src_vfid);
}
+static bool hclge_cmd_crq_empty(struct hclge_hw *hw)
+{
+ u32 tail = hclge_read_dev(hw, HCLGE_NIC_CRQ_TAIL_REG);
+
+ return tail == hw->cmq.crq.next_to_use;
+}
+
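The new emptiness test compares the hardware-written tail pointer with the driver's software consumer index, instead of relying only on a per-descriptor valid bit that may be stale. A trivial model of the check:

    #include <stdbool.h>
    #include <stdio.h>

    struct ring { unsigned int tail, next_to_use; };

    static bool crq_empty(const struct ring *r)
    {
        return r->tail == r->next_to_use;  /* hw producer == sw consumer */
    }

    int main(void)
    {
        struct ring r = { .tail = 5, .next_to_use = 3 };

        printf("%s\n", crq_empty(&r) ? "empty" : "descriptors pending");
        return 0;
    }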
void hclge_mbx_handler(struct hclge_dev *hdev)
{
struct hclge_cmq_ring *crq = &hdev->hw.cmq.crq;
@@ -386,12 +454,23 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
struct hclge_desc *desc;
int ret, flag;
- flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
/* handle all the mailbox requests in the queue */
- while (hnae_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B)) {
+ while (!hclge_cmd_crq_empty(&hdev->hw)) {
desc = &crq->desc[crq->next_to_use];
req = (struct hclge_mbx_vf_to_pf_cmd *)desc->data;
+ flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
+ if (unlikely(!hnae_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B))) {
+ dev_warn(&hdev->pdev->dev,
+ "dropped invalid mailbox message, code = %d\n",
+ req->msg[0]);
+
+ /* dropping/not processing this invalid message */
+ crq->desc[crq->next_to_use].flag = 0;
+ hclge_mbx_ring_ptr_move_crq(crq);
+ continue;
+ }
+
vport = &hdev->vport[req->mbx_src_vfid];
switch (req->msg[0]) {
@@ -466,7 +545,6 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
}
crq->desc[crq->next_to_use].flag = 0;
hclge_mbx_ring_ptr_move_crq(crq);
- flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
}
/* Write back CMDQ_RQ header pointer, M7 need this pointer */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
index 682c2d6618e7..9f7932e423b5 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
@@ -140,8 +140,11 @@ int hclge_mac_mdio_config(struct hclge_dev *hdev)
struct mii_bus *mdio_bus;
int ret;
- if (hdev->hw.mac.phy_addr >= PHY_MAX_ADDR)
- return 0;
+ if (hdev->hw.mac.phy_addr >= PHY_MAX_ADDR) {
+ dev_err(&hdev->pdev->dev, "phy_addr(%d) is too large.\n",
+ hdev->hw.mac.phy_addr);
+ return -EINVAL;
+ }
mdio_bus = devm_mdiobus_alloc(&hdev->pdev->dev);
if (!mdio_bus)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index 885f25cd7be4..262c125f8137 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -134,11 +134,8 @@ static int hclge_pfc_stats_get(struct hclge_dev *hdev,
}
ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_TM_PFC_PKT_GET_CMD_NUM);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Get pfc pause stats fail, ret = %d.\n", ret);
+ if (ret)
return ret;
- }
for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM; i++) {
struct hclge_pfc_stats_cmd *pfc_stats =
@@ -503,7 +500,8 @@ static int hclge_tm_qs_schd_mode_cfg(struct hclge_dev *hdev, u16 qs_id, u8 mode)
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
-static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc)
+static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc, u8 grp_id,
+ u32 bit_map)
{
struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
struct hclge_desc desc;
@@ -514,9 +512,8 @@ static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc)
bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;
bp_to_qs_map_cmd->tc_id = tc;
-
- /* Qset and tc is one by one mapping */
- bp_to_qs_map_cmd->qs_bit_map = cpu_to_le32(1 << tc);
+ bp_to_qs_map_cmd->qs_group_id = grp_id;
+ bp_to_qs_map_cmd->qs_bit_map = cpu_to_le32(bit_map);
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
@@ -1170,6 +1167,41 @@ static int hclge_pfc_setup_hw(struct hclge_dev *hdev)
hdev->tm_info.hw_pfc_map);
}
+/* Each TC has 1024 queue sets for backpressure, divided into
+ * 32 groups of 32 queue sets each; each group can be
+ * represented by a u32 bitmap.
+ */
+static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc)
+{
+ struct hclge_vport *vport = hdev->vport;
+ u32 i, k, qs_bitmap;
+ int ret;
+
+ for (i = 0; i < HCLGE_BP_GRP_NUM; i++) {
+ qs_bitmap = 0;
+
+ for (k = 0; k < hdev->num_alloc_vport; k++) {
+ u16 qs_id = vport->qs_offset + tc;
+ u8 grp, sub_grp;
+
+ grp = hnae_get_field(qs_id, HCLGE_BP_GRP_ID_M,
+ HCLGE_BP_GRP_ID_S);
+ sub_grp = hnae_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M,
+ HCLGE_BP_SUB_GRP_ID_S);
+ if (i == grp)
+ qs_bitmap |= (1 << sub_grp);
+
+ vport++;
+ }
+
+ ret = hclge_tm_qs_bp_cfg(hdev, tc, i, qs_bitmap);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
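With 1024 queue sets per TC split into 32 groups of 32, bits [9:5] of a qs_id select the group (HCLGE_BP_GRP_ID_M) and bits [4:0] the position inside that group's u32 bitmap (HCLGE_BP_SUB_GRP_ID_M). A worked decomposition:

    #include <stdio.h>

    int main(void)
    {
        unsigned int qs_id = 0x153;             /* example queue set id, 339 */
        unsigned int grp = (qs_id >> 5) & 0x1f; /* bits [9:5]: group 0..31 */
        unsigned int sub = qs_id & 0x1f;        /* bits [4:0]: bit in group */
        unsigned int bitmap = 1u << sub;

        /* 339 -> group 10, bit 19, bitmap 0x00080000 */
        printf("qs %u -> group %u, bitmap 0x%08x\n", qs_id, grp, bitmap);
        return 0;
    }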
static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev)
{
bool tx_en, rx_en;
@@ -1221,7 +1253,7 @@ int hclge_pause_setup_hw(struct hclge_dev *hdev)
dev_warn(&hdev->pdev->dev, "set pfc pause failed:%d\n", ret);
for (i = 0; i < hdev->tm_info.num_tc; i++) {
- ret = hclge_tm_qs_bp_cfg(hdev, i);
+ ret = hclge_bp_setup_hw(hdev, i);
if (ret)
return ret;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
index 2dbe177581e9..c2b6e8a6700f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
@@ -89,6 +89,11 @@ struct hclge_pg_shapping_cmd {
__le32 pg_shapping_para;
};
+#define HCLGE_BP_GRP_NUM 32
+#define HCLGE_BP_SUB_GRP_ID_S 0
+#define HCLGE_BP_SUB_GRP_ID_M GENMASK(4, 0)
+#define HCLGE_BP_GRP_ID_S 5
+#define HCLGE_BP_GRP_ID_M GENMASK(9, 5)
struct hclge_bp_to_qs_map_cmd {
u8 tc_id;
u8 rsvd[2];
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 2b8426412cc9..a17872aab168 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -654,7 +654,8 @@ static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
return 0;
}
-static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, u32 en)
+static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
+ bool en_uc_pmc, bool en_mc_pmc)
{
struct hclge_mbx_vf_to_pf_cmd *req;
struct hclgevf_desc desc;
@@ -664,7 +665,8 @@ static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, u32 en)
hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE;
- req->msg[1] = en;
+ req->msg[1] = en_uc_pmc ? 1 : 0;
+ req->msg[2] = en_mc_pmc ? 1 : 0;
status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
if (status)
@@ -674,11 +676,12 @@ static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, u32 en)
return status;
}
-static void hclgevf_set_promisc_mode(struct hnae3_handle *handle, u32 en)
+static void hclgevf_set_promisc_mode(struct hnae3_handle *handle,
+ bool en_uc_pmc, bool en_mc_pmc)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- hclgevf_cmd_set_promisc_mode(hdev, en);
+ hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc);
}
static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id,
@@ -725,15 +728,124 @@ static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
}
}
-static int hclgevf_cfg_func_mta_filter(struct hnae3_handle *handle, bool en)
+static int hclgevf_cfg_func_mta_type(struct hclgevf_dev *hdev)
{
+ u8 resp_msg = HCLGEVF_MTA_TYPE_SEL_MAX;
+ int ret;
+
+ ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
+ HCLGE_MBX_MAC_VLAN_MTA_TYPE_READ,
+ NULL, 0, true, &resp_msg, sizeof(u8));
+
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Read mta type fail, ret=%d.\n", ret);
+ return ret;
+ }
+
+ if (resp_msg > HCLGEVF_MTA_TYPE_SEL_MAX) {
+ dev_err(&hdev->pdev->dev,
+ "Read mta type invalid, resp=%d.\n", resp_msg);
+ return -EINVAL;
+ }
+
+ hdev->mta_mac_sel_type = resp_msg;
+
+ return 0;
+}
+
+static u16 hclgevf_get_mac_addr_to_mta_index(struct hclgevf_dev *hdev,
+ const u8 *addr)
+{
+ u32 rsh = HCLGEVF_MTA_TYPE_SEL_MAX - hdev->mta_mac_sel_type;
+ u16 high_val = addr[1] | (addr[0] << 8);
+
+ return (high_val >> rsh) & 0xfff;
+}
+
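The MTA index is taken from the top 16 bits of the multicast MAC (addr[0]:addr[1]); the selection type picks which 12-bit window of those bits to use, via a right shift of (HCLGEVF_MTA_TYPE_SEL_MAX - sel_type). A hedged model of the arithmetic with a sample multicast address:

    #include <stdio.h>

    #define SEL_MAX 4   /* stand-in for HCLGEVF_MTA_TYPE_SEL_MAX */

    static unsigned int mta_index(const unsigned char *addr, unsigned int sel)
    {
        unsigned int high = addr[1] | (addr[0] << 8); /* MAC bits 47..32 */

        return (high >> (SEL_MAX - sel)) & 0xfff;     /* 12-bit window */
    }

    int main(void)
    {
        unsigned char mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

        /* sel 0 uses MAC bits 47..36; sel 4 uses bits 43..32 */
        for (unsigned int sel = 0; sel <= SEL_MAX; sel++)
            printf("sel %u -> index 0x%03x\n", sel, mta_index(mac, sel));
        return 0;
    }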
+static int hclgevf_do_update_mta_status(struct hclgevf_dev *hdev,
+ unsigned long *status)
+{
+#define HCLGEVF_MTA_STATUS_MSG_SIZE 13
+#define HCLGEVF_MTA_STATUS_MSG_BITS \
+ (HCLGEVF_MTA_STATUS_MSG_SIZE * BITS_PER_BYTE)
+#define HCLGEVF_MTA_STATUS_MSG_END_BITS \
+ (HCLGEVF_MTA_TBL_SIZE % HCLGEVF_MTA_STATUS_MSG_BITS)
+ u16 tbl_cnt;
+ u16 tbl_idx;
+ u8 msg_cnt;
+ u8 msg_idx;
+ int ret;
+
+ msg_cnt = DIV_ROUND_UP(HCLGEVF_MTA_TBL_SIZE,
+ HCLGEVF_MTA_STATUS_MSG_BITS);
+ tbl_idx = 0;
+ msg_idx = 0;
+ while (msg_cnt--) {
+ u8 msg[HCLGEVF_MTA_STATUS_MSG_SIZE + 1];
+ u8 *p = &msg[1];
+ u8 msg_ofs;
+ u8 msg_bit;
+
+ memset(msg, 0, sizeof(msg));
+
+ /* set index field */
+ msg[0] = 0x7F & msg_idx;
+
+ /* set end flag field */
+ if (msg_cnt == 0) {
+ msg[0] |= 0x80;
+ tbl_cnt = HCLGEVF_MTA_STATUS_MSG_END_BITS;
+ } else {
+ tbl_cnt = HCLGEVF_MTA_STATUS_MSG_BITS;
+ }
+
+ /* set status field */
+ msg_ofs = 0;
+ msg_bit = 0;
+ while (tbl_cnt--) {
+ if (test_bit(tbl_idx, status))
+ p[msg_ofs] |= BIT(msg_bit);
+
+ tbl_idx++;
+
+ msg_bit++;
+ if (msg_bit == BITS_PER_BYTE) {
+ msg_bit = 0;
+ msg_ofs++;
+ }
+ }
+
+ ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
+ HCLGE_MBX_MAC_VLAN_MTA_STATUS_UPDATE,
+ msg, sizeof(msg), false, NULL, 0);
+ if (ret)
+ break;
+
+ msg_idx++;
+ }
+
+ return ret;
+}
+
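Each mailbox message carries 13 status bytes, i.e. 104 table bits, so syncing the 4096-entry table takes ceil(4096 / 104) = 40 messages, and the final one carries only 4096 mod 104 = 40 valid bits — exactly what the DIV_ROUND_UP and the END_BITS modulo above compute:

    #include <stdio.h>

    int main(void)
    {
        unsigned int tbl_size = 4096;   /* MTA table entries */
        unsigned int msg_bits = 13 * 8; /* 13 payload bytes per message */
        unsigned int msgs = (tbl_size + msg_bits - 1) / msg_bits;
        unsigned int last = tbl_size % msg_bits;

        printf("%u messages, last carries %u bits\n", msgs, last); /* 40, 40 */
        return 0;
    }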
+static int hclgevf_update_mta_status(struct hnae3_handle *handle)
+{
+ unsigned long mta_status[BITS_TO_LONGS(HCLGEVF_MTA_TBL_SIZE)];
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- u8 msg[2] = {0};
+ struct net_device *netdev = hdev->nic.kinfo.netdev;
+ struct netdev_hw_addr *ha;
+ u16 tbl_idx;
- msg[0] = en;
- return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
- HCLGE_MBX_MAC_VLAN_MC_FUNC_MTA_ENABLE,
- msg, 1, false, NULL, 0);
+ /* clear status */
+ memset(mta_status, 0, sizeof(mta_status));
+
+ /* update status from mc addr list */
+ netdev_for_each_mc_addr(ha, netdev) {
+ tbl_idx = hclgevf_get_mac_addr_to_mta_index(hdev, ha->addr);
+ set_bit(tbl_idx, mta_status);
+ }
+
+ return hclgevf_do_update_mta_status(hdev, mta_status);
}
static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
@@ -830,6 +942,17 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0);
}
+static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ u8 msg_data;
+
+ msg_data = enable ? 1 : 0;
+ return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
+ HCLGE_MBX_VLAN_RX_OFF_CFG, &msg_data,
+ 1, false, NULL, 0);
+}
+
static void hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
@@ -1323,6 +1446,7 @@ static void hclgevf_ae_stop(struct hnae3_handle *handle)
hclgevf_reset_tqp_stats(handle);
del_timer_sync(&hdev->service_timer);
cancel_work_sync(&hdev->service_task);
+ clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
hclgevf_update_link_status(hdev, 0);
}
@@ -1441,6 +1565,8 @@ static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
return ret;
}
+ hclgevf_clear_event_cause(hdev, 0);
+
/* enable misc. vector(vector 0) */
hclgevf_enable_vector(&hdev->misc_vector, true);
@@ -1451,6 +1577,7 @@ static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
{
/* disable misc vector(vector 0) */
hclgevf_enable_vector(&hdev->misc_vector, false);
+ synchronize_irq(hdev->misc_vector.vector_irq);
free_irq(hdev->misc_vector.vector_irq, hdev);
hclgevf_free_vector(hdev, 0);
}
@@ -1489,10 +1616,12 @@ static int hclgevf_init_instance(struct hclgevf_dev *hdev,
return ret;
break;
case HNAE3_CLIENT_ROCE:
- hdev->roce_client = client;
- hdev->roce.client = client;
+ if (hnae3_dev_roce_supported(hdev)) {
+ hdev->roce_client = client;
+ hdev->roce.client = client;
+ }
- if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
+ if (hdev->roce_client && hdev->nic_client) {
ret = hclgevf_init_roce_base_info(hdev);
if (ret)
return ret;
@@ -1552,7 +1681,7 @@ static int hclgevf_pci_init(struct hclgevf_dev *hdev)
ret = pci_enable_device(pdev);
if (ret) {
dev_err(&pdev->dev, "failed to enable PCI device\n");
- goto err_no_drvdata;
+ return ret;
}
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
@@ -1584,8 +1713,7 @@ err_clr_master:
pci_release_regions(pdev);
err_disable_device:
pci_disable_device(pdev);
-err_no_drvdata:
- pci_set_drvdata(pdev, NULL);
+
return ret;
}
@@ -1597,7 +1725,6 @@ static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
pci_clear_master(pdev);
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
@@ -1625,6 +1752,10 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
hclgevf_state_init(hdev);
+ ret = hclgevf_cmd_init(hdev);
+ if (ret)
+ goto err_cmd_init;
+
ret = hclgevf_misc_irq_init(hdev);
if (ret) {
dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
@@ -1632,10 +1763,6 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
goto err_misc_irq_init;
}
- ret = hclgevf_cmd_init(hdev);
- if (ret)
- goto err_cmd_init;
-
ret = hclgevf_configure(hdev);
if (ret) {
dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
@@ -1654,12 +1781,11 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
goto err_config;
}
- /* Initialize VF's MTA */
- hdev->accept_mta_mc = true;
- ret = hclgevf_cfg_func_mta_filter(&hdev->nic, hdev->accept_mta_mc);
+ /* Initialize mta type for this VF */
+ ret = hclgevf_cfg_func_mta_type(hdev);
if (ret) {
dev_err(&hdev->pdev->dev,
- "failed(%d) to set mta filter mode\n", ret);
+ "failed(%d) to initialize MTA type\n", ret);
goto err_config;
}
@@ -1683,10 +1809,10 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
return 0;
err_config:
- hclgevf_cmd_uninit(hdev);
-err_cmd_init:
hclgevf_misc_irq_uninit(hdev);
err_misc_irq_init:
+ hclgevf_cmd_uninit(hdev);
+err_cmd_init:
hclgevf_state_uninit(hdev);
hclgevf_uninit_msi(hdev);
err_irq_init:
@@ -1696,9 +1822,9 @@ err_irq_init:
static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
{
- hclgevf_cmd_uninit(hdev);
- hclgevf_misc_irq_uninit(hdev);
hclgevf_state_uninit(hdev);
+ hclgevf_misc_irq_uninit(hdev);
+ hclgevf_cmd_uninit(hdev);
hclgevf_uninit_msi(hdev);
hclgevf_pci_uninit(hdev);
}
@@ -1814,6 +1940,7 @@ static const struct hnae3_ae_ops hclgevf_ops = {
.rm_uc_addr = hclgevf_rm_uc_addr,
.add_mc_addr = hclgevf_add_mc_addr,
.rm_mc_addr = hclgevf_rm_mc_addr,
+ .update_mta_status = hclgevf_update_mta_status,
.get_stats = hclgevf_get_stats,
.update_stats = hclgevf_update_stats,
.get_strings = hclgevf_get_strings,
@@ -1825,6 +1952,7 @@ static const struct hnae3_ae_ops hclgevf_ops = {
.get_tc_size = hclgevf_get_tc_size,
.get_fw_version = hclgevf_get_fw_version,
.set_vlan_filter = hclgevf_set_vlan_filter,
+ .enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag,
.reset_event = hclgevf_reset_event,
.get_channels = hclgevf_get_channels,
.get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,
@@ -1842,7 +1970,9 @@ static int hclgevf_init(void)
{
pr_info("%s is initializing\n", HCLGEVF_NAME);
- return hnae3_register_ae_algo(&ae_algovf);
+ hnae3_register_ae_algo(&ae_algovf);
+
+ return 0;
}
static void hclgevf_exit(void)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index a477a7c36bbd..0656e8e5c5f0 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -9,7 +9,7 @@
#include "hclgevf_cmd.h"
#include "hnae3.h"
-#define HCLGEVF_MOD_VERSION "v1.0"
+#define HCLGEVF_MOD_VERSION "1.0"
#define HCLGEVF_DRIVER_NAME "hclgevf"
#define HCLGEVF_ROCEE_VECTOR_NUM 0
@@ -48,6 +48,9 @@
#define HCLGEVF_RSS_CFG_TBL_NUM \
(HCLGEVF_RSS_IND_TBL_SIZE / HCLGEVF_RSS_CFG_TBL_SIZE)
+#define HCLGEVF_MTA_TBL_SIZE 4096
+#define HCLGEVF_MTA_TYPE_SEL_MAX 4
+
/* states of hclgevf device & tasks */
enum hclgevf_states {
/* device states */
@@ -152,6 +155,7 @@ struct hclgevf_dev {
int *vector_irq;
bool accept_mta_mc; /* whether to accept mta filter multicast */
+ u8 mta_mac_sel_type;
bool mbx_event_pending;
struct hclgevf_mbx_resp_status mbx_resp; /* mailbox response */
struct hclgevf_mbx_arq_ring arq; /* mailbox async rx queue */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
index a28618428338..b598c06af8e0 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
@@ -126,6 +126,13 @@ int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, u16 code, u16 subcode,
return status;
}
+static bool hclgevf_cmd_crq_empty(struct hclgevf_hw *hw)
+{
+ u32 tail = hclgevf_read_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG);
+
+ return tail == hw->cmq.crq.next_to_use;
+}
+
void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
{
struct hclgevf_mbx_resp_status *resp;
@@ -140,11 +147,22 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
resp = &hdev->mbx_resp;
crq = &hdev->hw.cmq.crq;
- flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
- while (hnae_get_bit(flag, HCLGEVF_CMDQ_RX_OUTVLD_B)) {
+ while (!hclgevf_cmd_crq_empty(&hdev->hw)) {
desc = &crq->desc[crq->next_to_use];
req = (struct hclge_mbx_pf_to_vf_cmd *)desc->data;
+ flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
+ if (unlikely(!hnae_get_bit(flag, HCLGEVF_CMDQ_RX_OUTVLD_B))) {
+ dev_warn(&hdev->pdev->dev,
+ "dropped invalid mailbox message, code = %d\n",
+ req->msg[0]);
+
+ /* dropping/not processing this invalid message */
+ crq->desc[crq->next_to_use].flag = 0;
+ hclge_mbx_ring_ptr_move_crq(crq);
+ continue;
+ }
+
/* synchronous messages are time critical and need preferential
* treatment. Therefore, we need to acknowledge all the sync
* responses as quickly as possible so that waiting tasks do not
@@ -205,7 +223,6 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
}
crq->desc[crq->next_to_use].flag = 0;
hclge_mbx_ring_ptr_move_crq(crq);
- flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
}
/* Write back CMDQ_RQ header pointer, M7 need this pointer */
diff --git a/drivers/net/ethernet/huawei/hinic/Kconfig b/drivers/net/ethernet/huawei/hinic/Kconfig
index 08db24954f7e..e4e8b24c1a5d 100644
--- a/drivers/net/ethernet/huawei/hinic/Kconfig
+++ b/drivers/net/ethernet/huawei/hinic/Kconfig
@@ -4,7 +4,7 @@
config HINIC
tristate "Huawei Intelligent PCIE Network Interface Card"
- depends on (PCI_MSI && X86)
+ depends on (PCI_MSI && (X86 || ARM64))
---help---
This driver supports HiNIC PCIE Ethernet cards.
To compile this driver as part of the kernel, choose Y here.
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index eb53bd93065e..5b122728dcb4 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -51,7 +51,9 @@ static unsigned int rx_weight = 64;
module_param(rx_weight, uint, 0644);
MODULE_PARM_DESC(rx_weight, "Number of Rx packets for NAPI budget (default=64)");
-#define PCI_DEVICE_ID_HI1822_PF 0x1822
+#define HINIC_DEV_ID_QUAD_PORT_25GE 0x1822
+#define HINIC_DEV_ID_DUAL_PORT_25GE 0x0200
+#define HINIC_DEV_ID_DUAL_PORT_100GE 0x0201
#define HINIC_WQ_NAME "hinic_dev"
@@ -1097,7 +1099,9 @@ static void hinic_remove(struct pci_dev *pdev)
}
static const struct pci_device_id hinic_pci_table[] = {
- { PCI_VDEVICE(HUAWEI, PCI_DEVICE_ID_HI1822_PF), 0},
+ { PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_QUAD_PORT_25GE), 0},
+ { PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_DUAL_PORT_25GE), 0},
+ { PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_DUAL_PORT_100GE), 0},
{ 0, 0}
};
MODULE_DEVICE_TABLE(pci, hinic_pci_table);
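The hinic hunk above renames the single HI1822 PF ID and registers two additional device IDs; the PCI core probes the driver for any device matching an entry in the zero-terminated table. A sketch of that matching pattern in user-space C, assuming 0x19e5 is the kernel's PCI_VENDOR_ID_HUAWEI value (verify against include/linux/pci_ids.h):

#include <stdint.h>
#include <stdio.h>

/* Device IDs copied from the hunk above; the vendor ID is an assumption. */
#define VENDOR_HUAWEI          0x19e5
#define DEV_ID_QUAD_PORT_25GE  0x1822
#define DEV_ID_DUAL_PORT_25GE  0x0200
#define DEV_ID_DUAL_PORT_100GE 0x0201

struct pci_id { uint16_t vendor, device; };

static const struct pci_id hinic_table[] = {
	{ VENDOR_HUAWEI, DEV_ID_QUAD_PORT_25GE },
	{ VENDOR_HUAWEI, DEV_ID_DUAL_PORT_25GE },
	{ VENDOR_HUAWEI, DEV_ID_DUAL_PORT_100GE },
	{ 0, 0 } /* sentinel, like the { 0, 0 } entry above */
};

/* Rough model of how the PCI core walks the table before calling probe(). */
static int id_matches(uint16_t vendor, uint16_t device)
{
	const struct pci_id *id;

	for (id = hinic_table; id->vendor; id++)
		if (id->vendor == vendor && id->device == device)
			return 1;
	return 0;
}

int main(void)
{
	printf("0x0201 matches: %d\n", id_matches(VENDOR_HUAWEI, 0x0201));
	printf("0x0300 matches: %d\n", id_matches(VENDOR_HUAWEI, 0x0300));
	return 0;
}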
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 5ec1185808e5..d0e196bff081 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -109,13 +109,14 @@ static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int data);
static void send_map_query(struct ibmvnic_adapter *adapter);
-static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
-static void send_request_unmap(struct ibmvnic_adapter *, u8);
+static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
+static int send_request_unmap(struct ibmvnic_adapter *, u8);
static int send_login(struct ibmvnic_adapter *adapter);
static void send_cap_queries(struct ibmvnic_adapter *adapter);
static int init_sub_crqs(struct ibmvnic_adapter *);
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
static int ibmvnic_init(struct ibmvnic_adapter *);
+static int ibmvnic_reset_init(struct ibmvnic_adapter *);
static void release_crq_queue(struct ibmvnic_adapter *);
static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p);
static int init_crq_queue(struct ibmvnic_adapter *adapter);
@@ -172,6 +173,7 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
struct ibmvnic_long_term_buff *ltb, int size)
{
struct device *dev = &adapter->vdev->dev;
+ int rc;
ltb->size = size;
ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
@@ -185,8 +187,12 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
adapter->map_id++;
init_completion(&adapter->fw_done);
- send_request_map(adapter, ltb->addr,
- ltb->size, ltb->map_id);
+ rc = send_request_map(adapter, ltb->addr,
+ ltb->size, ltb->map_id);
+ if (rc) {
+ dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
+ return rc;
+ }
wait_for_completion(&adapter->fw_done);
if (adapter->fw_done_rc) {
@@ -215,10 +221,14 @@ static void free_long_term_buff(struct ibmvnic_adapter *adapter,
static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
struct ibmvnic_long_term_buff *ltb)
{
+ int rc;
+
memset(ltb->buff, 0, ltb->size);
init_completion(&adapter->fw_done);
- send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
+ rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
+ if (rc)
+ return rc;
wait_for_completion(&adapter->fw_done);
if (adapter->fw_done_rc) {
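The ibmvnic hunks above convert the fire-and-forget map/unmap helpers to return the ibmvnic_send_crq() status, so callers check the send result before blocking on the fw_done completion. A compact model of the fixed pattern, with a stand-in send function that can fail:

#include <stdio.h>

/* Stand-in for ibmvnic_send_crq(): 0 on success, negative errno on failure. */
static int send_crq(int fail)
{
	return fail ? -5 /* -EIO */ : 0;
}

/*
 * The fixed pattern: check the send result *before* waiting, so a failed
 * send propagates an error instead of leaving the caller blocked on a
 * completion that will never be signalled.
 */
static int request_map(int fail)
{
	int rc = send_crq(fail);

	if (rc)
		return rc;
	/* wait_for_completion(&adapter->fw_done) would go here */
	return 0;
}

int main(void)
{
	printf("ok path:   %d\n", request_map(0));
	printf("fail path: %d\n", request_map(1));
	return 0;
}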
@@ -789,6 +799,7 @@ static void release_napi(struct ibmvnic_adapter *adapter)
kfree(adapter->napi);
adapter->napi = NULL;
adapter->num_active_rx_napi = 0;
+ adapter->napi_enabled = false;
}
static int ibmvnic_login(struct net_device *netdev)
@@ -924,6 +935,10 @@ static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
/* Partial success, delay and re-send */
mdelay(1000);
resend = true;
+ } else if (adapter->init_done_rc) {
+ netdev_warn(netdev, "Unable to set link state, rc=%d\n",
+ adapter->init_done_rc);
+ return adapter->init_done_rc;
}
} while (resend);
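set_link_state() above now separates the two firmware outcomes: partial success delays and re-sends, while a non-zero init_done_rc warns and aborts the loop. A small model of that retry/abort split (the enum and helper are illustrative):

#include <stdbool.h>
#include <stdio.h>

enum link_rc { LINK_OK, LINK_PARTIAL, LINK_FAILED };

/* Stand-in for the firmware response; yields PARTIAL once, then OK. */
static enum link_rc try_set_link(int attempt)
{
	return attempt == 0 ? LINK_PARTIAL : LINK_OK;
}

static int set_link_state(void)
{
	bool resend;
	int attempt = 0;

	do {
		resend = false;
		switch (try_set_link(attempt++)) {
		case LINK_OK:
			break;
		case LINK_PARTIAL:
			/* partial success: delay (elided) and re-send */
			resend = true;
			break;
		case LINK_FAILED:
			/* hard failure: report and abort instead of looping */
			fprintf(stderr, "Unable to set link state\n");
			return -1;
		}
	} while (resend);

	return 0;
}

int main(void)
{
	printf("rc = %d\n", set_link_state());
	return 0;
}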
@@ -956,6 +971,7 @@ static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
struct device *dev = &adapter->vdev->dev;
union ibmvnic_crq crq;
int len = 0;
+ int rc;
if (adapter->vpd->buff)
len = adapter->vpd->len;
@@ -963,7 +979,9 @@ static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
init_completion(&adapter->fw_done);
crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
crq.get_vpd_size.cmd = GET_VPD_SIZE;
- ibmvnic_send_crq(adapter, &crq);
+ rc = ibmvnic_send_crq(adapter, &crq);
+ if (rc)
+ return rc;
wait_for_completion(&adapter->fw_done);
if (!adapter->vpd->len)
@@ -996,7 +1014,12 @@ static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
crq.get_vpd.cmd = GET_VPD;
crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len);
- ibmvnic_send_crq(adapter, &crq);
+ rc = ibmvnic_send_crq(adapter, &crq);
+ if (rc) {
+ kfree(adapter->vpd->buff);
+ adapter->vpd->buff = NULL;
+ return rc;
+ }
wait_for_completion(&adapter->fw_done);
return 0;
@@ -1695,6 +1718,7 @@ static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p)
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
struct sockaddr *addr = p;
union ibmvnic_crq crq;
+ int rc;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
@@ -1705,7 +1729,9 @@ static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p)
ether_addr_copy(&crq.change_mac_addr.mac_addr[0], addr->sa_data);
init_completion(&adapter->fw_done);
- ibmvnic_send_crq(adapter, &crq);
+ rc = ibmvnic_send_crq(adapter, &crq);
+ if (rc)
+ return rc;
wait_for_completion(&adapter->fw_done);
/* netdev->dev_addr is changed in handle_change_mac_rsp function */
return adapter->fw_done_rc ? -EIO : 0;
@@ -1787,7 +1813,7 @@ static int do_reset(struct ibmvnic_adapter *adapter,
return rc;
}
- rc = ibmvnic_init(adapter);
+ rc = ibmvnic_reset_init(adapter);
if (rc)
return IBMVNIC_INIT_FAILED;
@@ -1857,6 +1883,85 @@ static int do_reset(struct ibmvnic_adapter *adapter,
return 0;
}
+static int do_hard_reset(struct ibmvnic_adapter *adapter,
+ struct ibmvnic_rwi *rwi, u32 reset_state)
+{
+ struct net_device *netdev = adapter->netdev;
+ int rc;
+
+ netdev_dbg(adapter->netdev, "Hard resetting driver (%d)\n",
+ rwi->reset_reason);
+
+ netif_carrier_off(netdev);
+ adapter->reset_reason = rwi->reset_reason;
+
+ ibmvnic_cleanup(netdev);
+ release_resources(adapter);
+ release_sub_crqs(adapter, 0);
+ release_crq_queue(adapter);
+
+ /* remove the closed state so when we call open it appears
+ * we are coming from the probed state.
+ */
+ adapter->state = VNIC_PROBED;
+
+ rc = init_crq_queue(adapter);
+ if (rc) {
+ netdev_err(adapter->netdev,
+ "Couldn't initialize crq. rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = ibmvnic_init(adapter);
+ if (rc)
+ return rc;
+
+ /* If the adapter was in PROBE state prior to the reset,
+ * exit here.
+ */
+ if (reset_state == VNIC_PROBED)
+ return 0;
+
+ rc = ibmvnic_login(netdev);
+ if (rc) {
+ adapter->state = VNIC_PROBED;
+ return 0;
+ }
+ /* netif_set_real_num_xx_queues needs to take rtnl lock here
+ * unless wait_for_reset is set, in which case the rtnl lock
+ * has already been taken before initializing the reset
+ */
+ if (!adapter->wait_for_reset) {
+ rtnl_lock();
+ rc = init_resources(adapter);
+ rtnl_unlock();
+ } else {
+ rc = init_resources(adapter);
+ }
+ if (rc)
+ return rc;
+
+ ibmvnic_disable_irqs(adapter);
+ adapter->state = VNIC_CLOSED;
+
+ if (reset_state == VNIC_CLOSED)
+ return 0;
+
+ rc = __ibmvnic_open(netdev);
+ if (rc) {
+ if (list_empty(&adapter->rwi_list))
+ adapter->state = VNIC_CLOSED;
+ else
+ adapter->state = reset_state;
+
+ return 0;
+ }
+
+ netif_carrier_on(netdev);
+
+ return 0;
+}
+
static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
{
struct ibmvnic_rwi *rwi;
@@ -1898,14 +2003,19 @@ static void __ibmvnic_reset(struct work_struct *work)
netdev = adapter->netdev;
mutex_lock(&adapter->reset_lock);
- adapter->resetting = true;
reset_state = adapter->state;
rwi = get_next_rwi(adapter);
while (rwi) {
- rc = do_reset(adapter, rwi, reset_state);
+ if (adapter->force_reset_recovery) {
+ adapter->force_reset_recovery = false;
+ rc = do_hard_reset(adapter, rwi, reset_state);
+ } else {
+ rc = do_reset(adapter, rwi, reset_state);
+ }
kfree(rwi);
- if (rc && rc != IBMVNIC_INIT_FAILED)
+ if (rc && rc != IBMVNIC_INIT_FAILED &&
+ !adapter->force_reset_recovery)
break;
rwi = get_next_rwi(adapter);
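__ibmvnic_reset() above now routes each queued work item either to do_hard_reset() or to do_reset(), consuming the force_reset_recovery flag before dispatch so subsequent items take the normal path again. A user-space model of that dispatch, with trivial stand-ins for both reset paths:

#include <stdbool.h>
#include <stdio.h>

static bool force_hard; /* models adapter->force_reset_recovery */

static int do_soft_reset(int reason) { printf("soft reset %d\n", reason); return 0; }
static int model_hard_reset(int reason) { printf("hard reset %d\n", reason); return 0; }

/* One reset work item: consume the flag first, then dispatch. */
static int handle_reset(int reason)
{
	if (force_hard) {
		force_hard = false; /* cleared before the call, as in the hunk */
		return model_hard_reset(reason);
	}
	return do_soft_reset(reason);
}

int main(void)
{
	force_hard = true;
	handle_reset(1); /* takes the hard path once */
	handle_reset(2); /* falls back to the normal path */
	return 0;
}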
@@ -1931,9 +2041,9 @@ static void __ibmvnic_reset(struct work_struct *work)
static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
enum ibmvnic_reset_reason reason)
{
+ struct list_head *entry, *tmp_entry;
struct ibmvnic_rwi *rwi, *tmp;
struct net_device *netdev = adapter->netdev;
- struct list_head *entry;
int ret;
if (adapter->state == VNIC_REMOVING ||
@@ -1969,11 +2079,17 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
ret = ENOMEM;
goto err;
}
-
+ /* if we just received a transport event,
+ * flush reset queue and process this reset
+ */
+ if (adapter->force_reset_recovery && !list_empty(&adapter->rwi_list)) {
+ list_for_each_safe(entry, tmp_entry, &adapter->rwi_list)
+ list_del(entry);
+ }
rwi->reset_reason = reason;
list_add_tail(&rwi->list, &adapter->rwi_list);
mutex_unlock(&adapter->rwi_lock);
-
+ adapter->resetting = true;
netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason);
schedule_work(&adapter->ibmvnic_reset);
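ibmvnic_reset() above empties the pending reset queue when a transport event forces recovery, using the _safe list walker because entries are unlinked mid-traversal; only then is the new work item appended and the worker scheduled. A simplified singly linked model of flush-then-enqueue (the kernel code uses list_for_each_safe on a doubly linked list, and this sketch additionally frees each node):

#include <stdio.h>
#include <stdlib.h>

struct rwi {
	int reason;
	struct rwi *next;
};

static struct rwi *queue; /* simplified stand-in for adapter->rwi_list */

static void enqueue(int reason)
{
	struct rwi *rwi = malloc(sizeof(*rwi));

	if (!rwi)
		return;
	rwi->reason = reason;
	rwi->next = queue;
	queue = rwi;
}

/* Flush everything queued so far; safe because ->next is saved before free. */
static void flush_queue(void)
{
	struct rwi *cur = queue, *tmp;

	while (cur) {
		tmp = cur->next;
		free(cur);
		cur = tmp;
	}
	queue = NULL;
}

int main(void)
{
	enqueue(1);
	enqueue(2);
	/* transport event arrives: drop the stale resets, keep only the new one */
	flush_queue();
	enqueue(3);
	printf("head reason = %d\n", queue->reason);
	return 0;
}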
@@ -2369,6 +2485,7 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev,
struct ibmvnic_adapter *adapter = netdev_priv(dev);
union ibmvnic_crq crq;
int i, j;
+ int rc;
memset(&crq, 0, sizeof(crq));
crq.request_statistics.first = IBMVNIC_CRQ_CMD;
@@ -2379,7 +2496,9 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev,
/* Wait for data to be written */
init_completion(&adapter->stats_done);
- ibmvnic_send_crq(adapter, &crq);
+ rc = ibmvnic_send_crq(adapter, &crq);
+ if (rc)
+ return;
wait_for_completion(&adapter->stats_done);
for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
@@ -3154,6 +3273,12 @@ static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
(unsigned long int)cpu_to_be64(u64_crq[0]),
(unsigned long int)cpu_to_be64(u64_crq[1]));
+ if (!adapter->crq.active &&
+ crq->generic.first != IBMVNIC_CRQ_INIT_CMD) {
+ dev_warn(dev, "Invalid request detected while CRQ is inactive, possible device state change during reset\n");
+ return -EINVAL;
+ }
+
/* Make sure the hypervisor sees the complete request */
mb();
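ibmvnic_send_crq() above now rejects every command except the INIT handshake while the CRQ is inactive, and keeps the memory barrier so descriptor writes are visible before the hypervisor call. A minimal sketch of the guard, with __sync_synchronize() standing in for mb():

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

enum crq_cmd { CRQ_INIT_CMD, CRQ_CMD };

struct crq { bool active; };

/* Reject everything but the INIT handshake while the queue is down. */
static int send_crq(struct crq *crq, enum crq_cmd first)
{
	if (!crq->active && first != CRQ_INIT_CMD)
		return -EINVAL;
	/* __sync_synchronize() stands in for mb() before the hypervisor call */
	__sync_synchronize();
	return 0;
}

int main(void)
{
	struct crq crq = { .active = false };

	printf("CMD while inactive:  %d\n", send_crq(&crq, CRQ_CMD));
	printf("INIT while inactive: %d\n", send_crq(&crq, CRQ_INIT_CMD));
	crq.active = true;
	printf("CMD while active:    %d\n", send_crq(&crq, CRQ_CMD));
	return 0;
}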
@@ -3378,8 +3503,8 @@ buf_alloc_failed:
return -1;
}
-static void send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
- u32 len, u8 map_id)
+static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
+ u32 len, u8 map_id)
{
union ibmvnic_crq crq;
@@ -3389,10 +3514,10 @@ static void send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
crq.request_map.map_id = map_id;
crq.request_map.ioba = cpu_to_be32(addr);
crq.request_map.len = cpu_to_be32(len);
- ibmvnic_send_crq(adapter, &crq);
+ return ibmvnic_send_crq(adapter, &crq);
}
-static void send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
+static int send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
{
union ibmvnic_crq crq;
@@ -3400,7 +3525,7 @@ static void send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
crq.request_unmap.first = IBMVNIC_CRQ_CMD;
crq.request_unmap.cmd = REQUEST_UNMAP;
crq.request_unmap.map_id = map_id;
- ibmvnic_send_crq(adapter, &crq);
+ return ibmvnic_send_crq(adapter, &crq);
}
static void send_map_query(struct ibmvnic_adapter *adapter)
@@ -4227,11 +4352,15 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
dev_info(dev, "Partner initialized\n");
adapter->from_passive_init = true;
adapter->failover_pending = false;
- complete(&adapter->init_done);
+ if (!completion_done(&adapter->init_done)) {
+ complete(&adapter->init_done);
+ adapter->init_done_rc = -EIO;
+ }
ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
break;
case IBMVNIC_CRQ_INIT_COMPLETE:
dev_info(dev, "Partner initialization complete\n");
+ adapter->crq.active = true;
send_version_xchg(adapter);
break;
default:
@@ -4240,6 +4369,9 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
return;
case IBMVNIC_CRQ_XPORT_EVENT:
netif_carrier_off(netdev);
+ adapter->crq.active = false;
+ if (adapter->resetting)
+ adapter->force_reset_recovery = true;
if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
dev_info(dev, "Migrated, re-enabling adapter\n");
ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
@@ -4427,6 +4559,7 @@ static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
/* Clean out the queue */
memset(crq->msgs, 0, PAGE_SIZE);
crq->cur = 0;
+ crq->active = false;
/* And re-open it again */
rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
@@ -4461,6 +4594,7 @@ static void release_crq_queue(struct ibmvnic_adapter *adapter)
DMA_BIDIRECTIONAL);
free_page((unsigned long)crq->msgs);
crq->msgs = NULL;
+ crq->active = false;
}
static int init_crq_queue(struct ibmvnic_adapter *adapter)
@@ -4538,7 +4672,7 @@ map_failed:
return retrc;
}
-static int ibmvnic_init(struct ibmvnic_adapter *adapter)
+static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter)
{
struct device *dev = &adapter->vdev->dev;
unsigned long timeout = msecs_to_jiffies(30000);
@@ -4597,6 +4731,49 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter)
return rc;
}
+static int ibmvnic_init(struct ibmvnic_adapter *adapter)
+{
+ struct device *dev = &adapter->vdev->dev;
+ unsigned long timeout = msecs_to_jiffies(30000);
+ int rc;
+
+ adapter->from_passive_init = false;
+
+ init_completion(&adapter->init_done);
+ adapter->init_done_rc = 0;
+ ibmvnic_send_crq_init(adapter);
+ if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
+ dev_err(dev, "Initialization sequence timed out\n");
+ return -1;
+ }
+
+ if (adapter->init_done_rc) {
+ release_crq_queue(adapter);
+ return adapter->init_done_rc;
+ }
+
+ if (adapter->from_passive_init) {
+ adapter->state = VNIC_OPEN;
+ adapter->from_passive_init = false;
+ return -1;
+ }
+
+ rc = init_sub_crqs(adapter);
+ if (rc) {
+ dev_err(dev, "Initialization of sub crqs failed\n");
+ release_crq_queue(adapter);
+ return rc;
+ }
+
+ rc = init_sub_crq_irqs(adapter);
+ if (rc) {
+ dev_err(dev, "Failed to initialize sub crq irqs\n");
+ release_crq_queue(adapter);
+ }
+
+ return rc;
+}
+
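With the split above, do_reset() uses ibmvnic_reset_init() while probe keeps its own ibmvnic_init(), whose visible failure paths all call release_crq_queue(). A skeleton of that probe-time unwind policy (the completion and helpers are stand-ins; only the error-path ordering mirrors the hunk):

#include <stdbool.h>
#include <stdio.h>

static int init_done_rc;	/* set by the CRQ response handler */

/* Stand-in for wait_for_completion_timeout(); false means timed out. */
static bool wait_init_done(void)
{
	return true;
}

/*
 * Skeleton of the probe-time sequence: every failure after the CRQ
 * handshake tears the queue back down (release_crq_queue() in the hunk).
 */
static int probe_init(void)
{
	if (!wait_init_done()) {
		fprintf(stderr, "Initialization sequence timed out\n");
		return -1;
	}
	if (init_done_rc) {
		/* release_crq_queue() would go here */
		return init_done_rc;
	}
	/* init_sub_crqs()/init_sub_crq_irqs() follow, each releasing on error */
	return 0;
}

int main(void)
{
	printf("rc = %d\n", probe_init());
	return 0;
}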
static struct device_attribute dev_attr_failover;
static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 22391e8805f6..f9fb780102ac 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -865,6 +865,7 @@ struct ibmvnic_crq_queue {
int size, cur;
dma_addr_t msg_token;
spinlock_t lock;
+ bool active;
};
union sub_crq {
@@ -1108,6 +1109,7 @@ struct ibmvnic_adapter {
bool mac_change_pending;
bool failover_pending;
+ bool force_reset_recovery;
struct ibmvnic_tunables desired;
struct ibmvnic_tunables fallback;
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 41ad56edfb96..27d5f27163d2 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -1,31 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*******************************************************************************
-
- Intel PRO/100 Linux driver
- Copyright(c) 1999 - 2006 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 1999 - 2006 Intel Corporation. */
/*
* e100.c: Intel(R) PRO/100 ethernet driver
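From here on the Intel diffs follow a single pattern: the multi-paragraph GPL notice is collapsed into the SPDX identifier already on line 1 plus a one-line copyright, in whichever comment syntax the file type uses. The three forms as they appear in the hunks below:

	// SPDX-License-Identifier: GPL-2.0        (C sources, .c)
	/* SPDX-License-Identifier: GPL-2.0 */     (C headers, .h)
	# SPDX-License-Identifier: GPL-2.0         (Makefiles)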
diff --git a/drivers/net/ethernet/intel/e1000/Makefile b/drivers/net/ethernet/intel/e1000/Makefile
index c7caadd3c8af..314c52d44b7c 100644
--- a/drivers/net/ethernet/intel/e1000/Makefile
+++ b/drivers/net/ethernet/intel/e1000/Makefile
@@ -1,31 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-################################################################################
-#
-# Intel PRO/1000 Linux driver
# Copyright(c) 1999 - 2006 Intel Corporation.
-#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# The full GNU General Public License is included in this distribution in
-# the file called "COPYING".
-#
-# Contact Information:
-# Linux NICS <linux.nics@intel.com>
-# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-#
-################################################################################
#
# Makefile for the Intel(R) PRO/1000 ethernet driver
diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h
index 3a0feea2df54..c40729b2c184 100644
--- a/drivers/net/ethernet/intel/e1000/e1000.h
+++ b/drivers/net/ethernet/intel/e1000/e1000.h
@@ -1,32 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2006 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
+/* Copyright(c) 1999 - 2006 Intel Corporation. */
/* Linux PRO/1000 Ethernet Driver main header file */
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 3e80ca170dd7..5d365a986bb0 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -1,26 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*******************************************************************************
- * Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2006 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * Linux NICS <linux.nics@intel.com>
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 1999 - 2006 Intel Corporation. */
/* ethtool support for e1000 */
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c
index 6e7e923d57bf..48428d6a00be 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c
@@ -1,31 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*******************************************************************************
-*
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2006 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
- */
+/* Copyright(c) 1999 - 2006 Intel Corporation. */
/* e1000_hw.c
* Shared functions for accessing and configuring the MAC
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.h b/drivers/net/ethernet/intel/e1000/e1000_hw.h
index f09c569ec19b..b57a04954ccf 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.h
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.h
@@ -1,31 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2006 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 1999 - 2006 Intel Corporation. */
/* e1000_hw.h
* Structures, enums, and macros for the MAC
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index d5eb19b86a0a..2110d5f2da19 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -1,31 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2006 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 1999 - 2006 Intel Corporation. */
#include "e1000.h"
#include <net/ip6_checksum.h>
diff --git a/drivers/net/ethernet/intel/e1000/e1000_osdep.h b/drivers/net/ethernet/intel/e1000/e1000_osdep.h
index ae0559b8b011..e966bb290797 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_osdep.h
+++ b/drivers/net/ethernet/intel/e1000/e1000_osdep.h
@@ -1,32 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2006 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
+/* Copyright(c) 1999 - 2006 Intel Corporation. */
/* glue for the OS independent part of e1000
* includes register access macros
diff --git a/drivers/net/ethernet/intel/e1000/e1000_param.c b/drivers/net/ethernet/intel/e1000/e1000_param.c
index 345f23927bcc..d3f29ffe1e47 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_param.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_param.c
@@ -1,31 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2006 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 1999 - 2006 Intel Corporation. */
#include "e1000.h"
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
index 953e99df420c..257bd59bc9c6 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
@@ -1,24 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * Linux NICS <linux.nics@intel.com>
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
/* 80003ES2LAN Gigabit Ethernet Controller (Copper)
* 80003ES2LAN Gigabit Ethernet Controller (Serdes)
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.h b/drivers/net/ethernet/intel/e1000e/80003es2lan.h
index ee6d1256fda4..aa9d639c6cbb 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.h
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.h
@@ -1,24 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * Linux NICS <linux.nics@intel.com>
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#ifndef _E1000E_80003ES2LAN_H_
#define _E1000E_80003ES2LAN_H_
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index 924f2c8dfa6c..b9309302c29e 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -1,24 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * Linux NICS <linux.nics@intel.com>
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
/* 82571EB Gigabit Ethernet Controller
* 82571EB Gigabit Ethernet Controller (Copper)
diff --git a/drivers/net/ethernet/intel/e1000e/82571.h b/drivers/net/ethernet/intel/e1000e/82571.h
index 9a24c645f726..834c238d02db 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.h
+++ b/drivers/net/ethernet/intel/e1000e/82571.h
@@ -1,24 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * Linux NICS <linux.nics@intel.com>
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#ifndef _E1000E_82571_H_
#define _E1000E_82571_H_
diff --git a/drivers/net/ethernet/intel/e1000e/Makefile b/drivers/net/ethernet/intel/e1000e/Makefile
index 24e391a4ac68..44e58b6e7660 100644
--- a/drivers/net/ethernet/intel/e1000e/Makefile
+++ b/drivers/net/ethernet/intel/e1000e/Makefile
@@ -1,30 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-################################################################################
-#
-# Intel PRO/1000 Linux driver
-# Copyright(c) 1999 - 2014 Intel Corporation.
-#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, see <http://www.gnu.org/licenses/>.
-#
-# The full GNU General Public License is included in this distribution in
-# the file called "COPYING".
-#
-# Contact Information:
-# Linux NICS <linux.nics@intel.com>
-# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-#
-################################################################################
+# Copyright(c) 1999 - 2018 Intel Corporation.
#
# Makefile for the Intel(R) PRO/1000 ethernet driver
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index 22883015a695..fd550dee4982 100644
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -1,24 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * Linux NICS <linux.nics@intel.com>
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#ifndef _E1000_DEFINES_H_
#define _E1000_DEFINES_H_
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index da88555ba1fd..c760dc72c520 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -1,24 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * Linux NICS <linux.nics@intel.com>
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
/* Linux PRO/1000 Ethernet Driver main header file */
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 64dc0c11147f..e084cb734eb1 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -1,24 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * Linux NICS <linux.nics@intel.com>
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
/* ethtool support for e1000 */
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index 21802396bed6..eff75bd8a8f0 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -1,24 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * Linux NICS <linux.nics@intel.com>
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#ifndef _E1000_HW_H_
#define _E1000_HW_H_
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 1551d6ce5341..cdae0efde8e6 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1,24 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * Linux NICS <linux.nics@intel.com>
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
/* 82562G 10/100 Network Connection
* 82562G-2 10/100 Network Connection
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index 3c4f82c21084..eb09c755fa17 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -1,24 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * Linux NICS <linux.nics@intel.com>
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#ifndef _E1000E_ICH8LAN_H_
#define _E1000E_ICH8LAN_H_
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index b293464a9f27..4abd55d646c5 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -1,24 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * Linux NICS <linux.nics@intel.com>
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#include "e1000.h"
diff --git a/drivers/net/ethernet/intel/e1000e/mac.h b/drivers/net/ethernet/intel/e1000e/mac.h
index cb0abf6c76a5..6ab261119801 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.h
+++ b/drivers/net/ethernet/intel/e1000e/mac.h
@@ -1,24 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * Linux NICS <linux.nics@intel.com>
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#ifndef _E1000E_MAC_H_
#define _E1000E_MAC_H_
diff --git a/drivers/net/ethernet/intel/e1000e/manage.c b/drivers/net/ethernet/intel/e1000e/manage.c
index e027660aeb92..c4c9b20bc51f 100644
--- a/drivers/net/ethernet/intel/e1000e/manage.c
+++ b/drivers/net/ethernet/intel/e1000e/manage.c
@@ -1,24 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * Linux NICS <linux.nics@intel.com>
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#include "e1000.h"
diff --git a/drivers/net/ethernet/intel/e1000e/manage.h b/drivers/net/ethernet/intel/e1000e/manage.h
index 3268f2e58593..d868aad806d4 100644
--- a/drivers/net/ethernet/intel/e1000e/manage.h
+++ b/drivers/net/ethernet/intel/e1000e/manage.h
@@ -1,24 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * Linux NICS <linux.nics@intel.com>
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#ifndef _E1000E_MANAGE_H_
#define _E1000E_MANAGE_H_
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index ec4a9759a6f2..acf1e8b52b8e 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -1,24 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * Linux NICS <linux.nics@intel.com>
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -3546,15 +3527,12 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
}
break;
case e1000_pch_spt:
- if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) {
- /* Stable 24MHz frequency */
- incperiod = INCPERIOD_24MHZ;
- incvalue = INCVALUE_24MHZ;
- shift = INCVALUE_SHIFT_24MHZ;
- adapter->cc.shift = shift;
- break;
- }
- return -EINVAL;
+ /* Stable 24MHz frequency */
+ incperiod = INCPERIOD_24MHZ;
+ incvalue = INCVALUE_24MHZ;
+ shift = INCVALUE_SHIFT_24MHZ;
+ adapter->cc.shift = shift;
+ break;
case e1000_pch_cnp:
if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) {
/* Stable 24MHz frequency */
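In the netdev.c hunk above, e1000_pch_spt stops consulting the TSYNCRXCTL stable-clock bit and always selects the 24MHz increment parameters, removing the -EINVAL path, while e1000_pch_cnp keeps the check. A sketch of the resulting selection logic (the constants and shift are illustrative placeholders for e1000e's real defines):

#include <stdio.h>

/* Illustrative placeholders; the real values live in e1000e's headers. */
#define INCPERIOD_24MHZ       1
#define INCVALUE_24MHZ        0x800000
#define TIMINCA_PERIOD_SHIFT  24

enum mac_type { PCH_SPT, PCH_CNP };

static int stable_clock_bit = 1; /* models TSYNCRXCTL.SYSCFI for pch_cnp */

static int get_timinca(enum mac_type type, unsigned int *timinca)
{
	unsigned int incperiod, incvalue;

	switch (type) {
	case PCH_SPT:
		/* after the hunk: unconditionally the 24MHz parameters */
		incperiod = INCPERIOD_24MHZ;
		incvalue = INCVALUE_24MHZ;
		break;
	case PCH_CNP:
		if (!stable_clock_bit)
			return -1; /* the bit-clear path is outside the visible hunk; -1 is demo-only */
		incperiod = INCPERIOD_24MHZ;
		incvalue = INCVALUE_24MHZ;
		break;
	default:
		return -1;
	}

	*timinca = (incperiod << TIMINCA_PERIOD_SHIFT) | incvalue;
	return 0;
}

int main(void)
{
	unsigned int t;

	if (!get_timinca(PCH_SPT, &t))
		printf("timinca = 0x%08x\n", t);
	return 0;
}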
diff --git a/drivers/net/ethernet/intel/e1000e/nvm.c b/drivers/net/ethernet/intel/e1000e/nvm.c
index 68949bb41b7b..937f9af22d26 100644
--- a/drivers/net/ethernet/intel/e1000e/nvm.c
+++ b/drivers/net/ethernet/intel/e1000e/nvm.c
@@ -1,24 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * Linux NICS <linux.nics@intel.com>
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#include "e1000.h"
diff --git a/drivers/net/ethernet/intel/e1000e/nvm.h b/drivers/net/ethernet/intel/e1000e/nvm.h
index 8e082028be7d..6a30dfea4117 100644
--- a/drivers/net/ethernet/intel/e1000e/nvm.h
+++ b/drivers/net/ethernet/intel/e1000e/nvm.h
@@ -1,24 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * Linux NICS <linux.nics@intel.com>
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#ifndef _E1000E_NVM_H_
#define _E1000E_NVM_H_
diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c
index 2def33eba9e6..098369fd3e65 100644
--- a/drivers/net/ethernet/intel/e1000e/param.c
+++ b/drivers/net/ethernet/intel/e1000e/param.c
@@ -1,24 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * Linux NICS <linux.nics@intel.com>
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#include <linux/netdevice.h>
#include <linux/module.h>
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index b8226ed0e338..42233019255a 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -1,24 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * Linux NICS <linux.nics@intel.com>
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#include "e1000.h"
diff --git a/drivers/net/ethernet/intel/e1000e/phy.h b/drivers/net/ethernet/intel/e1000e/phy.h
index d4180b5e9196..c48777d09523 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.h
+++ b/drivers/net/ethernet/intel/e1000e/phy.h
@@ -1,24 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * Linux NICS <linux.nics@intel.com>
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#ifndef _E1000E_PHY_H_
#define _E1000E_PHY_H_
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index f941e5085f44..37c76945ad9b 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -1,24 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * Linux NICS <linux.nics@intel.com>
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
/* PTP 1588 Hardware Clock (PHC)
* Derived from PTP Hardware Clock driver for Intel 82576 and 82580 (igb)
diff --git a/drivers/net/ethernet/intel/e1000e/regs.h b/drivers/net/ethernet/intel/e1000e/regs.h
index 16afc3c2a986..47f5ca793970 100644
--- a/drivers/net/ethernet/intel/e1000e/regs.h
+++ b/drivers/net/ethernet/intel/e1000e/regs.h
@@ -1,24 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * Linux NICS <linux.nics@intel.com>
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#ifndef _E1000E_REGS_H_
#define _E1000E_REGS_H_
diff --git a/drivers/net/ethernet/intel/fm10k/Makefile b/drivers/net/ethernet/intel/fm10k/Makefile
index 93277cb99cb7..26a9746ccb14 100644
--- a/drivers/net/ethernet/intel/fm10k/Makefile
+++ b/drivers/net/ethernet/intel/fm10k/Makefile
@@ -1,26 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-################################################################################
-#
-# Intel(R) Ethernet Switch Host Interface Driver
-# Copyright(c) 2013 - 2016 Intel Corporation.
-#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
-#
-# The full GNU General Public License is included in this distribution in
-# the file called "COPYING".
-#
-# Contact Information:
-# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-#
-################################################################################
+# Copyright(c) 2013 - 2018 Intel Corporation.
#
# Makefile for the Intel(R) Ethernet Switch Host Interface Driver
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h
index a9cdf763c59d..a903a0ba45e1 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k.h
@@ -1,23 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Intel(R) Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2017 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _FM10K_H_
#define _FM10K_H_
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_common.c b/drivers/net/ethernet/intel/fm10k/fm10k_common.c
index e303d88720ef..f51a63fca513 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_common.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_common.c
@@ -1,23 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Intel(R) Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2018 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include "fm10k_common.h"
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_common.h b/drivers/net/ethernet/intel/fm10k/fm10k_common.h
index 2bdb24d2ca9d..4c48fb73b3e7 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_common.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_common.h
@@ -1,23 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Intel(R) Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _FM10K_COMMON_H_
#define _FM10K_COMMON_H_
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c b/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c
index c4f733452ef2..20768ac7f17e 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c
@@ -1,23 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Intel(R) Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include "fm10k.h"
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c
index 43e8d839831f..dca104121c05 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c
@@ -1,23 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Intel(R) Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include "fm10k.h"
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
index 28b6b4e56487..7657daa27298 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
@@ -1,40 +1,32 @@
// SPDX-License-Identifier: GPL-2.0
-/* Intel(R) Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2017 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include <linux/vmalloc.h>
#include "fm10k.h"
struct fm10k_stats {
+	/* The stat_string is expected to be a printf-style format string,
+	 * which fm10k_add_stat_strings resolves via vsnprintf. Every member
+	 * of a stats array should use the same format specifiers, since they
+	 * are all formatted with the same variadic arguments.
+	 */
char stat_string[ETH_GSTRING_LEN];
int sizeof_stat;
int stat_offset;
};
-#define FM10K_NETDEV_STAT(_net_stat) { \
- .stat_string = #_net_stat, \
- .sizeof_stat = FIELD_SIZEOF(struct net_device_stats, _net_stat), \
- .stat_offset = offsetof(struct net_device_stats, _net_stat) \
+#define FM10K_STAT_FIELDS(_type, _name, _stat) { \
+ .stat_string = _name, \
+ .sizeof_stat = FIELD_SIZEOF(_type, _stat), \
+ .stat_offset = offsetof(_type, _stat) \
}
+/* netdevice statistics */
+#define FM10K_NETDEV_STAT(_net_stat) \
+ FM10K_STAT_FIELDS(struct net_device_stats, __stringify(_net_stat), \
+ _net_stat)
+
static const struct fm10k_stats fm10k_gstrings_net_stats[] = {
FM10K_NETDEV_STAT(tx_packets),
FM10K_NETDEV_STAT(tx_bytes),
@@ -52,11 +44,9 @@ static const struct fm10k_stats fm10k_gstrings_net_stats[] = {
#define FM10K_NETDEV_STATS_LEN ARRAY_SIZE(fm10k_gstrings_net_stats)
-#define FM10K_STAT(_name, _stat) { \
- .stat_string = _name, \
- .sizeof_stat = FIELD_SIZEOF(struct fm10k_intfc, _stat), \
- .stat_offset = offsetof(struct fm10k_intfc, _stat) \
-}
+/* General interface statistics */
+#define FM10K_STAT(_name, _stat) \
+ FM10K_STAT_FIELDS(struct fm10k_intfc, _name, _stat)
static const struct fm10k_stats fm10k_gstrings_global_stats[] = {
FM10K_STAT("tx_restart_queue", restart_queue),
@@ -93,11 +83,9 @@ static const struct fm10k_stats fm10k_gstrings_pf_stats[] = {
FM10K_STAT("nodesc_drop", stats.nodesc_drop.count),
};
-#define FM10K_MBX_STAT(_name, _stat) { \
- .stat_string = _name, \
- .sizeof_stat = FIELD_SIZEOF(struct fm10k_mbx_info, _stat), \
- .stat_offset = offsetof(struct fm10k_mbx_info, _stat) \
-}
+/* mailbox statistics */
+#define FM10K_MBX_STAT(_name, _stat) \
+ FM10K_STAT_FIELDS(struct fm10k_mbx_info, _name, _stat)
static const struct fm10k_stats fm10k_gstrings_mbx_stats[] = {
FM10K_MBX_STAT("mbx_tx_busy", tx_busy),
@@ -111,15 +99,13 @@ static const struct fm10k_stats fm10k_gstrings_mbx_stats[] = {
FM10K_MBX_STAT("mbx_rx_mbmem_pushed", rx_mbmem_pushed),
};
-#define FM10K_QUEUE_STAT(_name, _stat) { \
- .stat_string = _name, \
- .sizeof_stat = FIELD_SIZEOF(struct fm10k_ring, _stat), \
- .stat_offset = offsetof(struct fm10k_ring, _stat) \
-}
+/* per-queue ring statistics */
+#define FM10K_QUEUE_STAT(_name, _stat) \
+ FM10K_STAT_FIELDS(struct fm10k_ring, _name, _stat)
static const struct fm10k_stats fm10k_gstrings_queue_stats[] = {
- FM10K_QUEUE_STAT("packets", stats.packets),
- FM10K_QUEUE_STAT("bytes", stats.bytes),
+ FM10K_QUEUE_STAT("%s_queue_%u_packets", stats.packets),
+ FM10K_QUEUE_STAT("%s_queue_%u_bytes", stats.bytes),
};
#define FM10K_GLOBAL_STATS_LEN ARRAY_SIZE(fm10k_gstrings_global_stats)
@@ -149,49 +135,44 @@ enum {
static const char fm10k_prv_flags[FM10K_PRV_FLAG_LEN][ETH_GSTRING_LEN] = {
};
-static void fm10k_add_stat_strings(u8 **p, const char *prefix,
- const struct fm10k_stats stats[],
- const unsigned int size)
+static void __fm10k_add_stat_strings(u8 **p, const struct fm10k_stats stats[],
+ const unsigned int size, ...)
{
unsigned int i;
for (i = 0; i < size; i++) {
- snprintf(*p, ETH_GSTRING_LEN, "%s%s",
- prefix, stats[i].stat_string);
+ va_list args;
+
+ va_start(args, size);
+ vsnprintf(*p, ETH_GSTRING_LEN, stats[i].stat_string, args);
*p += ETH_GSTRING_LEN;
+ va_end(args);
}
}
+#define fm10k_add_stat_strings(p, stats, ...) \
+ __fm10k_add_stat_strings(p, stats, ARRAY_SIZE(stats), ## __VA_ARGS__)
+
static void fm10k_get_stat_strings(struct net_device *dev, u8 *data)
{
struct fm10k_intfc *interface = netdev_priv(dev);
unsigned int i;
- fm10k_add_stat_strings(&data, "", fm10k_gstrings_net_stats,
- FM10K_NETDEV_STATS_LEN);
+ fm10k_add_stat_strings(&data, fm10k_gstrings_net_stats);
- fm10k_add_stat_strings(&data, "", fm10k_gstrings_global_stats,
- FM10K_GLOBAL_STATS_LEN);
+ fm10k_add_stat_strings(&data, fm10k_gstrings_global_stats);
- fm10k_add_stat_strings(&data, "", fm10k_gstrings_mbx_stats,
- FM10K_MBX_STATS_LEN);
+ fm10k_add_stat_strings(&data, fm10k_gstrings_mbx_stats);
if (interface->hw.mac.type != fm10k_mac_vf)
- fm10k_add_stat_strings(&data, "", fm10k_gstrings_pf_stats,
- FM10K_PF_STATS_LEN);
+ fm10k_add_stat_strings(&data, fm10k_gstrings_pf_stats);
for (i = 0; i < interface->hw.mac.max_queues; i++) {
- char prefix[ETH_GSTRING_LEN];
-
- snprintf(prefix, ETH_GSTRING_LEN, "tx_queue_%u_", i);
- fm10k_add_stat_strings(&data, prefix,
- fm10k_gstrings_queue_stats,
- FM10K_QUEUE_STATS_LEN);
+ fm10k_add_stat_strings(&data, fm10k_gstrings_queue_stats,
+ "tx", i);
- snprintf(prefix, ETH_GSTRING_LEN, "rx_queue_%u_", i);
- fm10k_add_stat_strings(&data, prefix,
- fm10k_gstrings_queue_stats,
- FM10K_QUEUE_STATS_LEN);
+ fm10k_add_stat_strings(&data, fm10k_gstrings_queue_stats,
+ "rx", i);
}
}
@@ -236,9 +217,9 @@ static int fm10k_get_sset_count(struct net_device *dev, int sset)
}
}
-static void fm10k_add_ethtool_stats(u64 **data, void *pointer,
- const struct fm10k_stats stats[],
- const unsigned int size)
+static void __fm10k_add_ethtool_stats(u64 **data, void *pointer,
+ const struct fm10k_stats stats[],
+ const unsigned int size)
{
unsigned int i;
char *p;
@@ -267,11 +248,16 @@ static void fm10k_add_ethtool_stats(u64 **data, void *pointer,
*((*data)++) = *(u8 *)p;
break;
default:
+ WARN_ONCE(1, "unexpected stat size for %s",
+ stats[i].stat_string);
*((*data)++) = 0;
}
}
}
+#define fm10k_add_ethtool_stats(data, pointer, stats) \
+ __fm10k_add_ethtool_stats(data, pointer, stats, ARRAY_SIZE(stats))
+
static void fm10k_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats __always_unused *stats,
u64 *data)
@@ -282,20 +268,16 @@ static void fm10k_get_ethtool_stats(struct net_device *netdev,
fm10k_update_stats(interface);
- fm10k_add_ethtool_stats(&data, net_stats, fm10k_gstrings_net_stats,
- FM10K_NETDEV_STATS_LEN);
+ fm10k_add_ethtool_stats(&data, net_stats, fm10k_gstrings_net_stats);
- fm10k_add_ethtool_stats(&data, interface, fm10k_gstrings_global_stats,
- FM10K_GLOBAL_STATS_LEN);
+ fm10k_add_ethtool_stats(&data, interface, fm10k_gstrings_global_stats);
fm10k_add_ethtool_stats(&data, &interface->hw.mbx,
- fm10k_gstrings_mbx_stats,
- FM10K_MBX_STATS_LEN);
+ fm10k_gstrings_mbx_stats);
if (interface->hw.mac.type != fm10k_mac_vf) {
fm10k_add_ethtool_stats(&data, interface,
- fm10k_gstrings_pf_stats,
- FM10K_PF_STATS_LEN);
+ fm10k_gstrings_pf_stats);
}
for (i = 0; i < interface->hw.mac.max_queues; i++) {
@@ -303,13 +285,11 @@ static void fm10k_get_ethtool_stats(struct net_device *netdev,
ring = interface->tx_ring[i];
fm10k_add_ethtool_stats(&data, ring,
- fm10k_gstrings_queue_stats,
- FM10K_QUEUE_STATS_LEN);
+ fm10k_gstrings_queue_stats);
ring = interface->rx_ring[i];
fm10k_add_ethtool_stats(&data, ring,
- fm10k_gstrings_queue_stats,
- FM10K_QUEUE_STATS_LEN);
+ fm10k_gstrings_queue_stats);
}
}
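
The fm10k_ethtool.c hunks above replace the fixed string prefix with printf-style stat templates resolved per call through vsnprintf. Below is a minimal user-space sketch of that pattern, assuming nothing fm10k-specific beyond the "%s_queue_%u" naming; GSTRING_LEN and the helper names here are illustrative stand-ins, not the driver's.

#include <stdarg.h>
#include <stdio.h>
#include <string.h>

#define GSTRING_LEN 32

/* Each stat name is a printf-style template; one set of variadic
 * arguments is applied to every template in the array.
 */
static void add_stat_strings(char **p, const char * const names[],
			     unsigned int size, ...)
{
	unsigned int i;

	for (i = 0; i < size; i++) {
		va_list args;

		/* va_start/va_end per iteration, as in the driver, so the
		 * same argument list can be consumed repeatedly.
		 */
		va_start(args, size);
		vsnprintf(*p, GSTRING_LEN, names[i], args);
		*p += GSTRING_LEN;
		va_end(args);
	}
}

int main(void)
{
	static const char * const queue_stats[] = {
		"%s_queue_%u_packets",
		"%s_queue_%u_bytes",
	};
	char buf[4 * GSTRING_LEN] = { 0 };
	char *p = buf;
	unsigned int i;

	add_stat_strings(&p, queue_stats, 2, "tx", 0u);
	add_stat_strings(&p, queue_stats, 2, "rx", 0u);

	for (i = 0; i < 4; i++)
		puts(buf + i * GSTRING_LEN);	/* tx_queue_0_packets, ... */
	return 0;
}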
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
index 30395f5e5e87..e707d717012f 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
@@ -1,23 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Intel(R) Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2017 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include "fm10k.h"
#include "fm10k_vf.h"
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index df8607097e4a..3f536541f45f 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -1,23 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Intel(R) Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2017 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include <linux/types.h>
#include <linux/module.h>
@@ -445,15 +427,14 @@ static void fm10k_type_trans(struct fm10k_ring *rx_ring,
l2_accel = NULL;
}
- skb->protocol = eth_type_trans(skb, dev);
-
/* Record Rx queue, or update macvlan statistics */
if (!l2_accel)
skb_record_rx_queue(skb, rx_ring->queue_index);
else
macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN, true,
- (skb->pkt_type == PACKET_BROADCAST) ||
- (skb->pkt_type == PACKET_MULTICAST));
+ false);
+
+ skb->protocol = eth_type_trans(skb, dev);
}
/**
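
The fm10k_main.c hunk above moves eth_type_trans() after the Rx accounting and hard-codes the multicast flag passed to macvlan_count_rx() to false, consistent with the xcast-mode changes elsewhere in this diff: with destination-MAC filtering, the macvlan glorts should only ever see unicast frames. Since eth_type_trans() is the step that classifies the frame and consumes the Ethernet header, any bookkeeping that reads the pre-pull state has to run first. The following is a toy model of those two steps, not the kernel functions.

#include <stdio.h>
#include <string.h>

struct toy_skb {
	unsigned char data[64];
	unsigned int len;	/* length including the Ethernet header */
};

/* Toy stand-in for eth_type_trans(): read the EtherType, then consume
 * the 14-byte Ethernet header, shrinking skb->len.
 */
static unsigned short toy_eth_type_trans(struct toy_skb *skb)
{
	unsigned short proto = (skb->data[12] << 8) | skb->data[13];

	memmove(skb->data, skb->data + 14, skb->len - 14);
	skb->len -= 14;
	return proto;
}

static void toy_count_rx(unsigned int wire_len)
{
	printf("accounted %u bytes\n", wire_len);
}

int main(void)
{
	struct toy_skb skb = { .len = 60 };

	skb.data[12] = 0x08;	/* EtherType 0x0800, IPv4 */

	/* Accounting runs while skb.len still covers the full frame;
	 * only afterwards is the header classified and pulled. */
	toy_count_rx(skb.len);
	printf("proto 0x%04x, payload %u bytes\n",
	       toy_eth_type_trans(&skb), skb.len);
	return 0;
}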
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
index c01bf30a0c9e..21021fe4f1c3 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
@@ -1,23 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Intel(R) Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2017 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include "fm10k_common.h"
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h
index 007e1dfa9b7a..56d1abff04e2 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h
@@ -1,23 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Intel(R) Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _FM10K_MBX_H_
#define _FM10K_MBX_H_
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index 45793491d4ba..929f538d28bc 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -1,27 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
-/* Intel(R) Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2018 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include "fm10k.h"
#include <linux/vmalloc.h>
#include <net/udp_tunnel.h>
+#include <linux/if_macvlan.h>
/**
* fm10k_setup_tx_resources - allocate Tx resources (Descriptors)
@@ -924,7 +907,9 @@ static int fm10k_mc_vlan_unsync(struct net_device *netdev,
static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set)
{
struct fm10k_intfc *interface = netdev_priv(netdev);
+ struct fm10k_l2_accel *l2_accel = interface->l2_accel;
struct fm10k_hw *hw = &interface->hw;
+ u16 glort;
s32 err;
int i;
@@ -992,6 +977,22 @@ static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set)
if (err)
goto err_out;
+ /* Update L2 accelerated macvlan addresses */
+ if (l2_accel) {
+ for (i = 0; i < l2_accel->size; i++) {
+ struct net_device *sdev = l2_accel->macvlan[i];
+
+ if (!sdev)
+ continue;
+
+ glort = l2_accel->dglort + 1 + i;
+
+ fm10k_queue_mac_request(interface, glort,
+ sdev->dev_addr,
+ vid, set);
+ }
+ }
+
/* set VLAN ID prior to syncing/unsyncing the VLAN */
interface->vid = vid + (set ? VLAN_N_VID : 0);
@@ -1231,6 +1232,22 @@ void fm10k_restore_rx_state(struct fm10k_intfc *interface)
fm10k_queue_mac_request(interface, glort,
hw->mac.addr, vid, true);
+
+ /* synchronize macvlan addresses */
+ if (l2_accel) {
+ for (i = 0; i < l2_accel->size; i++) {
+ struct net_device *sdev = l2_accel->macvlan[i];
+
+ if (!sdev)
+ continue;
+
+ glort = l2_accel->dglort + 1 + i;
+
+ fm10k_queue_mac_request(interface, glort,
+ sdev->dev_addr,
+ vid, true);
+ }
+ }
}
/* update xcast mode before synchronizing addresses if host's mailbox
@@ -1254,7 +1271,7 @@ void fm10k_restore_rx_state(struct fm10k_intfc *interface)
glort = l2_accel->dglort + 1 + i;
hw->mac.ops.update_xcast_mode(hw, glort,
- FM10K_XCAST_MODE_MULTI);
+ FM10K_XCAST_MODE_NONE);
fm10k_queue_mac_request(interface, glort,
sdev->dev_addr,
hw->mac.default_vid, true);
@@ -1447,7 +1464,14 @@ static void *fm10k_dfwd_add_station(struct net_device *dev,
struct fm10k_dglort_cfg dglort = { 0 };
struct fm10k_hw *hw = &interface->hw;
int size = 0, i;
- u16 glort;
+ u16 vid, glort;
+
+	/* The hardware supported by fm10k only filters on the destination MAC
+	 * address, so only offload modes that destination-MAC filtering can
+	 * actually implement are accepted here.
+	 */
+ if (!macvlan_supports_dest_filter(sdev))
+ return ERR_PTR(-EMEDIUMTYPE);
/* allocate l2 accel structure if it is not available */
if (!l2_accel) {
@@ -1513,12 +1537,18 @@ static void *fm10k_dfwd_add_station(struct net_device *dev,
glort = l2_accel->dglort + 1 + i;
- if (fm10k_host_mbx_ready(interface)) {
+ if (fm10k_host_mbx_ready(interface))
hw->mac.ops.update_xcast_mode(hw, glort,
- FM10K_XCAST_MODE_MULTI);
+ FM10K_XCAST_MODE_NONE);
+
+ fm10k_queue_mac_request(interface, glort, sdev->dev_addr,
+ hw->mac.default_vid, true);
+
+ for (vid = fm10k_find_next_vlan(interface, 0);
+ vid < VLAN_N_VID;
+ vid = fm10k_find_next_vlan(interface, vid))
fm10k_queue_mac_request(interface, glort, sdev->dev_addr,
- hw->mac.default_vid, true);
- }
+ vid, true);
fm10k_mbx_unlock(interface);
@@ -1532,8 +1562,8 @@ static void fm10k_dfwd_del_station(struct net_device *dev, void *priv)
struct fm10k_dglort_cfg dglort = { 0 };
struct fm10k_hw *hw = &interface->hw;
struct net_device *sdev = priv;
+ u16 vid, glort;
int i;
- u16 glort;
if (!l2_accel)
return;
@@ -1553,12 +1583,18 @@ static void fm10k_dfwd_del_station(struct net_device *dev, void *priv)
glort = l2_accel->dglort + 1 + i;
- if (fm10k_host_mbx_ready(interface)) {
+ if (fm10k_host_mbx_ready(interface))
hw->mac.ops.update_xcast_mode(hw, glort,
FM10K_XCAST_MODE_NONE);
+
+ fm10k_queue_mac_request(interface, glort, sdev->dev_addr,
+ hw->mac.default_vid, false);
+
+ for (vid = fm10k_find_next_vlan(interface, 0);
+ vid < VLAN_N_VID;
+ vid = fm10k_find_next_vlan(interface, vid))
fm10k_queue_mac_request(interface, glort, sdev->dev_addr,
- hw->mac.default_vid, false);
- }
+ vid, false);
fm10k_mbx_unlock(interface);
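
The fm10k_netdev.c changes above keep the L2-accelerated macvlan addresses in sync with the VLAN table: whenever a VID is updated or restored, a MAC request is queued for every macvlan glort, and station add/del walk all active VIDs via fm10k_find_next_vlan(). Here is a standalone sketch of that walk, with a plain bitmap standing in for the driver's VLAN table; the helper is a hypothetical stand-in, not the driver's.

#include <stdio.h>

#define VLAN_N_VID 4096
#define BITS_PER_WORD (8 * sizeof(unsigned long))

static unsigned long vlan_bitmap[VLAN_N_VID / BITS_PER_WORD];

static void set_vid(unsigned int vid)
{
	vlan_bitmap[vid / BITS_PER_WORD] |= 1UL << (vid % BITS_PER_WORD);
}

/* Return the next active VID strictly above @vid, or VLAN_N_VID if
 * none remain -- the same contract the loops above rely on.
 */
static unsigned int find_next_vlan(unsigned int vid)
{
	while (++vid < VLAN_N_VID) {
		if (vlan_bitmap[vid / BITS_PER_WORD] &
		    (1UL << (vid % BITS_PER_WORD)))
			return vid;
	}
	return VLAN_N_VID;
}

int main(void)
{
	unsigned int vid;

	set_vid(10);
	set_vid(42);

	/* Same shape as the station add/del hunks: visit every active
	 * VID and queue a MAC request for the macvlan address on each. */
	for (vid = find_next_vlan(0); vid < VLAN_N_VID;
	     vid = find_next_vlan(vid))
		printf("queue MAC request for vid %u\n", vid);
	return 0;
}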
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index c4a2b688b38b..15071e4adb98 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -1,23 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Intel(R) Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2018 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include <linux/module.h>
#include <linux/interrupt.h>
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
index 7ba54c534f8c..8f0a99b6a537 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
@@ -1,23 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Intel(R) Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2018 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include "fm10k_pf.h"
#include "fm10k_vf.h"
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.h b/drivers/net/ethernet/intel/fm10k/fm10k_pf.h
index ae81f9a16602..8e814df709d2 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.h
@@ -1,23 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Intel(R) Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2017 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _FM10K_PF_H_
#define _FM10K_PF_H_
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
index 725ecb7abccd..2a7a40bf2b1c 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
@@ -1,23 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Intel(R) Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2018 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include "fm10k_tlv.h"
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h
index 5d2ee759507e..160bc5b78f99 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h
@@ -1,23 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Intel(R) Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _FM10K_TLV_H_
#define _FM10K_TLV_H_
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_type.h b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
index dd23af11e2c1..3e608e493f9d 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_type.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
@@ -1,23 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Intel(R) Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _FM10K_TYPE_H_
#define _FM10K_TYPE_H_
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
index f06913630b39..a8519c1f0406 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
@@ -1,23 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Intel(R) Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include "fm10k_vf.h"
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.h b/drivers/net/ethernet/intel/fm10k/fm10k_vf.h
index 66a66b73a2f1..787d0d570a28 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.h
@@ -1,23 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Intel(R) Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _FM10K_VF_H_
#define _FM10K_VF_H_
diff --git a/drivers/net/ethernet/intel/i40e/Makefile b/drivers/net/ethernet/intel/i40e/Makefile
index 75437768a07c..14397e7e9925 100644
--- a/drivers/net/ethernet/intel/i40e/Makefile
+++ b/drivers/net/ethernet/intel/i40e/Makefile
@@ -1,29 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-################################################################################
-#
-# Intel Ethernet Controller XL710 Family Linux Driver
-# Copyright(c) 2013 - 2015 Intel Corporation.
-#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program. If not, see <http://www.gnu.org/licenses/>.
-#
-# The full GNU General Public License is included in this distribution in
-# the file called "COPYING".
-#
-# Contact Information:
-# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-#
-################################################################################
+# Copyright(c) 2013 - 2018 Intel Corporation.
#
# Makefile for the Intel(R) Ethernet Connection XL710 (i40e.ko) driver
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index a44139c1de80..7a80652e2500 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2017 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_H_
#define _I40E_H_
@@ -334,10 +310,12 @@ struct i40e_tc_configuration {
struct i40e_tc_info tc_info[I40E_MAX_TRAFFIC_CLASS];
};
+#define I40E_UDP_PORT_INDEX_UNUSED 255
struct i40e_udp_port_config {
/* AdminQ command interface expects port number in Host byte order */
u16 port;
u8 type;
+ u8 filter_index;
};
/* macros related to FLX_PIT */
@@ -608,7 +586,7 @@ struct i40e_pf {
unsigned long ptp_tx_start;
struct hwtstamp_config tstamp_config;
struct mutex tmreg_lock; /* Used to protect the SYSTIME registers. */
- u64 ptp_base_adj;
+ u32 ptp_adj_mult;
u32 tx_hwtstamp_timeouts;
u32 tx_hwtstamp_skipped;
u32 rx_hwtstamp_cleared;
@@ -1009,6 +987,9 @@ void i40e_service_event_schedule(struct i40e_pf *pf);
void i40e_notify_client_of_vf_msg(struct i40e_vsi *vsi, u32 vf_id,
u8 *msg, u16 len);
+int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q, bool is_xdp,
+ bool enable);
+int i40e_control_wait_rx_q(struct i40e_pf *pf, int pf_q, bool enable);
int i40e_vsi_start_rings(struct i40e_vsi *vsi);
void i40e_vsi_stop_rings(struct i40e_vsi *vsi);
void i40e_vsi_stop_rings_no_wait(struct i40e_vsi *vsi);
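
In i40e.h above, the cached u64 ptp_base_adj gives way to a u32 ptp_adj_mult, i.e. a per-link-speed multiplier applied when tuning the PHC increment rather than a precomputed base value. What follows is a hedged sketch of parts-per-billion tuning with such a multiplier; the constant, values, and helper name are illustrative assumptions, not i40e's.

#include <stdint.h>
#include <stdio.h>

#define BASE_INCVAL_40GB 0x0100000000ULL	/* hypothetical base increment */

/* Scale the base increment by ppb parts per billion, then apply the
 * link-speed multiplier. Keeping only the multiplier in driver state
 * leaves all of the adjustment math in one place.
 */
static uint64_t tune_incval(uint32_t adj_mult, int32_t ppb)
{
	uint64_t incval = BASE_INCVAL_40GB;
	uint64_t diff;
	int neg = ppb < 0;

	if (neg)
		ppb = -ppb;

	diff = (incval * (uint64_t)ppb) / 1000000000ULL;
	incval = neg ? incval - diff : incval + diff;

	return incval * adj_mult;
}

int main(void)
{
	/* e.g. mult 1 at 40Gb, 2 at a slower speed (illustrative only) */
	printf("40Gb:   0x%llx\n", (unsigned long long)tune_incval(1, 100));
	printf("slower: 0x%llx\n", (unsigned long long)tune_incval(2, 100));
	return 0;
}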
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 843fc7781ef8..ddbea79d18e5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -1,29 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include "i40e_status.h"
#include "i40e_type.h"
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
index 0a8749ee9fd3..edec3df78971 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_ADMINQ_H_
#define _I40E_ADMINQ_H_
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 0244923edeb8..7d888e05f96f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2017 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_ADMINQ_CMD_H_
#define _I40E_ADMINQ_CMD_H_
diff --git a/drivers/net/ethernet/intel/i40e/i40e_alloc.h b/drivers/net/ethernet/intel/i40e/i40e_alloc.h
index abed0c52e782..cb8689222c8b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_alloc.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_alloc.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_ALLOC_H_
#define _I40E_ALLOC_H_
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
index d8ce4999864f..5f3b8b9ff511 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
@@ -1,29 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2017 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include <linux/list.h>
#include <linux/errno.h>
@@ -64,7 +40,7 @@ static struct i40e_ops i40e_lan_ops = {
/**
* i40e_client_get_params - Get the params that can change at runtime
* @vsi: the VSI with the message
- * @param: clinet param struct
+ * @params: client param struct
*
**/
static
@@ -590,7 +566,7 @@ static int i40e_client_virtchnl_send(struct i40e_info *ldev,
* i40e_client_setup_qvlist
* @ldev: pointer to L2 context.
* @client: Client pointer.
- * @qv_info: queue and vector list
+ * @qvlist_info: queue and vector list
*
* Return 0 on success or < 0 on error
**/
@@ -665,7 +641,7 @@ err:
* i40e_client_request_reset
* @ldev: pointer to L2 context.
* @client: Client pointer.
- * @level: reset level
+ * @reset_level: reset level
**/
static void i40e_client_request_reset(struct i40e_info *ldev,
struct i40e_client *client,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.h b/drivers/net/ethernet/intel/i40e/i40e_client.h
index 9d464d40bc17..72994baf4941 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_CLIENT_H_
#define _I40E_CLIENT_H_
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index c0a3dae8a2db..eb2d1530d331 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -1,29 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include "i40e_type.h"
#include "i40e_adminq.h"
@@ -1695,6 +1671,8 @@ enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
/**
* i40e_set_fc
* @hw: pointer to the hw struct
+ * @aq_failures: buffer to return AdminQ failure information
+ * @atomic_restart: whether to enable atomic link restart
*
* Set the requested flow control mode using set_phy_config.
**/
@@ -2831,8 +2809,8 @@ i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
* @mr_list: list of mirrored VSI SEIDs or VLAN IDs
* @cmd_details: pointer to command details structure or NULL
* @rule_id: Rule ID returned from FW
- * @rule_used: Number of rules used in internal switch
- * @rule_free: Number of rules free in internal switch
+ * @rules_used: Number of rules used in internal switch
+ * @rules_free: Number of rules free in internal switch
*
* Add/Delete a mirror rule to a specific switch. Mirror rules are supported for
* VEBs/VEPA elements only
@@ -2892,8 +2870,8 @@ static i40e_status i40e_mirrorrule_op(struct i40e_hw *hw,
* @mr_list: list of mirrored VSI SEIDs or VLAN IDs
* @cmd_details: pointer to command details structure or NULL
* @rule_id: Rule ID returned from FW
- * @rule_used: Number of rules used in internal switch
- * @rule_free: Number of rules free in internal switch
+ * @rules_used: Number of rules used in internal switch
+ * @rules_free: Number of rules free in internal switch
*
* Add mirror rule. Mirror rules are supported for VEBs or VEPA elements only
**/
@@ -2923,8 +2901,8 @@ i40e_status i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
* add_mirrorrule.
* @mr_list: list of mirrored VLAN IDs to be removed
* @cmd_details: pointer to command details structure or NULL
- * @rule_used: Number of rules used in internal switch
- * @rule_free: Number of rules free in internal switch
+ * @rules_used: Number of rules used in internal switch
+ * @rules_free: Number of rules free in internal switch
*
* Delete a mirror rule. Mirror rules are supported for VEBs/VEPA elements only
**/
@@ -3672,6 +3650,8 @@ i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
/**
* i40e_aq_start_lldp
* @hw: pointer to the hw struct
+ * @buff: buffer for result
+ * @buff_size: buffer size
* @cmd_details: pointer to command details structure or NULL
*
* Start the embedded LLDP Agent on all ports.
@@ -3752,7 +3732,6 @@ i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
* i40e_aq_add_udp_tunnel
* @hw: pointer to the hw struct
* @udp_port: the UDP port to add in Host byte order
- * @header_len: length of the tunneling header length in DWords
* @protocol_index: protocol index type
* @filter_index: pointer to filter index
* @cmd_details: pointer to command details structure or NULL
@@ -3971,6 +3950,7 @@ i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw,
* @hw: pointer to the hw struct
* @seid: seid of the switching component connected to Physical Port
* @ets_data: Buffer holding ETS parameters
+ * @opcode: Tx scheduler AQ command opcode
* @cmd_details: pointer to command details structure or NULL
**/
i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
@@ -4314,10 +4294,10 @@ i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
* @hw: pointer to the hw struct
* @seid: VSI seid to add ethertype filter from
**/
-#define I40E_FLOW_CONTROL_ETHTYPE 0x8808
void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
u16 seid)
{
+#define I40E_FLOW_CONTROL_ETHTYPE 0x8808
u16 flag = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
@@ -4448,6 +4428,7 @@ void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status)
* @ret_buff_size: actual buffer size returned
* @ret_next_table: next block to read
* @ret_next_index: next index to read
+ * @cmd_details: pointer to command details structure or NULL
*
* Dump internal FW/HW data for debug purposes.
*
@@ -4574,7 +4555,7 @@ i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw,
* i40e_read_phy_register_clause22
* @hw: pointer to the HW structure
* @reg: register address in the page
- * @phy_adr: PHY address on MDIO interface
+ * @phy_addr: PHY address on MDIO interface
* @value: PHY register value
*
* Reads specified PHY register value
@@ -4619,7 +4600,7 @@ i40e_status i40e_read_phy_register_clause22(struct i40e_hw *hw,
* i40e_write_phy_register_clause22
* @hw: pointer to the HW structure
* @reg: register address in the page
- * @phy_adr: PHY address on MDIO interface
+ * @phy_addr: PHY address on MDIO interface
* @value: PHY register value
*
* Writes specified PHY register value
@@ -4660,7 +4641,7 @@ i40e_status i40e_write_phy_register_clause22(struct i40e_hw *hw,
* @hw: pointer to the HW structure
* @page: registers page number
* @reg: register address in the page
- * @phy_adr: PHY address on MDIO interface
+ * @phy_addr: PHY address on MDIO interface
* @value: PHY register value
*
* Reads specified PHY register value
@@ -4734,7 +4715,7 @@ phy_read_end:
* @hw: pointer to the HW structure
* @page: registers page number
* @reg: register address in the page
- * @phy_adr: PHY address on MDIO interface
+ * @phy_addr: PHY address on MDIO interface
* @value: PHY register value
*
* Writes value to specified PHY register
@@ -4801,7 +4782,7 @@ phy_write_end:
* @hw: pointer to the HW structure
* @page: registers page number
* @reg: register address in the page
- * @phy_adr: PHY address on MDIO interface
+ * @phy_addr: PHY address on MDIO interface
* @value: PHY register value
*
* Writes value to specified PHY register
@@ -4837,7 +4818,7 @@ i40e_status i40e_write_phy_register(struct i40e_hw *hw,
* @hw: pointer to the HW structure
* @page: registers page number
* @reg: register address in the page
- * @phy_adr: PHY address on MDIO interface
+ * @phy_addr: PHY address on MDIO interface
* @value: PHY register value
*
* Reads specified PHY register value
@@ -4872,7 +4853,6 @@ i40e_status i40e_read_phy_register(struct i40e_hw *hw,
* i40e_get_phy_address
* @hw: pointer to the HW structure
* @dev_num: PHY port num that address we want
- * @phy_addr: Returned PHY address
*
* Gets PHY address for current port
**/
@@ -5082,7 +5062,9 @@ i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
* i40e_led_set_phy
* @hw: pointer to the HW structure
* @on: true or false
+ * @led_addr: address of led register to use
* @mode: original val plus bit for set or ignore
+ *
* Set led's on or off when controlled by the PHY
*
**/
@@ -5371,6 +5353,7 @@ i40e_status_code i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
* @hw: pointer to the hw struct
* @buff: command buffer (size in bytes = buff_size)
* @buff_size: buffer size in bytes
+ * @flags: AdminQ command flags
* @cmd_details: pointer to command details structure or NULL
**/
enum
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
index 9fec728dc4b9..56bff8faf371 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
@@ -1,29 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2017 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include "i40e_adminq.h"
#include "i40e_prototype.h"
@@ -945,6 +921,70 @@ i40e_status i40e_init_dcb(struct i40e_hw *hw)
}
/**
+ * _i40e_read_lldp_cfg - generic read of LLDP Configuration data from NVM
+ * @hw: pointer to the HW structure
+ * @lldp_cfg: pointer to hold lldp configuration variables
+ * @module: address of the module pointer
+ * @word_offset: offset of LLDP configuration
+ *
+ * Reads the LLDP configuration data from NVM using passed addresses
+ **/
+static i40e_status _i40e_read_lldp_cfg(struct i40e_hw *hw,
+ struct i40e_lldp_variables *lldp_cfg,
+ u8 module, u32 word_offset)
+{
+ u32 address, offset = (2 * word_offset);
+ i40e_status ret;
+ __le16 raw_mem;
+ u16 mem;
+
+ ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (ret)
+ return ret;
+
+ ret = i40e_aq_read_nvm(hw, 0x0, module * 2, sizeof(raw_mem), &raw_mem,
+ true, NULL);
+ i40e_release_nvm(hw);
+ if (ret)
+ return ret;
+
+ mem = le16_to_cpu(raw_mem);
+ /* Check if this pointer needs to be read in word size or 4K sector
+ * units.
+ */
+ if (mem & I40E_PTR_TYPE)
+ address = (0x7FFF & mem) * 4096;
+ else
+ address = (0x7FFF & mem) * 2;
+
+ ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (ret)
+ goto err_lldp_cfg;
+
+ ret = i40e_aq_read_nvm(hw, module, offset, sizeof(raw_mem), &raw_mem,
+ true, NULL);
+ i40e_release_nvm(hw);
+ if (ret)
+ return ret;
+
+ mem = le16_to_cpu(raw_mem);
+ offset = mem + word_offset;
+ offset *= 2;
+
+ ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (ret)
+ goto err_lldp_cfg;
+
+ ret = i40e_aq_read_nvm(hw, 0, address + offset,
+ sizeof(struct i40e_lldp_variables), lldp_cfg,
+ true, NULL);
+ i40e_release_nvm(hw);
+
+err_lldp_cfg:
+ return ret;
+}
+
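For reference, the acquire/read/release discipline repeated three times in _i40e_read_lldp_cfg can be factored into a helper. A minimal sketch using only the driver calls visible in this hunk; example_read_word itself is hypothetical and assumes the driver's usual headers:

static i40e_status example_read_word(struct i40e_hw *hw, u8 module,
				     u32 offset, __le16 *out)
{
	i40e_status ret;

	/* Bracket every AdminQ NVM read with acquire/release of the NVM
	 * resource; check the read's result only after the release.
	 */
	ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
	if (ret)
		return ret;
	ret = i40e_aq_read_nvm(hw, module, offset, sizeof(*out), out,
			       true, NULL);
	i40e_release_nvm(hw);
	return ret;
}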
+/**
* i40e_read_lldp_cfg - read LLDP Configuration data from NVM
* @hw: pointer to the HW structure
* @lldp_cfg: pointer to hold lldp configuration variables
@@ -955,21 +995,34 @@ i40e_status i40e_read_lldp_cfg(struct i40e_hw *hw,
struct i40e_lldp_variables *lldp_cfg)
{
i40e_status ret = 0;
- u32 offset = (2 * I40E_NVM_LLDP_CFG_PTR);
+ u32 mem;
if (!lldp_cfg)
return I40E_ERR_PARAM;
ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
if (ret)
- goto err_lldp_cfg;
+ return ret;
- ret = i40e_aq_read_nvm(hw, I40E_SR_EMP_MODULE_PTR, offset,
- sizeof(struct i40e_lldp_variables),
- (u8 *)lldp_cfg,
- true, NULL);
+ ret = i40e_aq_read_nvm(hw, I40E_SR_NVM_CONTROL_WORD, 0, sizeof(mem),
+ &mem, true, NULL);
i40e_release_nvm(hw);
+ if (ret)
+ return ret;
+
+	/* Read the bit that indicates whether the NVM image is flat or
+	 * structured. A flat image keeps the LLDP configuration in shadow
+	 * RAM, so a different module/offset pair is passed in each case.
+	 */
+ if (mem & I40E_SR_NVM_MAP_STRUCTURE_TYPE) {
+ /* Flat NVM case */
+ ret = _i40e_read_lldp_cfg(hw, lldp_cfg, I40E_SR_EMP_MODULE_PTR,
+ I40E_SR_LLDP_CFG_PTR);
+ } else {
+ /* Good old structured NVM image */
+ ret = _i40e_read_lldp_cfg(hw, lldp_cfg, I40E_EMP_MODULE_PTR,
+ I40E_NVM_LLDP_CFG_PTR);
+ }
-err_lldp_cfg:
return ret;
}
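The module pointer decode at the top of _i40e_read_lldp_cfg is compact enough to miss: one bit of the pointer word selects the unit, 4 KB sectors versus 16-bit words, and the low 15 bits carry the index. A standalone sketch of that computation, assuming I40E_PTR_TYPE is bit 15 (the 0x7FFF mask in the hunk implies this):

#include <stdint.h>

#define EXAMPLE_PTR_TYPE 0x8000	/* assumed stand-in for I40E_PTR_TYPE */

/* Convert an NVM module pointer word into a byte address. */
static uint32_t nvm_ptr_to_byte_address(uint16_t ptr)
{
	if (ptr & EXAMPLE_PTR_TYPE)
		return (ptr & 0x7FFF) * 4096;	/* counts 4 KB sectors */
	return (ptr & 0x7FFF) * 2;		/* counts 16-bit words */
}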
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
index 4f806386cb22..2b748a60a843 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_DCB_H_
#define _I40E_DCB_H_
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
index 502818e3da78..9deae9a35423 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
@@ -1,29 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifdef CONFIG_I40E_DCB
#include "i40e.h"
@@ -47,7 +23,7 @@ static void i40e_get_pfc_delay(struct i40e_hw *hw, u16 *delay)
/**
* i40e_dcbnl_ieee_getets - retrieve local IEEE ETS configuration
- * @netdev: the corresponding netdev
+ * @dev: the corresponding netdev
* @ets: structure to hold the ETS information
*
* Returns local IEEE ETS configuration
@@ -86,8 +62,8 @@ static int i40e_dcbnl_ieee_getets(struct net_device *dev,
/**
* i40e_dcbnl_ieee_getpfc - retrieve local IEEE PFC configuration
- * @netdev: the corresponding netdev
- * @ets: structure to hold the PFC information
+ * @dev: the corresponding netdev
+ * @pfc: structure to hold the PFC information
*
* Returns local IEEE PFC configuration
**/
@@ -119,7 +95,7 @@ static int i40e_dcbnl_ieee_getpfc(struct net_device *dev,
/**
* i40e_dcbnl_getdcbx - retrieve current DCBx capability
- * @netdev: the corresponding netdev
+ * @dev: the corresponding netdev
*
* Returns DCBx capability features
**/
@@ -132,7 +108,8 @@ static u8 i40e_dcbnl_getdcbx(struct net_device *dev)
/**
* i40e_dcbnl_get_perm_hw_addr - MAC address used by DCBx
- * @netdev: the corresponding netdev
+ * @dev: the corresponding netdev
+ * @perm_addr: buffer to store the MAC address
*
* Returns the SAN MAC address used for LLDP exchange
**/
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index d494dcaf18d0..56b911a5dd8b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -1,29 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifdef CONFIG_DEBUG_FS
@@ -36,8 +12,8 @@ static struct dentry *i40e_dbg_root;
/**
* i40e_dbg_find_vsi - searches for the vsi with the given seid
- * @pf - the PF structure to search for the vsi
- * @seid - seid of the vsi it is searching for
+ * @pf: the PF structure to search for the vsi
+ * @seid: seid of the vsi it is searching for
**/
static struct i40e_vsi *i40e_dbg_find_vsi(struct i40e_pf *pf, int seid)
{
@@ -55,8 +31,8 @@ static struct i40e_vsi *i40e_dbg_find_vsi(struct i40e_pf *pf, int seid)
/**
* i40e_dbg_find_veb - searches for the veb with the given seid
- * @pf - the PF structure to search for the veb
- * @seid - seid of the veb it is searching for
+ * @pf: the PF structure to search for the veb
+ * @seid: seid of the veb it is searching for
**/
static struct i40e_veb *i40e_dbg_find_veb(struct i40e_pf *pf, int seid)
{
diff --git a/drivers/net/ethernet/intel/i40e/i40e_devids.h b/drivers/net/ethernet/intel/i40e/i40e_devids.h
index ad6a66ccb576..334b05ff685a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_devids.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_devids.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_DEVIDS_H_
#define _I40E_DEVIDS_H_
diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.c b/drivers/net/ethernet/intel/i40e/i40e_diag.c
index df3e60470f8b..ef4d3762bf37 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_diag.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_diag.c
@@ -1,29 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include "i40e_diag.h"
#include "i40e_prototype.h"
diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.h b/drivers/net/ethernet/intel/i40e/i40e_diag.h
index be8341763475..c3340f320a18 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_diag.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_diag.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_DIAG_H_
#define _I40E_DIAG_H_
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index b974482ff630..6947a2a571cb 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -1,29 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
/* ethtool support for i40e */
@@ -43,13 +19,13 @@ struct i40e_stats {
}
#define I40E_NETDEV_STAT(_net_stat) \
- I40E_STAT(struct rtnl_link_stats64, #_net_stat, _net_stat)
+ I40E_STAT(struct rtnl_link_stats64, #_net_stat, _net_stat)
#define I40E_PF_STAT(_name, _stat) \
- I40E_STAT(struct i40e_pf, _name, _stat)
+ I40E_STAT(struct i40e_pf, _name, _stat)
#define I40E_VSI_STAT(_name, _stat) \
- I40E_STAT(struct i40e_vsi, _name, _stat)
+ I40E_STAT(struct i40e_vsi, _name, _stat)
#define I40E_VEB_STAT(_name, _stat) \
- I40E_STAT(struct i40e_veb, _name, _stat)
+ I40E_STAT(struct i40e_veb, _name, _stat)
static const struct i40e_stats i40e_gstrings_net_stats[] = {
I40E_NETDEV_STAT(rx_packets),
@@ -66,18 +42,18 @@ static const struct i40e_stats i40e_gstrings_net_stats[] = {
};
static const struct i40e_stats i40e_gstrings_veb_stats[] = {
- I40E_VEB_STAT("rx_bytes", stats.rx_bytes),
- I40E_VEB_STAT("tx_bytes", stats.tx_bytes),
- I40E_VEB_STAT("rx_unicast", stats.rx_unicast),
- I40E_VEB_STAT("tx_unicast", stats.tx_unicast),
- I40E_VEB_STAT("rx_multicast", stats.rx_multicast),
- I40E_VEB_STAT("tx_multicast", stats.tx_multicast),
- I40E_VEB_STAT("rx_broadcast", stats.rx_broadcast),
- I40E_VEB_STAT("tx_broadcast", stats.tx_broadcast),
- I40E_VEB_STAT("rx_discards", stats.rx_discards),
- I40E_VEB_STAT("tx_discards", stats.tx_discards),
- I40E_VEB_STAT("tx_errors", stats.tx_errors),
- I40E_VEB_STAT("rx_unknown_protocol", stats.rx_unknown_protocol),
+ I40E_VEB_STAT("veb.rx_bytes", stats.rx_bytes),
+ I40E_VEB_STAT("veb.tx_bytes", stats.tx_bytes),
+ I40E_VEB_STAT("veb.rx_unicast", stats.rx_unicast),
+ I40E_VEB_STAT("veb.tx_unicast", stats.tx_unicast),
+ I40E_VEB_STAT("veb.rx_multicast", stats.rx_multicast),
+ I40E_VEB_STAT("veb.tx_multicast", stats.tx_multicast),
+ I40E_VEB_STAT("veb.rx_broadcast", stats.rx_broadcast),
+ I40E_VEB_STAT("veb.tx_broadcast", stats.tx_broadcast),
+ I40E_VEB_STAT("veb.rx_discards", stats.rx_discards),
+ I40E_VEB_STAT("veb.tx_discards", stats.tx_discards),
+ I40E_VEB_STAT("veb.tx_errors", stats.tx_errors),
+ I40E_VEB_STAT("veb.rx_unknown_protocol", stats.rx_unknown_protocol),
};
static const struct i40e_stats i40e_gstrings_misc_stats[] = {
@@ -90,6 +66,7 @@ static const struct i40e_stats i40e_gstrings_misc_stats[] = {
I40E_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol),
I40E_VSI_STAT("tx_linearize", tx_linearize),
I40E_VSI_STAT("tx_force_wb", tx_force_wb),
+ I40E_VSI_STAT("tx_busy", tx_busy),
I40E_VSI_STAT("rx_alloc_fail", rx_buf_failed),
I40E_VSI_STAT("rx_pg_alloc_fail", rx_page_failed),
};
@@ -105,76 +82,77 @@ static const struct i40e_stats i40e_gstrings_misc_stats[] = {
* is queried on the base PF netdev, not on the VMDq or FCoE netdev.
*/
static const struct i40e_stats i40e_gstrings_stats[] = {
- I40E_PF_STAT("rx_bytes", stats.eth.rx_bytes),
- I40E_PF_STAT("tx_bytes", stats.eth.tx_bytes),
- I40E_PF_STAT("rx_unicast", stats.eth.rx_unicast),
- I40E_PF_STAT("tx_unicast", stats.eth.tx_unicast),
- I40E_PF_STAT("rx_multicast", stats.eth.rx_multicast),
- I40E_PF_STAT("tx_multicast", stats.eth.tx_multicast),
- I40E_PF_STAT("rx_broadcast", stats.eth.rx_broadcast),
- I40E_PF_STAT("tx_broadcast", stats.eth.tx_broadcast),
- I40E_PF_STAT("tx_errors", stats.eth.tx_errors),
- I40E_PF_STAT("rx_dropped", stats.eth.rx_discards),
- I40E_PF_STAT("tx_dropped_link_down", stats.tx_dropped_link_down),
- I40E_PF_STAT("rx_crc_errors", stats.crc_errors),
- I40E_PF_STAT("illegal_bytes", stats.illegal_bytes),
- I40E_PF_STAT("mac_local_faults", stats.mac_local_faults),
- I40E_PF_STAT("mac_remote_faults", stats.mac_remote_faults),
- I40E_PF_STAT("tx_timeout", tx_timeout_count),
- I40E_PF_STAT("rx_csum_bad", hw_csum_rx_error),
- I40E_PF_STAT("rx_length_errors", stats.rx_length_errors),
- I40E_PF_STAT("link_xon_rx", stats.link_xon_rx),
- I40E_PF_STAT("link_xoff_rx", stats.link_xoff_rx),
- I40E_PF_STAT("link_xon_tx", stats.link_xon_tx),
- I40E_PF_STAT("link_xoff_tx", stats.link_xoff_tx),
- I40E_PF_STAT("priority_xon_rx", stats.priority_xon_rx),
- I40E_PF_STAT("priority_xoff_rx", stats.priority_xoff_rx),
- I40E_PF_STAT("priority_xon_tx", stats.priority_xon_tx),
- I40E_PF_STAT("priority_xoff_tx", stats.priority_xoff_tx),
- I40E_PF_STAT("rx_size_64", stats.rx_size_64),
- I40E_PF_STAT("rx_size_127", stats.rx_size_127),
- I40E_PF_STAT("rx_size_255", stats.rx_size_255),
- I40E_PF_STAT("rx_size_511", stats.rx_size_511),
- I40E_PF_STAT("rx_size_1023", stats.rx_size_1023),
- I40E_PF_STAT("rx_size_1522", stats.rx_size_1522),
- I40E_PF_STAT("rx_size_big", stats.rx_size_big),
- I40E_PF_STAT("tx_size_64", stats.tx_size_64),
- I40E_PF_STAT("tx_size_127", stats.tx_size_127),
- I40E_PF_STAT("tx_size_255", stats.tx_size_255),
- I40E_PF_STAT("tx_size_511", stats.tx_size_511),
- I40E_PF_STAT("tx_size_1023", stats.tx_size_1023),
- I40E_PF_STAT("tx_size_1522", stats.tx_size_1522),
- I40E_PF_STAT("tx_size_big", stats.tx_size_big),
- I40E_PF_STAT("rx_undersize", stats.rx_undersize),
- I40E_PF_STAT("rx_fragments", stats.rx_fragments),
- I40E_PF_STAT("rx_oversize", stats.rx_oversize),
- I40E_PF_STAT("rx_jabber", stats.rx_jabber),
- I40E_PF_STAT("VF_admin_queue_requests", vf_aq_requests),
- I40E_PF_STAT("arq_overflows", arq_overflows),
- I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
- I40E_PF_STAT("tx_hwtstamp_skipped", tx_hwtstamp_skipped),
- I40E_PF_STAT("fdir_flush_cnt", fd_flush_cnt),
- I40E_PF_STAT("fdir_atr_match", stats.fd_atr_match),
- I40E_PF_STAT("fdir_atr_tunnel_match", stats.fd_atr_tunnel_match),
- I40E_PF_STAT("fdir_atr_status", stats.fd_atr_status),
- I40E_PF_STAT("fdir_sb_match", stats.fd_sb_match),
- I40E_PF_STAT("fdir_sb_status", stats.fd_sb_status),
+ I40E_PF_STAT("port.rx_bytes", stats.eth.rx_bytes),
+ I40E_PF_STAT("port.tx_bytes", stats.eth.tx_bytes),
+ I40E_PF_STAT("port.rx_unicast", stats.eth.rx_unicast),
+ I40E_PF_STAT("port.tx_unicast", stats.eth.tx_unicast),
+ I40E_PF_STAT("port.rx_multicast", stats.eth.rx_multicast),
+ I40E_PF_STAT("port.tx_multicast", stats.eth.tx_multicast),
+ I40E_PF_STAT("port.rx_broadcast", stats.eth.rx_broadcast),
+ I40E_PF_STAT("port.tx_broadcast", stats.eth.tx_broadcast),
+ I40E_PF_STAT("port.tx_errors", stats.eth.tx_errors),
+ I40E_PF_STAT("port.rx_dropped", stats.eth.rx_discards),
+ I40E_PF_STAT("port.tx_dropped_link_down", stats.tx_dropped_link_down),
+ I40E_PF_STAT("port.rx_crc_errors", stats.crc_errors),
+ I40E_PF_STAT("port.illegal_bytes", stats.illegal_bytes),
+ I40E_PF_STAT("port.mac_local_faults", stats.mac_local_faults),
+ I40E_PF_STAT("port.mac_remote_faults", stats.mac_remote_faults),
+ I40E_PF_STAT("port.tx_timeout", tx_timeout_count),
+ I40E_PF_STAT("port.rx_csum_bad", hw_csum_rx_error),
+ I40E_PF_STAT("port.rx_length_errors", stats.rx_length_errors),
+ I40E_PF_STAT("port.link_xon_rx", stats.link_xon_rx),
+ I40E_PF_STAT("port.link_xoff_rx", stats.link_xoff_rx),
+ I40E_PF_STAT("port.link_xon_tx", stats.link_xon_tx),
+ I40E_PF_STAT("port.link_xoff_tx", stats.link_xoff_tx),
+ I40E_PF_STAT("port.rx_size_64", stats.rx_size_64),
+ I40E_PF_STAT("port.rx_size_127", stats.rx_size_127),
+ I40E_PF_STAT("port.rx_size_255", stats.rx_size_255),
+ I40E_PF_STAT("port.rx_size_511", stats.rx_size_511),
+ I40E_PF_STAT("port.rx_size_1023", stats.rx_size_1023),
+ I40E_PF_STAT("port.rx_size_1522", stats.rx_size_1522),
+ I40E_PF_STAT("port.rx_size_big", stats.rx_size_big),
+ I40E_PF_STAT("port.tx_size_64", stats.tx_size_64),
+ I40E_PF_STAT("port.tx_size_127", stats.tx_size_127),
+ I40E_PF_STAT("port.tx_size_255", stats.tx_size_255),
+ I40E_PF_STAT("port.tx_size_511", stats.tx_size_511),
+ I40E_PF_STAT("port.tx_size_1023", stats.tx_size_1023),
+ I40E_PF_STAT("port.tx_size_1522", stats.tx_size_1522),
+ I40E_PF_STAT("port.tx_size_big", stats.tx_size_big),
+ I40E_PF_STAT("port.rx_undersize", stats.rx_undersize),
+ I40E_PF_STAT("port.rx_fragments", stats.rx_fragments),
+ I40E_PF_STAT("port.rx_oversize", stats.rx_oversize),
+ I40E_PF_STAT("port.rx_jabber", stats.rx_jabber),
+ I40E_PF_STAT("port.VF_admin_queue_requests", vf_aq_requests),
+ I40E_PF_STAT("port.arq_overflows", arq_overflows),
+ I40E_PF_STAT("port.tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
+ I40E_PF_STAT("port.rx_hwtstamp_cleared", rx_hwtstamp_cleared),
+ I40E_PF_STAT("port.tx_hwtstamp_skipped", tx_hwtstamp_skipped),
+ I40E_PF_STAT("port.fdir_flush_cnt", fd_flush_cnt),
+ I40E_PF_STAT("port.fdir_atr_match", stats.fd_atr_match),
+ I40E_PF_STAT("port.fdir_atr_tunnel_match", stats.fd_atr_tunnel_match),
+ I40E_PF_STAT("port.fdir_atr_status", stats.fd_atr_status),
+ I40E_PF_STAT("port.fdir_sb_match", stats.fd_sb_match),
+ I40E_PF_STAT("port.fdir_sb_status", stats.fd_sb_status),
/* LPI stats */
- I40E_PF_STAT("tx_lpi_status", stats.tx_lpi_status),
- I40E_PF_STAT("rx_lpi_status", stats.rx_lpi_status),
- I40E_PF_STAT("tx_lpi_count", stats.tx_lpi_count),
- I40E_PF_STAT("rx_lpi_count", stats.rx_lpi_count),
+ I40E_PF_STAT("port.tx_lpi_status", stats.tx_lpi_status),
+ I40E_PF_STAT("port.rx_lpi_status", stats.rx_lpi_status),
+ I40E_PF_STAT("port.tx_lpi_count", stats.tx_lpi_count),
+ I40E_PF_STAT("port.rx_lpi_count", stats.rx_lpi_count),
};
-#define I40E_QUEUE_STATS_LEN(n) \
- (((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs \
+/* We use num_tx_queues here as a proxy for the maximum number of queues
+ * available because we always allocate queues symmetrically.
+ */
+#define I40E_MAX_NUM_QUEUES(n) ((n)->num_tx_queues)
+#define I40E_QUEUE_STATS_LEN(n) \
+ (I40E_MAX_NUM_QUEUES(n) \
* 2 /* Tx and Rx together */ \
* (sizeof(struct i40e_queue_stats) / sizeof(u64)))
#define I40E_GLOBAL_STATS_LEN ARRAY_SIZE(i40e_gstrings_stats)
-#define I40E_NETDEV_STATS_LEN ARRAY_SIZE(i40e_gstrings_net_stats)
+#define I40E_NETDEV_STATS_LEN ARRAY_SIZE(i40e_gstrings_net_stats)
#define I40E_MISC_STATS_LEN ARRAY_SIZE(i40e_gstrings_misc_stats)
-#define I40E_VSI_STATS_LEN(n) (I40E_NETDEV_STATS_LEN + \
+#define I40E_VSI_STATS_LEN(n) (I40E_NETDEV_STATS_LEN + \
I40E_MISC_STATS_LEN + \
I40E_QUEUE_STATS_LEN((n)))
#define I40E_PFC_STATS_LEN ( \
@@ -977,7 +955,9 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
ethtool_link_ksettings_test_link_mode(ks, advertising,
10000baseCR_Full) ||
ethtool_link_ksettings_test_link_mode(ks, advertising,
- 10000baseSR_Full))
+ 10000baseSR_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 10000baseLR_Full))
config.link_speed |= I40E_LINK_SPEED_10GB;
if (ethtool_link_ksettings_test_link_mode(ks, advertising,
20000baseKR2_Full))
@@ -1079,6 +1059,9 @@ static int i40e_nway_reset(struct net_device *netdev)
/**
* i40e_get_pauseparam - Get Flow Control status
+ * @netdev: netdevice structure
+ * @pause: buffer to return pause parameters
+ *
* Return tx/rx-pause status
**/
static void i40e_get_pauseparam(struct net_device *netdev,
@@ -1677,6 +1660,32 @@ done:
return err;
}
+/**
+ * i40e_get_stats_count - return the stats count for a device
+ * @netdev: the netdev to return the count for
+ *
+ * Returns the total number of statistics for this netdev. Note that even
+ * though this is a function, the count it reports for a given netdev must
+ * never change. Basing the count on static values such as the maximum
+ * number of queues or the device type is fine. However, the API for
+ * obtaining stats is *not* safe against counts derived from non-static
+ * values such as the *current* number of queues or runtime flags.
+ *
+ * If a statistic is not always enabled, return it as part of the count
+ * anyway, always return its string, and report its value as zero.
+ **/
+static int i40e_get_stats_count(struct net_device *netdev)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+
+ if (vsi == pf->vsi[pf->lan_vsi] && pf->hw.partition_id == 1)
+ return I40E_PF_STATS_LEN(netdev) + I40E_VEB_STATS_TOTAL;
+ else
+ return I40E_VSI_STATS_LEN(netdev);
+}
+
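The static-count rule documented above is easiest to see in miniature. A hypothetical driver (names are illustrative, not i40e API):

#define EXAMPLE_MAX_QUEUES	16	/* static hardware maximum */
#define EXAMPLE_STATS_PER_QUEUE	4	/* tx/rx packets and bytes */

static int example_get_stats_count(void)
{
	/* Sized from the static maximum, so the count agrees with the
	 * string table even if queues are reconfigured between the
	 * ethtool count, strings, and stats callbacks. Using the *live*
	 * queue count here could change between callbacks and corrupt
	 * the output buffer.
	 */
	return EXAMPLE_MAX_QUEUES * EXAMPLE_STATS_PER_QUEUE;
}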
static int i40e_get_sset_count(struct net_device *netdev, int sset)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
@@ -1687,16 +1696,7 @@ static int i40e_get_sset_count(struct net_device *netdev, int sset)
case ETH_SS_TEST:
return I40E_TEST_LEN;
case ETH_SS_STATS:
- if (vsi == pf->vsi[pf->lan_vsi] && pf->hw.partition_id == 1) {
- int len = I40E_PF_STATS_LEN(netdev);
-
- if ((pf->lan_veb != I40E_NO_VEB) &&
- (pf->flags & I40E_FLAG_VEB_STATS_ENABLED))
- len += I40E_VEB_STATS_TOTAL;
- return len;
- } else {
- return I40E_VSI_STATS_LEN(netdev);
- }
+ return i40e_get_stats_count(netdev);
case ETH_SS_PRIV_FLAGS:
return I40E_PRIV_FLAGS_STR_LEN +
(pf->hw.pf_id == 0 ? I40E_GL_PRIV_FLAGS_STR_LEN : 0);
@@ -1705,6 +1705,20 @@ static int i40e_get_sset_count(struct net_device *netdev, int sset)
}
}
+/**
+ * i40e_get_ethtool_stats - copy stat values into supplied buffer
+ * @netdev: the netdev to collect stats for
+ * @stats: ethtool stats command structure
+ * @data: ethtool supplied buffer
+ *
+ * Copy the stats values for this netdev into the buffer. Expects data to be
+ * pre-allocated to the size returned by i40e_get_stats_count. Note that all
+ * statistics must be copied in a static order, and the count must not change
+ * for a given netdev. See i40e_get_stats_count for more details.
+ *
+ * If a statistic is not currently valid (such as a disabled queue), this
+ * function reports its value as zero.
+ **/
static void i40e_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data)
{
@@ -1712,47 +1726,54 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
struct i40e_ring *tx_ring, *rx_ring;
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
- unsigned int j;
- int i = 0;
+ unsigned int i;
char *p;
struct rtnl_link_stats64 *net_stats = i40e_get_vsi_stats_struct(vsi);
unsigned int start;
i40e_update_stats(vsi);
- for (j = 0; j < I40E_NETDEV_STATS_LEN; j++) {
- p = (char *)net_stats + i40e_gstrings_net_stats[j].stat_offset;
- data[i++] = (i40e_gstrings_net_stats[j].sizeof_stat ==
+ for (i = 0; i < I40E_NETDEV_STATS_LEN; i++) {
+ p = (char *)net_stats + i40e_gstrings_net_stats[i].stat_offset;
+ *(data++) = (i40e_gstrings_net_stats[i].sizeof_stat ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
- for (j = 0; j < I40E_MISC_STATS_LEN; j++) {
- p = (char *)vsi + i40e_gstrings_misc_stats[j].stat_offset;
- data[i++] = (i40e_gstrings_misc_stats[j].sizeof_stat ==
+ for (i = 0; i < I40E_MISC_STATS_LEN; i++) {
+ p = (char *)vsi + i40e_gstrings_misc_stats[i].stat_offset;
+ *(data++) = (i40e_gstrings_misc_stats[i].sizeof_stat ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
rcu_read_lock();
- for (j = 0; j < vsi->num_queue_pairs; j++) {
- tx_ring = READ_ONCE(vsi->tx_rings[j]);
+ for (i = 0; i < I40E_MAX_NUM_QUEUES(netdev); i++) {
+ tx_ring = READ_ONCE(vsi->tx_rings[i]);
- if (!tx_ring)
+ if (!tx_ring) {
+ /* Write zero placeholders for this queue's stats so the
+ * buffer layout stays aligned with the string table
+ */
+ *(data++) = 0;
+ *(data++) = 0;
+ *(data++) = 0;
+ *(data++) = 0;
continue;
+ }
/* process Tx ring statistics */
do {
start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
- data[i] = tx_ring->stats.packets;
- data[i + 1] = tx_ring->stats.bytes;
+ data[0] = tx_ring->stats.packets;
+ data[1] = tx_ring->stats.bytes;
} while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
- i += 2;
+ data += 2;
/* Rx ring is the 2nd half of the queue pair */
rx_ring = &tx_ring[1];
do {
start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
- data[i] = rx_ring->stats.packets;
- data[i + 1] = rx_ring->stats.bytes;
+ data[0] = rx_ring->stats.packets;
+ data[1] = rx_ring->stats.bytes;
} while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
- i += 2;
+ data += 2;
}
rcu_read_unlock();
if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1)
@@ -1762,38 +1783,131 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
(pf->flags & I40E_FLAG_VEB_STATS_ENABLED)) {
struct i40e_veb *veb = pf->veb[pf->lan_veb];
- for (j = 0; j < I40E_VEB_STATS_LEN; j++) {
+ for (i = 0; i < I40E_VEB_STATS_LEN; i++) {
p = (char *)veb;
- p += i40e_gstrings_veb_stats[j].stat_offset;
- data[i++] = (i40e_gstrings_veb_stats[j].sizeof_stat ==
+ p += i40e_gstrings_veb_stats[i].stat_offset;
+ *(data++) = (i40e_gstrings_veb_stats[i].sizeof_stat ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
- for (j = 0; j < I40E_MAX_TRAFFIC_CLASS; j++) {
- data[i++] = veb->tc_stats.tc_tx_packets[j];
- data[i++] = veb->tc_stats.tc_tx_bytes[j];
- data[i++] = veb->tc_stats.tc_rx_packets[j];
- data[i++] = veb->tc_stats.tc_rx_bytes[j];
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ *(data++) = veb->tc_stats.tc_tx_packets[i];
+ *(data++) = veb->tc_stats.tc_tx_bytes[i];
+ *(data++) = veb->tc_stats.tc_rx_packets[i];
+ *(data++) = veb->tc_stats.tc_rx_bytes[i];
}
+ } else {
+ data += I40E_VEB_STATS_TOTAL;
}
- for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) {
- p = (char *)pf + i40e_gstrings_stats[j].stat_offset;
- data[i++] = (i40e_gstrings_stats[j].sizeof_stat ==
+ for (i = 0; i < I40E_GLOBAL_STATS_LEN; i++) {
+ p = (char *)pf + i40e_gstrings_stats[i].stat_offset;
+ *(data++) = (i40e_gstrings_stats[i].sizeof_stat ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
- for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
- data[i++] = pf->stats.priority_xon_tx[j];
- data[i++] = pf->stats.priority_xoff_tx[j];
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+ *(data++) = pf->stats.priority_xon_tx[i];
+ *(data++) = pf->stats.priority_xoff_tx[i];
}
- for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
- data[i++] = pf->stats.priority_xon_rx[j];
- data[i++] = pf->stats.priority_xoff_rx[j];
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+ *(data++) = pf->stats.priority_xon_rx[i];
+ *(data++) = pf->stats.priority_xoff_rx[i];
}
- for (j = 0; j < I40E_MAX_USER_PRIORITY; j++)
- data[i++] = pf->stats.priority_xon_2_xoff[j];
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
+ *(data++) = pf->stats.priority_xon_2_xoff[i];
}
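The zero placeholders in the queue loop above are what keep the values aligned with the string table when a ring is unallocated. The same pattern in isolation (types and names are illustrative):

#include <stdint.h>

struct example_ring { uint64_t packets, bytes; };

/* Emit one ring's two stats, or zero placeholders when the ring is
 * absent, so the output cursor always advances by the same amount.
 */
static uint64_t *emit_ring_stats(uint64_t *data,
				 const struct example_ring *ring)
{
	*data++ = ring ? ring->packets : 0;
	*data++ = ring ? ring->bytes : 0;
	return data;
}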
-static void i40e_get_strings(struct net_device *netdev, u32 stringset,
- u8 *data)
+/**
+ * i40e_get_stat_strings - copy stat strings into supplied buffer
+ * @netdev: the netdev to collect strings for
+ * @data: supplied buffer to copy strings into
+ *
+ * Copy the strings related to stats for this netdev. Expects data to be
+ * pre-allocated with the size reported by i40e_get_stats_count. Note that the
+ * strings must be copied in a static order and the total count must not
+ * change for a given netdev. See i40e_get_stats_count for more details.
+ **/
+static void i40e_get_stat_strings(struct net_device *netdev, u8 *data)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ unsigned int i;
+ u8 *p = data;
+
+ for (i = 0; i < I40E_NETDEV_STATS_LEN; i++) {
+ snprintf(data, ETH_GSTRING_LEN, "%s",
+ i40e_gstrings_net_stats[i].stat_string);
+ data += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < I40E_MISC_STATS_LEN; i++) {
+ snprintf(data, ETH_GSTRING_LEN, "%s",
+ i40e_gstrings_misc_stats[i].stat_string);
+ data += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < I40E_MAX_NUM_QUEUES(netdev); i++) {
+ snprintf(data, ETH_GSTRING_LEN, "tx-%u.tx_packets", i);
+ data += ETH_GSTRING_LEN;
+ snprintf(data, ETH_GSTRING_LEN, "tx-%u.tx_bytes", i);
+ data += ETH_GSTRING_LEN;
+ snprintf(data, ETH_GSTRING_LEN, "rx-%u.rx_packets", i);
+ data += ETH_GSTRING_LEN;
+ snprintf(data, ETH_GSTRING_LEN, "rx-%u.rx_bytes", i);
+ data += ETH_GSTRING_LEN;
+ }
+ if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1)
+ return;
+
+ for (i = 0; i < I40E_VEB_STATS_LEN; i++) {
+ snprintf(data, ETH_GSTRING_LEN, "%s",
+ i40e_gstrings_veb_stats[i].stat_string);
+ data += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ snprintf(data, ETH_GSTRING_LEN,
+ "veb.tc_%u_tx_packets", i);
+ data += ETH_GSTRING_LEN;
+ snprintf(data, ETH_GSTRING_LEN,
+ "veb.tc_%u_tx_bytes", i);
+ data += ETH_GSTRING_LEN;
+ snprintf(data, ETH_GSTRING_LEN,
+ "veb.tc_%u_rx_packets", i);
+ data += ETH_GSTRING_LEN;
+ snprintf(data, ETH_GSTRING_LEN,
+ "veb.tc_%u_rx_bytes", i);
+ data += ETH_GSTRING_LEN;
+ }
+
+ for (i = 0; i < I40E_GLOBAL_STATS_LEN; i++) {
+ snprintf(data, ETH_GSTRING_LEN, "%s",
+ i40e_gstrings_stats[i].stat_string);
+ data += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+ snprintf(data, ETH_GSTRING_LEN,
+ "port.tx_priority_%u_xon", i);
+ data += ETH_GSTRING_LEN;
+ snprintf(data, ETH_GSTRING_LEN,
+ "port.tx_priority_%u_xoff", i);
+ data += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+ snprintf(data, ETH_GSTRING_LEN,
+ "port.rx_priority_%u_xon", i);
+ data += ETH_GSTRING_LEN;
+ snprintf(data, ETH_GSTRING_LEN,
+ "port.rx_priority_%u_xoff", i);
+ data += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+ snprintf(data, ETH_GSTRING_LEN,
+ "port.rx_priority_%u_xon_2_xoff", i);
+ data += ETH_GSTRING_LEN;
+ }
+
+ WARN_ONCE(data - p != i40e_get_stats_count(netdev) * ETH_GSTRING_LEN,
+ "stat strings count mismatch!");
+}
+
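Each name emitted above occupies one fixed ETH_GSTRING_LEN slot; the cursor advances by the slot size, not the string length, which is what lets the values written by i40e_get_ethtool_stats line up by index. A sketch of that idiom (hypothetical helper, not driver code):

#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_GSTRING_LEN 32	/* stand-in for ETH_GSTRING_LEN */

/* Format one stat name into a fixed-width slot and return the next
 * slot; snprintf truncates safely if the name is too long.
 */
static uint8_t *emit_stat_name(uint8_t *data, const char *fmt,
			       unsigned int idx)
{
	snprintf((char *)data, EXAMPLE_GSTRING_LEN, fmt, idx);
	return data + EXAMPLE_GSTRING_LEN;
}

/* Usage: data = emit_stat_name(data, "tx-%u.tx_packets", i); */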
+static void i40e_get_priv_flag_strings(struct net_device *netdev, u8 *data)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
@@ -1801,98 +1915,33 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
char *p = (char *)data;
unsigned int i;
+ for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) {
+ snprintf(p, ETH_GSTRING_LEN, "%s",
+ i40e_gstrings_priv_flags[i].flag_string);
+ p += ETH_GSTRING_LEN;
+ }
+ if (pf->hw.pf_id != 0)
+ return;
+ for (i = 0; i < I40E_GL_PRIV_FLAGS_STR_LEN; i++) {
+ snprintf(p, ETH_GSTRING_LEN, "%s",
+ i40e_gl_gstrings_priv_flags[i].flag_string);
+ p += ETH_GSTRING_LEN;
+ }
+}
+
+static void i40e_get_strings(struct net_device *netdev, u32 stringset,
+ u8 *data)
+{
switch (stringset) {
case ETH_SS_TEST:
memcpy(data, i40e_gstrings_test,
I40E_TEST_LEN * ETH_GSTRING_LEN);
break;
case ETH_SS_STATS:
- for (i = 0; i < I40E_NETDEV_STATS_LEN; i++) {
- snprintf(p, ETH_GSTRING_LEN, "%s",
- i40e_gstrings_net_stats[i].stat_string);
- p += ETH_GSTRING_LEN;
- }
- for (i = 0; i < I40E_MISC_STATS_LEN; i++) {
- snprintf(p, ETH_GSTRING_LEN, "%s",
- i40e_gstrings_misc_stats[i].stat_string);
- p += ETH_GSTRING_LEN;
- }
- for (i = 0; i < vsi->num_queue_pairs; i++) {
- snprintf(p, ETH_GSTRING_LEN, "tx-%d.tx_packets", i);
- p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN, "tx-%d.tx_bytes", i);
- p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN, "rx-%d.rx_packets", i);
- p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN, "rx-%d.rx_bytes", i);
- p += ETH_GSTRING_LEN;
- }
- if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1)
- return;
-
- if ((pf->lan_veb != I40E_NO_VEB) &&
- (pf->flags & I40E_FLAG_VEB_STATS_ENABLED)) {
- for (i = 0; i < I40E_VEB_STATS_LEN; i++) {
- snprintf(p, ETH_GSTRING_LEN, "veb.%s",
- i40e_gstrings_veb_stats[i].stat_string);
- p += ETH_GSTRING_LEN;
- }
- for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
- snprintf(p, ETH_GSTRING_LEN,
- "veb.tc_%d_tx_packets", i);
- p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN,
- "veb.tc_%d_tx_bytes", i);
- p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN,
- "veb.tc_%d_rx_packets", i);
- p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN,
- "veb.tc_%d_rx_bytes", i);
- p += ETH_GSTRING_LEN;
- }
- }
- for (i = 0; i < I40E_GLOBAL_STATS_LEN; i++) {
- snprintf(p, ETH_GSTRING_LEN, "port.%s",
- i40e_gstrings_stats[i].stat_string);
- p += ETH_GSTRING_LEN;
- }
- for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
- snprintf(p, ETH_GSTRING_LEN,
- "port.tx_priority_%d_xon", i);
- p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN,
- "port.tx_priority_%d_xoff", i);
- p += ETH_GSTRING_LEN;
- }
- for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
- snprintf(p, ETH_GSTRING_LEN,
- "port.rx_priority_%d_xon", i);
- p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN,
- "port.rx_priority_%d_xoff", i);
- p += ETH_GSTRING_LEN;
- }
- for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
- snprintf(p, ETH_GSTRING_LEN,
- "port.rx_priority_%d_xon_2_xoff", i);
- p += ETH_GSTRING_LEN;
- }
- /* BUG_ON(p - data != I40E_STATS_LEN * ETH_GSTRING_LEN); */
+ i40e_get_stat_strings(netdev, data);
break;
case ETH_SS_PRIV_FLAGS:
- for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) {
- snprintf(p, ETH_GSTRING_LEN, "%s",
- i40e_gstrings_priv_flags[i].flag_string);
- p += ETH_GSTRING_LEN;
- }
- if (pf->hw.pf_id != 0)
- break;
- for (i = 0; i < I40E_GL_PRIV_FLAGS_STR_LEN; i++) {
- snprintf(p, ETH_GSTRING_LEN, "%s",
- i40e_gl_gstrings_priv_flags[i].flag_string);
- p += ETH_GSTRING_LEN;
- }
+ i40e_get_priv_flag_strings(netdev, data);
break;
default:
break;
@@ -2550,7 +2599,7 @@ static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd)
/**
* i40e_check_mask - Check whether a mask field is set
* @mask: the full mask value
- * @field; mask of the field to check
+ * @field: mask of the field to check
*
* If the given mask is fully set, return positive value. If the mask for the
* field is fully unset, return zero. Otherwise return a negative error code.
@@ -2621,6 +2670,7 @@ static int i40e_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp,
/**
* i40e_fill_rx_flow_user_data - Fill in user-defined data field
* @fsp: pointer to rx_flow specification
+ * @data: pointer to return userdef data
*
* Reads the userdef data structure and properly fills in the user defined
* fields of the rx_flow_spec.
@@ -2799,6 +2849,7 @@ no_input_set:
* i40e_get_rxnfc - command to get RX flow classification rules
* @netdev: network interface device structure
* @cmd: ethtool rxnfc command
+ * @rule_locs: pointer to store rule data
*
* Returns Success if the command is supported.
**/
@@ -2840,7 +2891,7 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
/**
* i40e_get_rss_hash_bits - Read RSS Hash bits from register
* @nfc: pointer to user request
- * @i_setc bits currently set
+ * @i_setc: bits currently set
*
* Returns value of bits to be set per user request
**/
@@ -2885,7 +2936,7 @@ static u64 i40e_get_rss_hash_bits(struct ethtool_rxnfc *nfc, u64 i_setc)
/**
* i40e_set_rss_hash_opt - Enable/Disable flow types for RSS hash
* @pf: pointer to the physical function struct
- * @cmd: ethtool rxnfc command
+ * @nfc: ethtool rxnfc command
*
* Returns Success if the flow input set is supported.
**/
@@ -3284,7 +3335,7 @@ static int i40e_add_flex_offset(struct list_head *flex_pit_list,
* __i40e_reprogram_flex_pit - Re-program specific FLX_PIT table
* @pf: Pointer to the PF structure
* @flex_pit_list: list of flexible src offsets in use
- * #flex_pit_start: index to first entry for this section of the table
+ * @flex_pit_start: index to first entry for this section of the table
*
* In order to handle flexible data, the hardware uses a table of values
* called the FLX_PIT table. This table is used to indicate which sections of
@@ -3398,7 +3449,7 @@ static void i40e_reprogram_flex_pit(struct i40e_pf *pf)
/**
* i40e_flow_str - Converts a flow_type into a human readable string
- * @flow_type: the flow type from a flow specification
+ * @fsp: the flow specification
*
* Currently only flow types we support are included here, and the string
* value attempts to match what ethtool would use to configure this flow type.
@@ -4103,7 +4154,7 @@ static unsigned int i40e_max_channels(struct i40e_vsi *vsi)
/**
* i40e_get_channels - Get the current channels enabled and max supported etc.
- * @netdev: network interface device structure
+ * @dev: network interface device structure
* @ch: ethtool channels structure
*
* We don't support separate tx and rx queues as channels. The other count
@@ -4112,7 +4163,7 @@ static unsigned int i40e_max_channels(struct i40e_vsi *vsi)
* q_vectors since we support a lot more queue pairs than q_vectors.
**/
static void i40e_get_channels(struct net_device *dev,
- struct ethtool_channels *ch)
+ struct ethtool_channels *ch)
{
struct i40e_netdev_priv *np = netdev_priv(dev);
struct i40e_vsi *vsi = np->vsi;
@@ -4131,14 +4182,14 @@ static void i40e_get_channels(struct net_device *dev,
/**
* i40e_set_channels - Set the new channels count.
- * @netdev: network interface device structure
+ * @dev: network interface device structure
* @ch: ethtool channels structure
*
* The new channels count may not be the same as requested by the user
* since it gets rounded down to a power of 2 value.
**/
static int i40e_set_channels(struct net_device *dev,
- struct ethtool_channels *ch)
+ struct ethtool_channels *ch)
{
const u8 drop = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
struct i40e_netdev_priv *np = netdev_priv(dev);
@@ -4273,6 +4324,7 @@ out:
* @netdev: network interface device structure
* @indir: indirection table
* @key: hash key
+ * @hfunc: hash function to use
*
* Returns -EINVAL if the table specifies an invalid queue id, otherwise
* returns 0 after programming the table.
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
index 6d4b590f851b..19ce93d7fd0a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
@@ -1,29 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include "i40e_osdep.h"
#include "i40e_register.h"
@@ -198,7 +174,6 @@ exit:
* @hw: pointer to our HW structure
* @hmc_info: pointer to the HMC configuration information structure
* @idx: the page index
- * @is_pf: distinguishes a VF from a PF
*
* This function:
* 1. Marks the entry in pd table (for paged address mode) or in sd table
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
index 7b5fd33d70ae..1c78de838857 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_hmc.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_HMC_H_
#define _I40E_HMC_H_
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
index cd40dc487b38..994011c38fb4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
@@ -1,29 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include "i40e_osdep.h"
#include "i40e_register.h"
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
index 79e1396735d9..c46a2c449e60 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_LAN_HMC_H_
#define _I40E_LAN_HMC_H_
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 16229998fb1e..c944bd10b03d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1,29 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2017 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include <linux/etherdevice.h>
#include <linux/of_net.h>
@@ -278,8 +254,8 @@ static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
/**
* i40e_find_vsi_from_id - searches for the vsi with the given id
- * @pf - the pf structure to search for the vsi
- * @id - id of the vsi it is searching for
+ * @pf: the pf structure to search for the vsi
+ * @id: id of the vsi it is searching for
**/
struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
{
@@ -435,6 +411,7 @@ static void i40e_get_netdev_stats_struct_tx(struct i40e_ring *ring,
/**
* i40e_get_netdev_stats_struct - Get statistics for netdev interface
* @netdev: network interface device structure
+ * @stats: data structure to store statistics
*
* Returns the address of the device statistics structure.
* The statistics are actually updated from the service task.
@@ -2027,7 +2004,7 @@ struct i40e_new_mac_filter *i40e_next_filter(struct i40e_new_mac_filter *next)
* from firmware
* @count: Number of filters added
* @add_list: return data from fw
- * @head: pointer to first filter in current batch
+ * @add_head: pointer to first filter in current batch
*
* MAC filter entries from list were slated to be added to device. Returns
* number of successful filters. Note that 0 does NOT mean success!
@@ -2134,6 +2111,7 @@ void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
/**
* i40e_aqc_broadcast_filter - Set promiscuous broadcast flags
* @vsi: pointer to the VSI
+ * @vsi_name: the VSI name
* @f: filter data
*
* This function sets or clears the promiscuous broadcast flags for VLAN
@@ -2840,6 +2818,7 @@ void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid)
/**
* i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
* @netdev: network interface to be adjusted
+ * @proto: unused protocol value
* @vid: vlan id to be added
*
* net_device_ops implementation for adding vlan ids
@@ -2862,8 +2841,26 @@ static int i40e_vlan_rx_add_vid(struct net_device *netdev,
}
/**
+ * i40e_vlan_rx_add_vid_up - Add a vlan id filter to HW offload in UP path
+ * @netdev: network interface to be adjusted
+ * @proto: unused protocol value
+ * @vid: vlan id to be added
+ **/
+static void i40e_vlan_rx_add_vid_up(struct net_device *netdev,
+ __always_unused __be16 proto, u16 vid)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+
+ if (vid >= VLAN_N_VID)
+ return;
+ set_bit(vid, vsi->active_vlans);
+}
+
+/**
* i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
* @netdev: network interface to be adjusted
+ * @proto: unused protocol value
* @vid: vlan id to be removed
*
* net_device_ops implementation for removing vlan ids
@@ -2902,8 +2899,8 @@ static void i40e_restore_vlan(struct i40e_vsi *vsi)
i40e_vlan_stripping_disable(vsi);
for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
- i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q),
- vid);
+ i40e_vlan_rx_add_vid_up(vsi->netdev, htons(ETH_P_8021Q),
+ vid);
}
/**
@@ -3485,7 +3482,7 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
/**
* i40e_enable_misc_int_causes - enable the non-queue interrupts
- * @hw: ptr to the hardware info
+ * @pf: pointer to private device data structure
**/
static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
{
@@ -4255,8 +4252,8 @@ static void i40e_control_tx_q(struct i40e_pf *pf, int pf_q, bool enable)
* @is_xdp: true if the queue is used for XDP
* @enable: start or stop the queue
**/
-static int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q,
- bool is_xdp, bool enable)
+int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q,
+ bool is_xdp, bool enable)
{
int ret;
@@ -4301,7 +4298,6 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
if (ret)
break;
}
-
return ret;
}
@@ -4340,9 +4336,9 @@ static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
* @pf_q: the PF queue to configure
* @enable: start or stop the queue
*
- * This function enables or disables a single queue. Note that any delay
- * required after the operation is expected to be handled by the caller of
- * this function.
+ * This function enables or disables a single queue. Note that
+ * any delay required after the operation is expected to be
+ * handled by the caller of this function.
**/
static void i40e_control_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
{
@@ -4372,6 +4368,30 @@ static void i40e_control_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
}
/**
+ * i40e_control_wait_rx_q - start or stop a single Rx queue and wait for it
+ * @pf: the PF structure
+ * @pf_q: queue being configured
+ * @enable: start or stop the queue
+ *
+ * This function enables or disables a single queue along with waiting
+ * for the change to finish. The caller of this function should handle
+ * the delays needed in the case of disabling queues.
+ **/
+int i40e_control_wait_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
+{
+	i40e_control_rx_q(pf, pf_q, enable);
+
+	/* wait for the change to finish */
+	return i40e_pf_rxq_wait(pf, pf_q, enable);
+}
+
+/**
* i40e_vsi_control_rx - Start or stop a VSI's rings
* @vsi: the VSI being configured
* @enable: start or stop the rings
@@ -4383,10 +4403,7 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
pf_q = vsi->base_queue;
for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
- i40e_control_rx_q(pf, pf_q, enable);
-
- /* wait for the change to finish */
- ret = i40e_pf_rxq_wait(pf, pf_q, enable);
+ ret = i40e_control_wait_rx_q(pf, pf_q, enable);
if (ret) {
dev_info(&pf->pdev->dev,
"VSI seid %d Rx ring %d %sable timeout\n",
@@ -5096,7 +5113,7 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
* i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
* @vsi: the VSI being configured
* @enabled_tc: TC bitmap
- * @bw_credits: BW shared credits per TC
+ * @bw_share: BW shared credits per TC
*
* Returns 0 on success, negative value on failure
**/
@@ -6353,6 +6370,7 @@ out:
/**
* i40e_print_link_message - print link up or down
* @vsi: the VSI for which link needs a message
+ * @isup: true if link is up, false otherwise
*/
void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
{
@@ -7212,8 +7230,7 @@ static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
if (mask->dst == cpu_to_be32(0xffffffff)) {
field_flags |= I40E_CLOUD_FIELD_IIP;
} else {
- mask->dst = be32_to_cpu(mask->dst);
- dev_err(&pf->pdev->dev, "Bad ip dst mask %pI4\n",
+ dev_err(&pf->pdev->dev, "Bad ip dst mask %pI4b\n",
&mask->dst);
return I40E_ERR_CONFIG;
}
@@ -7223,8 +7240,7 @@ static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
if (mask->src == cpu_to_be32(0xffffffff)) {
field_flags |= I40E_CLOUD_FIELD_IIP;
} else {
- mask->src = be32_to_cpu(mask->src);
- dev_err(&pf->pdev->dev, "Bad ip src mask %pI4\n",
+ dev_err(&pf->pdev->dev, "Bad ip src mask %pI4b\n",
&mask->src);
return I40E_ERR_CONFIG;
}
@@ -9691,9 +9707,9 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
i40e_flush(hw);
}
-static const char *i40e_tunnel_name(struct i40e_udp_port_config *port)
+static const char *i40e_tunnel_name(u8 type)
{
- switch (port->type) {
+ switch (type) {
case UDP_TUNNEL_TYPE_VXLAN:
return "vxlan";
case UDP_TUNNEL_TYPE_GENEVE:
@@ -9727,37 +9743,68 @@ static void i40e_sync_udp_filters(struct i40e_pf *pf)
static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
- i40e_status ret;
+ u8 filter_index, type;
u16 port;
int i;
if (!test_and_clear_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state))
return;
+ /* acquire RTNL to maintain state of flags and port requests */
+ rtnl_lock();
+
for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
if (pf->pending_udp_bitmap & BIT_ULL(i)) {
+ struct i40e_udp_port_config *udp_port;
+ i40e_status ret = 0;
+
+ udp_port = &pf->udp_ports[i];
pf->pending_udp_bitmap &= ~BIT_ULL(i);
- port = pf->udp_ports[i].port;
+
+ port = READ_ONCE(udp_port->port);
+ type = READ_ONCE(udp_port->type);
+ filter_index = READ_ONCE(udp_port->filter_index);
+
+ /* release RTNL while we wait on AQ command */
+ rtnl_unlock();
+
if (port)
ret = i40e_aq_add_udp_tunnel(hw, port,
- pf->udp_ports[i].type,
- NULL, NULL);
- else
- ret = i40e_aq_del_udp_tunnel(hw, i, NULL);
+ type,
+ &filter_index,
+ NULL);
+ else if (filter_index != I40E_UDP_PORT_INDEX_UNUSED)
+ ret = i40e_aq_del_udp_tunnel(hw, filter_index,
+ NULL);
+
+ /* reacquire RTNL so we can update filter_index */
+ rtnl_lock();
if (ret) {
dev_info(&pf->pdev->dev,
"%s %s port %d, index %d failed, err %s aq_err %s\n",
- i40e_tunnel_name(&pf->udp_ports[i]),
+ i40e_tunnel_name(type),
port ? "add" : "delete",
- port, i,
+ port,
+ filter_index,
i40e_stat_str(&pf->hw, ret),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
- pf->udp_ports[i].port = 0;
+ if (port) {
+				/* failed to add; just reset the port and
+				 * drop the pending bit for any deletion
+				 */
+ udp_port->port = 0;
+ pf->pending_udp_bitmap &= ~BIT_ULL(i);
+ }
+ } else if (port) {
+ /* record filter index on success */
+ udp_port->filter_index = filter_index;
}
}
}
+
+ rtnl_unlock();
}
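The subtle part of this hunk is the lock choreography: everything the AdminQ call needs is snapshotted with READ_ONCE() while RTNL is held, the lock is dropped only around the blocking firmware call, and it is retaken before any shared state is written back. A minimal sketch of that pattern (cfg, nports and fw_commit_port are illustrative stand-ins, not i40e identifiers):

    rtnl_lock();
    for (i = 0; i < nports; i++) {
            u16 port = READ_ONCE(cfg[i].port);  /* snapshot under RTNL */
            u8 type = READ_ONCE(cfg[i].type);
            int err;

            rtnl_unlock();                      /* never block on firmware under RTNL */
            err = fw_commit_port(hw, port, type);
            rtnl_lock();                        /* retake before touching shared state */

            if (err)
                    cfg[i].port = 0;            /* writeback only with RTNL held */
    }
    rtnl_unlock();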
/**
@@ -10004,7 +10051,7 @@ unlock_pf:
/**
* i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
- * @type: VSI pointer
+ * @vsi: VSI pointer
* @free_qvectors: a bool to specify if q_vectors need to be freed.
*
* On error: returns error code (negative)
@@ -10279,21 +10326,28 @@ static int i40e_init_msix(struct i40e_pf *pf)
/* any vectors left over go for VMDq support */
if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
- int vmdq_vecs_wanted = pf->num_vmdq_vsis * pf->num_vmdq_qps;
- int vmdq_vecs = min_t(int, vectors_left, vmdq_vecs_wanted);
-
if (!vectors_left) {
pf->num_vmdq_msix = 0;
pf->num_vmdq_qps = 0;
} else {
+ int vmdq_vecs_wanted =
+ pf->num_vmdq_vsis * pf->num_vmdq_qps;
+ int vmdq_vecs =
+ min_t(int, vectors_left, vmdq_vecs_wanted);
+
/* if we're short on vectors for what's desired, we limit
* the queues per vmdq. If this is still more than are
* available, the user will need to change the number of
* queues/vectors used by the PF later with the ethtool
* channels command
*/
- if (vmdq_vecs < vmdq_vecs_wanted)
+ if (vectors_left < vmdq_vecs_wanted) {
pf->num_vmdq_qps = 1;
+ vmdq_vecs_wanted = pf->num_vmdq_vsis;
+ vmdq_vecs = min_t(int,
+ vectors_left,
+ vmdq_vecs_wanted);
+ }
pf->num_vmdq_msix = pf->num_vmdq_qps;
v_budget += vmdq_vecs;
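A worked example with hypothetical numbers shows why vmdq_vecs_wanted and vmdq_vecs must be recomputed once the queue-pair limit kicks in:

    /* vectors_left = 6, num_vmdq_vsis = 4, num_vmdq_qps = 4:
     *
     *   vmdq_vecs_wanted = 4 * 4 = 16    -> exceeds vectors_left
     *   num_vmdq_qps     = 1             -> one queue pair per VMDq VSI
     *   vmdq_vecs_wanted = 4             -> recomputed: one vector per VSI
     *   vmdq_vecs        = min(6, 4) = 4
     *
     * Without the recomputation, vmdq_vecs would remain min(6, 16) = 6
     * and v_budget would reserve two vectors that no VMDq queue can
     * ever use.
     */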
@@ -10800,7 +10854,7 @@ int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
* @vsi: Pointer to VSI structure
* @seed: Buffer to store the keys
* @lut: Buffer to store the lookup table entries
- * lut_size: Size of buffer to store the lookup table entries
+ * @lut_size: Size of buffer to store the lookup table entries
*
* Returns 0 on success, negative on failure
*/
@@ -11374,6 +11428,11 @@ static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, u16 port)
u8 i;
for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
+ /* Do not report ports with pending deletions as
+ * being available.
+ */
+ if (!port && (pf->pending_udp_bitmap & BIT_ULL(i)))
+ continue;
if (pf->udp_ports[i].port == port)
return i;
}
@@ -11428,6 +11487,7 @@ static void i40e_udp_tunnel_add(struct net_device *netdev,
/* New port: add it and mark its index in the bitmap */
pf->udp_ports[next_idx].port = port;
+ pf->udp_ports[next_idx].filter_index = I40E_UDP_PORT_INDEX_UNUSED;
pf->pending_udp_bitmap |= BIT_ULL(next_idx);
set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state);
}
@@ -11469,7 +11529,12 @@ static void i40e_udp_tunnel_del(struct net_device *netdev,
* and make it pending
*/
pf->udp_ports[idx].port = 0;
- pf->pending_udp_bitmap |= BIT_ULL(idx);
+
+ /* Toggle pending bit instead of setting it. This way if we are
+ * deleting a port that has yet to be added we just clear the pending
+ * bit and don't have to worry about it.
+ */
+ pf->pending_udp_bitmap ^= BIT_ULL(idx);
set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state);
return;
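The toggle is worth spelling out, since OR would be the reflexive choice here; an illustrative sequence (not driver code):

    /* Port added but not yet synced, then deleted:
     *
     *   add:  pending |= BIT(i);  port = p;  -> bit 1, "add pending"
     *   del:  pending ^= BIT(i);  port = 0;  -> bit 0, nothing to sync
     *
     * Port already committed to firmware (pending bit clear), then deleted:
     *
     *   del:  pending ^= BIT(i);  port = 0;  -> bit 1, "delete pending"
     *
     * OR-ing on delete would leave the bit set in the first case, and
     * the sync task would try to delete a filter that was never added.
     */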
@@ -11500,6 +11565,7 @@ static int i40e_get_phys_port_id(struct net_device *netdev,
* @tb: pointer to array of nladdr (unused)
* @dev: the net device pointer
* @addr: the MAC address entry being added
+ * @vid: VLAN ID
* @flags: instructions from stack about fdb operation
*/
static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
@@ -11545,6 +11611,7 @@ static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
* i40e_ndo_bridge_setlink - Set the hardware bridge mode
* @dev: the netdev being configured
* @nlh: RTNL message
+ * @flags: bridge flags
*
* Inserts a new hardware bridge if not already created and
* enables the bridging mode requested (VEB or VEPA). If the
@@ -11816,7 +11883,6 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_bridge_setlink = i40e_ndo_bridge_setlink,
.ndo_bpf = i40e_xdp,
.ndo_xdp_xmit = i40e_xdp_xmit,
- .ndo_xdp_flush = i40e_xdp_flush,
};
/**
@@ -14118,6 +14184,7 @@ static void i40e_remove(struct pci_dev *pdev)
/**
* i40e_pci_error_detected - warning that something funky happened in PCI land
* @pdev: PCI device information struct
+ * @error: the type of PCI error
*
* Called to warn that something happened and the error handling steps
* are in progress. Allows the driver to quiesce things, be ready for
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index ba9687c03795..0299e5bbb902 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -1,29 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include "i40e_prototype.h"
@@ -1173,6 +1149,7 @@ void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw)
* i40e_nvmupd_check_wait_event - handle NVM update operation events
* @hw: pointer to the hardware structure
* @opcode: the event that just happened
+ * @desc: AdminQ descriptor
**/
void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
struct i40e_aq_desc *desc)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_osdep.h b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
index 9c3c3b0d3ac4..a07574bff550 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_osdep.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_OSDEP_H_
#define _I40E_OSDEP_H_
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 2ec24188d6e2..3170655cdeb9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_PROTOTYPE_H_
#define _I40E_PROTOTYPE_H_
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index 5b47dd1f75a5..35f2866b38c6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -1,29 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include "i40e.h"
#include <linux/ptp_classify.h>
@@ -40,9 +16,9 @@
* At 1Gb link, the period is multiplied by 20. (32ns)
* 1588 functionality is not supported at 100Mbps.
*/
-#define I40E_PTP_40GB_INCVAL 0x0199999999ULL
-#define I40E_PTP_10GB_INCVAL 0x0333333333ULL
-#define I40E_PTP_1GB_INCVAL 0x2000000000ULL
+#define I40E_PTP_40GB_INCVAL 0x0199999999ULL
+#define I40E_PTP_10GB_INCVAL_MULT 2
+#define I40E_PTP_1GB_INCVAL_MULT 20
#define I40E_PRTTSYN_CTL1_TSYNTYPE_V1 BIT(I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
#define I40E_PRTTSYN_CTL1_TSYNTYPE_V2 (2 << \
@@ -130,17 +106,24 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
ppb = -ppb;
}
- smp_mb(); /* Force any pending update before accessing. */
- adj = READ_ONCE(pf->ptp_base_adj);
-
- freq = adj;
+ freq = I40E_PTP_40GB_INCVAL;
freq *= ppb;
diff = div_u64(freq, 1000000000ULL);
if (neg_adj)
- adj -= diff;
+ adj = I40E_PTP_40GB_INCVAL - diff;
else
- adj += diff;
+ adj = I40E_PTP_40GB_INCVAL + diff;
+
+ /* At some link speeds, the base incval is so large that directly
+ * multiplying by ppb would result in arithmetic overflow even when
+ * using a u64. Avoid this by instead calculating the new incval
+ * always in terms of the 40GbE clock rate and then multiplying by the
+ * link speed factor afterwards. This does result in slightly lower
+ * precision at lower link speeds, but it is fairly minor.
+ */
+ smp_mb(); /* Force any pending update before accessing. */
+ adj *= READ_ONCE(pf->ptp_adj_mult);
wr32(hw, I40E_PRTTSYN_INC_L, adj & 0xFFFFFFFF);
wr32(hw, I40E_PRTTSYN_INC_H, adj >> 32);
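The arithmetic behind the overflow comment is easy to check: a u64 tops out near 1.8e19, and the driver's maximum ppb adjustment is on the order of 1e9.

    /* Old scheme, scaling the per-speed base directly:
     *
     *   1GbE base incval  0x2000000000  ~= 1.37e11
     *   1.37e11 * 1e9 (max ppb)         ~= 1.37e20  -> u64 overflow
     *
     * New scheme, always scaling the 40GbE base:
     *
     *   40GbE base incval 0x0199999999  ~= 6.87e9
     *   6.87e9 * 1e9                    ~= 6.87e18  -> fits in a u64
     *
     * with the link-speed factor (1, 2 or 20) applied afterwards.
     */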
@@ -334,10 +317,12 @@ void i40e_ptp_rx_hang(struct i40e_pf *pf)
* This watchdog task is run periodically to make sure that we clear the Tx
* timestamp logic if we don't obtain a timestamp in a reasonable amount of
* time. It is unexpected in the normal case but if it occurs it results in
- * permanently prevent timestamps of future packets
+ * permanently preventing timestamps of future packets.
**/
void i40e_ptp_tx_hang(struct i40e_pf *pf)
{
+ struct sk_buff *skb;
+
if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_tx)
return;
@@ -350,9 +335,12 @@ void i40e_ptp_tx_hang(struct i40e_pf *pf)
* within a second it is reasonable to assume that we never will.
*/
if (time_is_before_jiffies(pf->ptp_tx_start + HZ)) {
- dev_kfree_skb_any(pf->ptp_tx_skb);
+ skb = pf->ptp_tx_skb;
pf->ptp_tx_skb = NULL;
clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state);
+
+ /* Free the skb after we clear the bitlock */
+ dev_kfree_skb_any(skb);
pf->tx_hwtstamp_timeouts++;
}
}
@@ -462,6 +450,7 @@ void i40e_ptp_set_increment(struct i40e_pf *pf)
struct i40e_link_status *hw_link_info;
struct i40e_hw *hw = &pf->hw;
u64 incval;
+ u32 mult;
hw_link_info = &hw->phy.link_info;
@@ -469,10 +458,10 @@ void i40e_ptp_set_increment(struct i40e_pf *pf)
switch (hw_link_info->link_speed) {
case I40E_LINK_SPEED_10GB:
- incval = I40E_PTP_10GB_INCVAL;
+ mult = I40E_PTP_10GB_INCVAL_MULT;
break;
case I40E_LINK_SPEED_1GB:
- incval = I40E_PTP_1GB_INCVAL;
+ mult = I40E_PTP_1GB_INCVAL_MULT;
break;
case I40E_LINK_SPEED_100MB:
{
@@ -483,15 +472,20 @@ void i40e_ptp_set_increment(struct i40e_pf *pf)
"1588 functionality is not supported at 100 Mbps. Stopping the PHC.\n");
warn_once++;
}
- incval = 0;
+ mult = 0;
break;
}
case I40E_LINK_SPEED_40GB:
default:
- incval = I40E_PTP_40GB_INCVAL;
+ mult = 1;
break;
}
+	/* The increment value is calculated by taking the base 40GbE incval
+ * and multiplying it by a factor based on the link speed.
+ */
+ incval = I40E_PTP_40GB_INCVAL * mult;
+
/* Write the new increment value into the increment register. The
* hardware will not update the clock until both registers have been
* written.
@@ -500,14 +494,14 @@ void i40e_ptp_set_increment(struct i40e_pf *pf)
wr32(hw, I40E_PRTTSYN_INC_H, incval >> 32);
/* Update the base adjustment value. */
- WRITE_ONCE(pf->ptp_base_adj, incval);
+ WRITE_ONCE(pf->ptp_adj_mult, mult);
smp_mb(); /* Force the above update. */
}
/**
* i40e_ptp_get_ts_config - ioctl interface to read the HW timestamping
* @pf: Board private structure
- * @ifreq: ioctl data
+ * @ifr: ioctl data
*
* Obtain the current hardware timestamping settings as requested. To do this,
* keep a shadow copy of the timestamp settings rather than attempting to
@@ -651,7 +645,7 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf,
/**
* i40e_ptp_set_ts_config - ioctl interface to control the HW timestamping
* @pf: Board private structure
- * @ifreq: ioctl data
+ * @ifr: ioctl data
*
* Respond to the user filter requests and make the appropriate hardware
* changes here. The XL710 cannot support splitting of the Tx/Rx timestamping
@@ -805,9 +799,11 @@ void i40e_ptp_stop(struct i40e_pf *pf)
pf->ptp_rx = false;
if (pf->ptp_tx_skb) {
- dev_kfree_skb_any(pf->ptp_tx_skb);
+ struct sk_buff *skb = pf->ptp_tx_skb;
+
pf->ptp_tx_skb = NULL;
clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state);
+ dev_kfree_skb_any(skb);
}
if (pf->ptp_clock) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h
index b3e206e49cc2..52e3680c57f8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_register.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_register.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_REGISTER_H_
#define _I40E_REGISTER_H_
diff --git a/drivers/net/ethernet/intel/i40e/i40e_status.h b/drivers/net/ethernet/intel/i40e/i40e_status.h
index 10c86f63dc52..77be0702d07c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_status.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_status.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_STATUS_H_
#define _I40E_STATUS_H_
diff --git a/drivers/net/ethernet/intel/i40e/i40e_trace.h b/drivers/net/ethernet/intel/i40e/i40e_trace.h
index 410ba13bcf21..424f02077e2e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_trace.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_trace.h
@@ -1,26 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
- *
- * Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
- * Copyright(c) 2013 - 2017 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
/* Modeled on trace-events-sample.h */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index f174c72480ab..8ffb7454e67c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1,29 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include <linux/prefetch.h>
#include <net/busy_poll.h>
@@ -495,7 +471,7 @@ static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
/**
* i40e_add_del_fdir - Build raw packets to add/del fdir filter
* @vsi: pointer to the targeted VSI
- * @cmd: command to get or set RX flow classification rules
+ * @input: filter to add or delete
* @add: true adds a filter, false removes it
*
**/
@@ -638,7 +614,7 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
kfree(tx_buffer->raw_buf);
else if (ring_is_xdp(ring))
- page_frag_free(tx_buffer->raw_buf);
+ xdp_return_frame(tx_buffer->xdpf);
else
dev_kfree_skb_any(tx_buffer->skb);
if (dma_unmap_len(tx_buffer, len))
@@ -713,7 +689,7 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring)
/**
* i40e_get_tx_pending - how many tx descriptors not processed
- * @tx_ring: the ring of descriptors
+ * @ring: the ring of descriptors
* @in_sw: use SW variables
*
* Since there is no access to the ring head register
@@ -841,7 +817,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
/* free the skb/XDP data */
if (ring_is_xdp(tx_ring))
- page_frag_free(tx_buf->raw_buf);
+ xdp_return_frame(tx_buf->xdpf);
else
napi_consume_skb(tx_buf->skb, napi_budget);
@@ -1795,6 +1771,8 @@ static inline int i40e_ptype_to_htype(u8 ptype)
* i40e_rx_hash - set the hash value in the skb
* @ring: descriptor ring
* @rx_desc: specific descriptor
+ * @skb: skb currently being received and modified
+ * @rx_ptype: Rx packet type
**/
static inline void i40e_rx_hash(struct i40e_ring *ring,
union i40e_rx_desc *rx_desc,
@@ -2054,6 +2032,21 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
#if L1_CACHE_BYTES < 128
prefetch(xdp->data + L1_CACHE_BYTES);
#endif
+ /* Note, we get here by enabling legacy-rx via:
+ *
+ * ethtool --set-priv-flags <dev> legacy-rx on
+ *
+ * In this mode, we currently get 0 extra XDP headroom as
+ * opposed to having legacy-rx off, where we process XDP
+ * packets going to stack via i40e_build_skb(). The latter
+ * provides us currently with 192 bytes of headroom.
+ *
+ * For i40e_construct_skb() mode it means that the
+ * xdp->data_meta will always point to xdp->data, since
+ * the helper cannot expand the head. Should this ever
+ * change in the future for legacy-rx mode, then let's also
+ * add xdp->data_meta handling here.
+ */
/* allocate a skb to store the frags */
skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
@@ -2105,19 +2098,25 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
struct i40e_rx_buffer *rx_buffer,
struct xdp_buff *xdp)
{
- unsigned int size = xdp->data_end - xdp->data;
+ unsigned int metasize = xdp->data - xdp->data_meta;
#if (PAGE_SIZE < 8192)
unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
#else
unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
- SKB_DATA_ALIGN(I40E_SKB_PAD + size);
+ SKB_DATA_ALIGN(I40E_SKB_PAD +
+ (xdp->data_end -
+ xdp->data_hard_start));
#endif
struct sk_buff *skb;
- /* prefetch first cache line of first page */
- prefetch(xdp->data);
+ /* Prefetch first cache line of first page. If xdp->data_meta
+ * is unused, this points exactly as xdp->data, otherwise we
+ * is unused, this points exactly to xdp->data; otherwise we
+ * data, and then actual data.
+ */
+ prefetch(xdp->data_meta);
#if L1_CACHE_BYTES < 128
- prefetch(xdp->data + L1_CACHE_BYTES);
+ prefetch(xdp->data_meta + L1_CACHE_BYTES);
#endif
/* build an skb around the page buffer */
skb = build_skb(xdp->data_hard_start, truesize);
@@ -2125,8 +2124,10 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
return NULL;
/* update pointers within the skb to store the data */
- skb_reserve(skb, I40E_SKB_PAD);
- __skb_put(skb, size);
+ skb_reserve(skb, I40E_SKB_PAD + (xdp->data - xdp->data_hard_start));
+ __skb_put(skb, xdp->data_end - xdp->data);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
/* buffer is used by skb, update page_offset */
#if (PAGE_SIZE < 8192)
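The pointer arithmetic above follows directly from the xdp_buff layout; a sketch (actual sizes depend on build configuration):

    /*  data_hard_start   data_meta       data            data_end
     *        |               |             |                 |
     *        v               v             v                 v
     *        +---------------+-------------+-----------------+------+
     *        |   headroom    |  metadata   |   packet data   | tail |
     *        +---------------+-------------+-----------------+------+
     *
     * metasize = xdp->data - xdp->data_meta; a BPF program grows the
     * metadata area with bpf_xdp_adjust_meta() and a negative delta,
     * which moves data_meta down toward data_hard_start.
     */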
@@ -2203,9 +2204,20 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
#define I40E_XDP_CONSUMED 1
#define I40E_XDP_TX 2
-static int i40e_xmit_xdp_ring(struct xdp_buff *xdp,
+static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
struct i40e_ring *xdp_ring);
+static int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp,
+ struct i40e_ring *xdp_ring)
+{
+ struct xdp_frame *xdpf = convert_to_xdp_frame(xdp);
+
+ if (unlikely(!xdpf))
+ return I40E_XDP_CONSUMED;
+
+ return i40e_xmit_xdp_ring(xdpf, xdp_ring);
+}
+
/**
* i40e_run_xdp - run an XDP program
* @rx_ring: Rx ring being processed
@@ -2225,13 +2237,15 @@ static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
if (!xdp_prog)
goto xdp_out;
+ prefetchw(xdp->data_hard_start); /* xdp_frame write */
+
act = bpf_prog_run_xdp(xdp_prog, xdp);
switch (act) {
case XDP_PASS:
break;
case XDP_TX:
xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
- result = i40e_xmit_xdp_ring(xdp, xdp_ring);
+ result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
break;
case XDP_REDIRECT:
err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
@@ -2350,7 +2364,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
if (!skb) {
xdp.data = page_address(rx_buffer->page) +
rx_buffer->page_offset;
- xdp_set_data_meta_invalid(&xdp);
+ xdp.data_meta = xdp.data;
xdp.data_hard_start = xdp.data -
i40e_rx_offset(rx_ring);
xdp.data_end = xdp.data + size;
@@ -3478,13 +3492,13 @@ dma_error:
- * @xdp: data to transmit
+ * @xdpf: frame to transmit
* @xdp_ring: XDP Tx ring
**/
-static int i40e_xmit_xdp_ring(struct xdp_buff *xdp,
+static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
struct i40e_ring *xdp_ring)
{
- u32 size = xdp->data_end - xdp->data;
u16 i = xdp_ring->next_to_use;
struct i40e_tx_buffer *tx_bi;
struct i40e_tx_desc *tx_desc;
+ u32 size = xdpf->len;
dma_addr_t dma;
if (!unlikely(I40E_DESC_UNUSED(xdp_ring))) {
@@ -3492,14 +3506,14 @@ static int i40e_xmit_xdp_ring(struct xdp_buff *xdp,
return I40E_XDP_CONSUMED;
}
- dma = dma_map_single(xdp_ring->dev, xdp->data, size, DMA_TO_DEVICE);
+ dma = dma_map_single(xdp_ring->dev, xdpf->data, size, DMA_TO_DEVICE);
if (dma_mapping_error(xdp_ring->dev, dma))
return I40E_XDP_CONSUMED;
tx_bi = &xdp_ring->tx_bi[i];
tx_bi->bytecount = size;
tx_bi->gso_segs = 1;
- tx_bi->raw_buf = xdp->data;
+ tx_bi->xdpf = xdpf;
/* record length, and DMA address */
dma_unmap_len_set(tx_bi, len, size);
@@ -3673,14 +3687,21 @@ netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
* @dev: netdev
- * @xdp: XDP buffer
+ * @n: number of frames to transmit
+ * @frames: array of XDP frames to transmit
+ * @flags: XDP_XMIT_* flags
 *
- * Returns Zero if sent, else an error code
+ * Returns the number of frames successfully sent. Frames that fail are
+ * freed via the XDP return API.
+ *
+ * For error cases, a negative errno code is returned and no frames
+ * are transmitted (the caller must handle freeing frames).
**/
-int i40e_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)
+int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
+ u32 flags)
{
struct i40e_netdev_priv *np = netdev_priv(dev);
unsigned int queue_index = smp_processor_id();
struct i40e_vsi *vsi = np->vsi;
- int err;
+ struct i40e_ring *xdp_ring;
+ int drops = 0;
+ int i;
if (test_bit(__I40E_VSI_DOWN, vsi->state))
return -ENETDOWN;
@@ -3688,28 +3709,24 @@ int i40e_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)
if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs)
return -ENXIO;
- err = i40e_xmit_xdp_ring(xdp, vsi->xdp_rings[queue_index]);
- if (err != I40E_XDP_TX)
- return -ENOSPC;
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
- return 0;
-}
+ xdp_ring = vsi->xdp_rings[queue_index];
-/**
- * i40e_xdp_flush - Implements ndo_xdp_flush
- * @dev: netdev
- **/
-void i40e_xdp_flush(struct net_device *dev)
-{
- struct i40e_netdev_priv *np = netdev_priv(dev);
- unsigned int queue_index = smp_processor_id();
- struct i40e_vsi *vsi = np->vsi;
+ for (i = 0; i < n; i++) {
+ struct xdp_frame *xdpf = frames[i];
+ int err;
- if (test_bit(__I40E_VSI_DOWN, vsi->state))
- return;
+ err = i40e_xmit_xdp_ring(xdpf, xdp_ring);
+ if (err != I40E_XDP_TX) {
+ xdp_return_frame_rx_napi(xdpf);
+ drops++;
+ }
+ }
- if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs)
- return;
+ if (unlikely(flags & XDP_XMIT_FLUSH))
+ i40e_xdp_ring_update_tail(xdp_ring);
- i40e_xdp_ring_update_tail(vsi->xdp_rings[queue_index]);
+ return n - drops;
}
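The new return contract splits the freeing responsibility between caller and driver; a hypothetical caller (names other than the ndo and the XDP return helper are illustrative) would look like:

    sent = dev->netdev_ops->ndo_xdp_xmit(dev, n, frames, XDP_XMIT_FLUSH);
    if (sent < 0) {
            /* nothing was transmitted; the caller still owns every frame */
            for (i = 0; i < n; i++)
                    xdp_return_frame_rx_napi(frames[i]);
    }
    /* On success, 'sent' frames belong to the device and the n - sent
     * drops were already freed inside the ndo via the XDP return API.
     */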
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 3043483ec426..bb04f6a731fe 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_TXRX_H_
#define _I40E_TXRX_H_
@@ -306,6 +282,7 @@ static inline unsigned int i40e_txd_use_count(unsigned int size)
struct i40e_tx_buffer {
struct i40e_tx_desc *next_to_watch;
union {
+ struct xdp_frame *xdpf;
struct sk_buff *skb;
void *raw_buf;
};
@@ -510,8 +487,8 @@ u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw);
void i40e_detect_recover_hung(struct i40e_vsi *vsi);
int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
bool __i40e_chk_linearize(struct sk_buff *skb);
-int i40e_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp);
-void i40e_xdp_flush(struct net_device *dev);
+int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
+ u32 flags);
/**
* i40e_get_head - Retrieve head from head writeback
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index bfb80092b352..7df969c59855 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_TYPE_H_
#define _I40E_TYPE_H_
@@ -1318,7 +1294,8 @@ struct i40e_hw_port_stats {
/* Checksum and Shadow RAM pointers */
#define I40E_SR_NVM_CONTROL_WORD 0x00
-#define I40E_SR_EMP_MODULE_PTR 0x0F
+#define I40E_EMP_MODULE_PTR 0x0F
+#define I40E_SR_EMP_MODULE_PTR 0x48
#define I40E_SR_PBA_FLAGS 0x15
#define I40E_SR_PBA_BLOCK_PTR 0x16
#define I40E_SR_BOOT_CONFIG_PTR 0x17
@@ -1337,6 +1314,8 @@ struct i40e_hw_port_stats {
#define I40E_SR_PCIE_ALT_MODULE_MAX_SIZE 1024
#define I40E_SR_CONTROL_WORD_1_SHIFT 0x06
#define I40E_SR_CONTROL_WORD_1_MASK (0x03 << I40E_SR_CONTROL_WORD_1_SHIFT)
+#define I40E_SR_CONTROL_WORD_1_NVM_BANK_VALID BIT(5)
+#define I40E_SR_NVM_MAP_STRUCTURE_TYPE BIT(12)
#define I40E_PTR_TYPE BIT(15)
#define I40E_SR_OCP_CFG_WORD0 0x2B
#define I40E_SR_OCP_ENABLED BIT(15)
@@ -1454,7 +1433,8 @@ enum i40e_reset_type {
};
/* IEEE 802.1AB LLDP Agent Variables from NVM */
-#define I40E_NVM_LLDP_CFG_PTR 0xD
+#define I40E_NVM_LLDP_CFG_PTR 0x06
+#define I40E_SR_LLDP_CFG_PTR 0x31
struct i40e_lldp_variables {
u16 length;
u16 adminstatus;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 35173cbe80f7..c6d24eaede18 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -1,29 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include "i40e.h"
@@ -32,8 +8,8 @@
/**
* i40e_vc_vf_broadcast
* @pf: pointer to the PF structure
- * @opcode: operation code
- * @retval: return value
+ * @v_opcode: operation code
+ * @v_retval: return value
* @msg: pointer to the msg buffer
* @msglen: msg length
*
@@ -1663,6 +1639,7 @@ static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
/**
* i40e_vc_get_version_msg
* @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
*
* called from the VF to request the API version used by the PF
**/
@@ -1706,7 +1683,6 @@ static void i40e_del_qch(struct i40e_vf *vf)
* i40e_vc_get_vf_resources_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
- * @msglen: msg length
*
* called from the VF to request its resources
**/
@@ -1830,8 +1806,6 @@ err:
/**
* i40e_vc_reset_vf_msg
* @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- * @msglen: msg length
*
* called from the VF to reset itself,
* unlike other virtchnl messages, PF driver
@@ -2180,6 +2154,51 @@ error_param:
}
/**
+ * i40e_ctrl_vf_tx_rings - start or stop a VF's Tx rings
+ * @vsi: the SRIOV VSI being configured
+ * @q_map: bit map of the queues to start or stop
+ * @enable: start or stop the queues
+ **/
+static int i40e_ctrl_vf_tx_rings(struct i40e_vsi *vsi, unsigned long q_map,
+ bool enable)
+{
+ struct i40e_pf *pf = vsi->back;
+ int ret = 0;
+ u16 q_id;
+
+ for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) {
+ ret = i40e_control_wait_tx_q(vsi->seid, pf,
+ vsi->base_queue + q_id,
+ false /*is xdp*/, enable);
+ if (ret)
+ break;
+ }
+ return ret;
+}
+
+/**
+ * i40e_ctrl_vf_rx_rings - start or stop a VF's Rx rings
+ * @vsi: the SRIOV VSI being configured
+ * @q_map: bit map of the queues to start or stop
+ * @enable: start or stop the queues
+ **/
+static int i40e_ctrl_vf_rx_rings(struct i40e_vsi *vsi, unsigned long q_map,
+ bool enable)
+{
+ struct i40e_pf *pf = vsi->back;
+ int ret = 0;
+ u16 q_id;
+
+ for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) {
+ ret = i40e_control_wait_rx_q(pf, vsi->base_queue + q_id,
+ enable);
+ if (ret)
+ break;
+ }
+ return ret;
+}
+
+/**
* i40e_vc_enable_queues_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -2211,8 +2230,17 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
goto error_param;
}
- if (i40e_vsi_start_rings(pf->vsi[vf->lan_vsi_idx]))
+ /* Use the queue bit map sent by the VF */
+ if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
+ true)) {
aq_ret = I40E_ERR_TIMEOUT;
+ goto error_param;
+ }
+ if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
+ true)) {
+ aq_ret = I40E_ERR_TIMEOUT;
+ goto error_param;
+ }
/* need to start the rings for additional ADq VSI's as well */
if (vf->adq_enabled) {
@@ -2260,8 +2288,17 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
goto error_param;
}
- i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);
-
+ /* Use the queue bit map sent by the VF */
+ if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
+ false)) {
+ aq_ret = I40E_ERR_TIMEOUT;
+ goto error_param;
+ }
+ if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
+ false)) {
+ aq_ret = I40E_ERR_TIMEOUT;
+ goto error_param;
+ }
error_param:
/* send the response to the VF */
return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES,
@@ -3556,15 +3593,16 @@ err:
* i40e_vc_process_vf_msg
* @pf: pointer to the PF structure
* @vf_id: source VF id
+ * @v_opcode: operation code
+ * @v_retval: unused return value code
* @msg: pointer to the msg buffer
* @msglen: msg length
- * @msghndl: msg handle
*
* called from the common aeq/arq handler to
* process request from VF
**/
int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
- u32 v_retval, u8 *msg, u16 msglen)
+ u32 __always_unused v_retval, u8 *msg, u16 msglen)
{
struct i40e_hw *hw = &pf->hw;
int local_vf_id = vf_id - (s16)hw->func_caps.vf_base_id;
@@ -4015,7 +4053,8 @@ error_pvid:
* i40e_ndo_set_vf_bw
* @netdev: network interface device structure
* @vf_id: VF identifier
- * @tx_rate: Tx rate
+ * @min_tx_rate: Minimum Tx rate
+ * @max_tx_rate: Maximum Tx rate
*
* configure VF Tx rate
**/
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 57f727bb9e36..bf67d62e2b5f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_VIRTCHNL_PF_H_
#define _I40E_VIRTCHNL_PF_H_
diff --git a/drivers/net/ethernet/intel/i40evf/Makefile b/drivers/net/ethernet/intel/i40evf/Makefile
index 1e89c5487676..3c5c6e962280 100644
--- a/drivers/net/ethernet/intel/i40evf/Makefile
+++ b/drivers/net/ethernet/intel/i40evf/Makefile
@@ -1,29 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-################################################################################
-#
-# Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
-# Copyright(c) 2013 - 2014 Intel Corporation.
-#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program. If not, see <http://www.gnu.org/licenses/>.
-#
-# The full GNU General Public License is included in this distribution in
-# the file called "COPYING".
-#
-# Contact Information:
-# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-#
-################################################################################
+# Copyright(c) 2013 - 2018 Intel Corporation.
#
## Makefile for the Intel(R) 40GbE VF driver
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
index 6fd677efa9da..c355120dfdfd 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
@@ -1,29 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include "i40e_status.h"
#include "i40e_type.h"
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
index a7137c165256..1f264b9b6805 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_ADMINQ_H_
#define _I40E_ADMINQ_H_
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
index 439e71882049..aa81e87cd471 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 - 2017 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_ADMINQ_CMD_H_
#define _I40E_ADMINQ_CMD_H_
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_alloc.h b/drivers/net/ethernet/intel/i40evf/i40e_alloc.h
index 7e0fddd8af36..cb8689222c8b 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_alloc.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_alloc.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_ALLOC_H_
#define _I40E_ALLOC_H_
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
index 67140cdbcd7a..9cef54971312 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c
@@ -1,29 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include "i40e_type.h"
#include "i40e_adminq.h"
@@ -1255,6 +1231,7 @@ i40e_status_code i40evf_aq_write_ddp(struct i40e_hw *hw, void *buff,
* @hw: pointer to the hw struct
* @buff: command buffer (size in bytes = buff_size)
* @buff_size: buffer size in bytes
+ * @flags: AdminQ command flags
* @cmd_details: pointer to command details structure or NULL
**/
enum
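
(Editor's note: many of the non-license hunks in this patch are kernel-doc repairs like the @flags line added above. For reference, a complete kernel-doc block pairs every parameter in the signature with one @name line, in order; scripts/kernel-doc warns when the list and the prototype drift apart, which is what these hunks fix. A minimal, hypothetical example:)

/**
 * example_send_cmd - send one buffer to the AdminQ (hypothetical function)
 * @hw: pointer to the hw struct
 * @buff: command buffer (size in bytes = buff_size)
 * @buff_size: buffer size in bytes
 * @flags: AdminQ command flags
 *
 * Every parameter gets exactly one @name line; scripts/kernel-doc
 * complains when this list disagrees with the C prototype below.
 **/
static int example_send_cmd(struct i40e_hw *hw, void *buff,
			    u16 buff_size, u16 flags);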
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_devids.h b/drivers/net/ethernet/intel/i40evf/i40e_devids.h
index 352dd3f3eb6a..f300bf271824 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_devids.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_devids.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_DEVIDS_H_
#define _I40E_DEVIDS_H_
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_hmc.h b/drivers/net/ethernet/intel/i40evf/i40e_hmc.h
index 7432596164f4..1c78de838857 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_hmc.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_hmc.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_HMC_H_
#define _I40E_HMC_H_
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h b/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h
index ddac0e4908d3..82b00f70a632 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_LAN_HMC_H_
#define _I40E_LAN_HMC_H_
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_osdep.h b/drivers/net/ethernet/intel/i40evf/i40e_osdep.h
index 8668ad6c1a65..3ddddb46455b 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_osdep.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_osdep.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_OSDEP_H_
#define _I40E_OSDEP_H_
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
index 72501bd0f1a9..a358f4b9d5aa 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_PROTOTYPE_H_
#define _I40E_PROTOTYPE_H_
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_register.h b/drivers/net/ethernet/intel/i40evf/i40e_register.h
index c9c935659758..49e1f57d99cc 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_register.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_register.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_REGISTER_H_
#define _I40E_REGISTER_H_
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_status.h b/drivers/net/ethernet/intel/i40evf/i40e_status.h
index 0d7993ecb99a..77be0702d07c 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_status.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_status.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_STATUS_H_
#define _I40E_STATUS_H_
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_trace.h b/drivers/net/ethernet/intel/i40evf/i40e_trace.h
index ece01dd12a3c..d7a4e68820a8 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_trace.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_trace.h
@@ -1,26 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
- *
- * Intel(R) 40-10 Gigabit Ethernet Virtual Function Driver
- * Copyright(c) 2013 - 2017 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
/* Modeled on trace-events-sample.h */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 12bd937861e7..a9730711e257 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -1,29 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include <linux/prefetch.h>
#include <net/busy_poll.h>
@@ -129,7 +105,7 @@ void i40evf_free_tx_resources(struct i40e_ring *tx_ring)
/**
* i40evf_get_tx_pending - how many Tx descriptors not processed
- * @tx_ring: the ring of descriptors
+ * @ring: the ring of descriptors
* @in_sw: is tx_pending being checked in SW or HW
*
* Since there is no access to the ring head register
@@ -1070,6 +1046,8 @@ static inline int i40e_ptype_to_htype(u8 ptype)
* i40e_rx_hash - set the hash value in the skb
* @ring: descriptor ring
* @rx_desc: specific descriptor
+ * @skb: skb currently being received and modified
+ * @rx_ptype: Rx packet type
**/
static inline void i40e_rx_hash(struct i40e_ring *ring,
union i40e_rx_desc *rx_desc,
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index 5790897eae2e..3b5a63b3236e 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_TXRX_H_
#define _I40E_TXRX_H_
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index 449de4b0058e..094387db3c11 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_TYPE_H_
#define _I40E_TYPE_H_
@@ -1257,7 +1233,8 @@ struct i40e_hw_port_stats {
/* Checksum and Shadow RAM pointers */
#define I40E_SR_NVM_CONTROL_WORD 0x00
-#define I40E_SR_EMP_MODULE_PTR 0x0F
+#define I40E_EMP_MODULE_PTR 0x0F
+#define I40E_SR_EMP_MODULE_PTR 0x48
#define I40E_NVM_OEM_VER_OFF 0x83
#define I40E_SR_NVM_DEV_STARTER_VERSION 0x18
#define I40E_SR_NVM_WAKE_ON_LAN 0x19
@@ -1273,6 +1250,9 @@ struct i40e_hw_port_stats {
#define I40E_SR_PCIE_ALT_MODULE_MAX_SIZE 1024
#define I40E_SR_CONTROL_WORD_1_SHIFT 0x06
#define I40E_SR_CONTROL_WORD_1_MASK (0x03 << I40E_SR_CONTROL_WORD_1_SHIFT)
+#define I40E_SR_CONTROL_WORD_1_NVM_BANK_VALID BIT(5)
+#define I40E_SR_NVM_MAP_STRUCTURE_TYPE BIT(12)
+#define I40E_PTR_TYPE BIT(15)
/* Shadow RAM related */
#define I40E_SR_SECTOR_SIZE_IN_WORDS 0x800
@@ -1386,6 +1366,10 @@ enum i40e_reset_type {
I40E_RESET_EMPR = 3,
};
+/* IEEE 802.1AB LLDP Agent Variables from NVM */
+#define I40E_NVM_LLDP_CFG_PTR 0x06
+#define I40E_SR_LLDP_CFG_PTR 0x31
+
/* RSS Hash Table Size */
#define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000
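
(Editor's note: the new EMP/LLDP pointer defines above come with a type bit. Under the convention assumed here — a pointer word with I40E_PTR_TYPE, BIT(15), set is expressed in 4KB sectors rather than in 16-bit words — decoding might look like the following sketch; the helper name is hypothetical:)

/* Sketch only: normalize an NVM pointer word to a word offset. */
static u32 example_ptr_to_words(u16 ptr_word)
{
	if (ptr_word & I40E_PTR_TYPE)
		/* pointer given in 4KB sectors; convert to 16-bit words */
		return (u32)(ptr_word & ~I40E_PTR_TYPE) * (4096 / 2);

	/* otherwise the pointer is already a word offset */
	return ptr_word;
}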
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
index 3a7a1e77bf39..96e537a35000 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40EVF_H_
#define _I40EVF_H_
@@ -105,7 +81,6 @@ struct i40e_vsi {
#define I40E_TX_DESC(R, i) (&(((struct i40e_tx_desc *)((R)->desc))[i]))
#define I40E_TX_CTXTDESC(R, i) \
(&(((struct i40e_tx_context_desc *)((R)->desc))[i]))
-#define MAX_QUEUES 16
#define I40EVF_MAX_REQ_QUEUES 4
#define I40EVF_HKEY_ARRAY_SIZE ((I40E_VFQF_HKEY_MAX_INDEX + 1) * 4)
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_client.c b/drivers/net/ethernet/intel/i40evf/i40evf_client.c
index da60ce12b33d..3cc9d60d0d72 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_client.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_client.c
@@ -1,4 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
#include <linux/list.h>
#include <linux/errno.h>
@@ -176,7 +178,6 @@ void i40evf_notify_client_close(struct i40e_vsi *vsi, bool reset)
/**
* i40evf_client_add_instance - add a client instance to the instance list
* @adapter: pointer to the board struct
- * @client: pointer to a client struct in the client list.
*
* Returns cinst ptr on success, NULL on failure
**/
@@ -234,7 +235,6 @@ out:
/**
* i40evf_client_del_instance - removes a client instance from the list
* @adapter: pointer to the board struct
- * @client: pointer to the client struct
*
**/
static
@@ -438,7 +438,7 @@ static u32 i40evf_client_virtchnl_send(struct i40e_info *ldev,
* i40evf_client_setup_qvlist - send a message to the PF to setup iwarp qv map
* @ldev: pointer to L2 context.
* @client: Client pointer.
- * @qv_info: queue and vector list
+ * @qvlist_info: queue and vector list
*
* Return 0 on success or < 0 on error
**/
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_client.h b/drivers/net/ethernet/intel/i40evf/i40evf_client.h
index 15a10da5bd4a..5585f362048a 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_client.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_client.h
@@ -1,6 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _I40E_CLIENT_H_
-#define _I40E_CLIENT_H_
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+#ifndef _I40EVF_CLIENT_H_
+#define _I40EVF_CLIENT_H_
#define I40EVF_CLIENT_STR_LENGTH 10
@@ -164,4 +166,4 @@ struct i40e_client {
/* used by clients */
int i40evf_register_client(struct i40e_client *client);
int i40evf_unregister_client(struct i40e_client *client);
-#endif /* _I40E_CLIENT_H_ */
+#endif /* _I40EVF_CLIENT_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
index dc4cde274fb8..69efe0aec76a 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
@@ -1,29 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
/* ethtool support for i40evf */
#include "i40evf.h"
@@ -226,7 +202,7 @@ static void i40evf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
/**
* i40evf_get_priv_flags - report device private flags
- * @dev: network interface device structure
+ * @netdev: network interface device structure
*
* The get string set count and the string set should be matched for each
* flag returned. Add new strings for each flag to the i40e_gstrings_priv_flags
@@ -253,7 +229,7 @@ static u32 i40evf_get_priv_flags(struct net_device *netdev)
/**
* i40evf_set_priv_flags - set private flags
- * @dev: network interface device structure
+ * @netdev: network interface device structure
* @flags: bit flags to be set
**/
static int i40evf_set_priv_flags(struct net_device *netdev, u32 flags)
@@ -627,6 +603,7 @@ static int i40evf_set_per_queue_coalesce(struct net_device *netdev,
* i40evf_get_rxnfc - command to get RX flow classification rules
* @netdev: network interface device structure
* @cmd: ethtool rxnfc command
+ * @rule_locs: pointer to store rule locations
*
* Returns Success if the command is supported.
**/
@@ -746,6 +723,7 @@ static u32 i40evf_get_rxfh_indir_size(struct net_device *netdev)
* @netdev: network interface device structure
* @indir: indirection table
* @key: hash key
+ * @hfunc: hash function in use
*
* Reads the indirection table directly from the hardware. Always returns 0.
**/
@@ -774,6 +752,7 @@ static int i40evf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
* @netdev: network interface device structure
* @indir: indirection table
* @key: hash key
+ * @hfunc: hash function to use
*
 * Returns -EINVAL if the table specifies an invalid queue id, otherwise
* returns 0 after programming the table.
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index 5f71532be7f1..a7b87f935411 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -1,29 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include "i40evf.h"
#include "i40e_prototype.h"
@@ -473,6 +449,7 @@ static void i40evf_irq_affinity_release(struct kref *ref) {}
/**
* i40evf_request_traffic_irqs - Initialize MSI-X interrupts
* @adapter: board private structure
+ * @basename: device basename
*
* Allocates MSI-X vectors for tx and rx handling, and requests
* interrupts from the kernel.
@@ -705,7 +682,7 @@ i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan)
f = i40evf_find_vlan(adapter, vlan);
if (!f) {
- f = kzalloc(sizeof(*f), GFP_ATOMIC);
+ f = kzalloc(sizeof(*f), GFP_KERNEL);
if (!f)
goto clearout;
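
(Editor's note: the kzalloc flag change above is a context fix, not cosmetics. GFP_KERNEL may sleep and is therefore only legal in process context; GFP_ATOMIC never sleeps but dips into emergency reserves and fails more readily. A minimal sketch of the rule of thumb, with names from this driver and a hypothetical context flag:)

/* Illustrative only: choose the allocation flag by calling context. */
struct i40evf_vlan_filter *f;

if (in_atomic_context)				/* e.g. under a spinlock or in IRQ */
	f = kzalloc(sizeof(*f), GFP_ATOMIC);	/* cannot sleep */
else
	f = kzalloc(sizeof(*f), GFP_KERNEL);	/* may sleep and reclaim */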
@@ -745,6 +722,7 @@ static void i40evf_del_vlan(struct i40evf_adapter *adapter, u16 vlan)
/**
* i40evf_vlan_rx_add_vid - Add a VLAN filter to a device
* @netdev: network device struct
+ * @proto: unused protocol data
* @vid: VLAN tag
**/
static int i40evf_vlan_rx_add_vid(struct net_device *netdev,
@@ -762,6 +740,7 @@ static int i40evf_vlan_rx_add_vid(struct net_device *netdev,
/**
* i40evf_vlan_rx_kill_vid - Remove a VLAN filter from a device
* @netdev: network device struct
+ * @proto: unused protocol data
* @vid: VLAN tag
**/
static int i40evf_vlan_rx_kill_vid(struct net_device *netdev,
@@ -1946,7 +1925,8 @@ continue_reset:
* ndo_open() returning, so we can't assume it means all our open
* tasks have finished, since we're not holding the rtnl_lock here.
*/
- running = (adapter->state == __I40EVF_RUNNING);
+ running = ((adapter->state == __I40EVF_RUNNING) ||
+ (adapter->state == __I40EVF_RESETTING));
if (running) {
netif_carrier_off(netdev);
@@ -2352,7 +2332,7 @@ static int i40evf_validate_ch_config(struct i40evf_adapter *adapter,
total_max_rate += tx_rate;
num_qps += mqprio_qopt->qopt.count[i];
}
- if (num_qps > MAX_QUEUES)
+ if (num_qps > I40EVF_MAX_REQ_QUEUES)
return -EINVAL;
ret = i40evf_validate_tx_bandwidth(adapter, total_max_rate);
@@ -3160,7 +3140,7 @@ static int i40evf_set_features(struct net_device *netdev,
/**
* i40evf_features_check - Validate encapsulated packet conforms to limits
* @skb: skb buff
- * @netdev: This physical port's netdev
+ * @dev: This physical port's netdev
* @features: Offload features that the stack believes apply
**/
static netdev_features_t i40evf_features_check(struct sk_buff *skb,
@@ -3378,6 +3358,24 @@ int i40evf_process_config(struct i40evf_adapter *adapter)
if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ /* Do not turn on offloads when they are requested to be turned off.
+ * TSO needs minimum 576 bytes to work correctly.
+ */
+ if (netdev->wanted_features) {
+ if (!(netdev->wanted_features & NETIF_F_TSO) ||
+ netdev->mtu < 576)
+ netdev->features &= ~NETIF_F_TSO;
+ if (!(netdev->wanted_features & NETIF_F_TSO6) ||
+ netdev->mtu < 576)
+ netdev->features &= ~NETIF_F_TSO6;
+ if (!(netdev->wanted_features & NETIF_F_TSO_ECN))
+ netdev->features &= ~NETIF_F_TSO_ECN;
+ if (!(netdev->wanted_features & NETIF_F_GRO))
+ netdev->features &= ~NETIF_F_GRO;
+ if (!(netdev->wanted_features & NETIF_F_GSO))
+ netdev->features &= ~NETIF_F_GSO;
+ }
+
adapter->vsi.id = adapter->vsi_res->vsi_id;
adapter->vsi.back = adapter;
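
(Editor's note: the wanted_features block above keeps the VF from silently re-enabling offloads the user turned off — features are re-derived from PF capabilities, then masked by what user-space last requested, with TSO additionally gated on the 576-byte minimum it needs. A hypothetical helper expressing the same guard:)

/* Sketch (helper name is hypothetical): clear 'flag' from 'features'
 * unless user-space asked for it via ethtool. */
static netdev_features_t keep_if_wanted(struct net_device *netdev,
					netdev_features_t features,
					netdev_features_t flag)
{
	if (!(netdev->wanted_features & flag))
		features &= ~flag;
	return features;
}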
@@ -3692,7 +3690,8 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
- netdev = alloc_etherdev_mq(sizeof(struct i40evf_adapter), MAX_QUEUES);
+ netdev = alloc_etherdev_mq(sizeof(struct i40evf_adapter),
+ I40EVF_MAX_REQ_QUEUES);
if (!netdev) {
err = -ENOMEM;
goto err_alloc_etherdev;
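
(Editor's note: with the stale MAX_QUEUES (16) gone from i40evf.h, allocation and validation now share one bound: the netdev is sized for I40EVF_MAX_REQ_QUEUES (4), the same limit i40evf_validate_ch_config() enforces on channel requests. A condensed sketch of the invariant the two hunks establish:)

/* Sketch: one constant bounds both the probe-time queue allocation
 * and any later channel (mqprio) request. */
netdev = alloc_etherdev_mq(sizeof(struct i40evf_adapter),
			   I40EVF_MAX_REQ_QUEUES);
if (!netdev)
	return -ENOMEM;

if (num_qps > I40EVF_MAX_REQ_QUEUES)	/* later, in channel validation */
	return -EINVAL;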
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index 26a59890532f..565677de5ba3 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -1,29 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include "i40evf.h"
#include "i40e_prototype.h"
@@ -179,8 +155,7 @@ int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter)
/**
* i40evf_get_vf_config
- * @hw: pointer to the hardware structure
- * @len: length of buffer
+ * @adapter: private adapter structure
*
* Get VF configuration from PF and populate hw structure. Must be called after
* admin queue is initialized. Busy waits until response is received from PF,
@@ -423,8 +398,6 @@ int i40evf_request_queues(struct i40evf_adapter *adapter, int num)
/**
* i40evf_add_ether_addrs
* @adapter: adapter structure
- * @addrs: the MAC address filters to add (contiguous)
- * @count: number of filters
*
* Request that the PF add one or more addresses to our filters.
**/
@@ -497,8 +470,6 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
/**
* i40evf_del_ether_addrs
* @adapter: adapter structure
- * @addrs: the MAC address filters to remove (contiguous)
- * @count: number of filters
*
* Request that the PF remove one or more addresses from our filters.
**/
@@ -571,8 +542,6 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
/**
* i40evf_add_vlans
* @adapter: adapter structure
- * @vlans: the VLANs to add
- * @count: number of VLANs
*
* Request that the PF add one or more VLAN filters to our VSI.
**/
@@ -643,8 +612,6 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
/**
* i40evf_del_vlans
* @adapter: adapter structure
- * @vlans: the VLANs to remove
- * @count: number of VLANs
*
* Request that the PF remove one or more VLAN filters from our VSI.
**/
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 7dc5f045e969..7541ec2270b3 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -1049,7 +1049,9 @@ struct ice_aqc_set_event_mask {
* NVM Update commands (indirect 0x0703)
*/
struct ice_aqc_nvm {
- u8 cmd_flags;
+ __le16 offset_low;
+ u8 offset_high;
+ u8 cmd_flags;
#define ICE_AQC_NVM_LAST_CMD BIT(0)
#define ICE_AQC_NVM_PCIR_REQ BIT(0) /* Used by NVM Update reply */
#define ICE_AQC_NVM_PRESERVATION_S 1
@@ -1058,12 +1060,11 @@ struct ice_aqc_nvm {
#define ICE_AQC_NVM_PRESERVE_ALL BIT(1)
#define ICE_AQC_NVM_PRESERVE_SELECTED (3 << CSR_AQ_NVM_PRESERVATION_S)
#define ICE_AQC_NVM_FLASH_ONLY BIT(7)
- u8 module_typeid;
- __le16 length;
+ __le16 module_typeid;
+ __le16 length;
#define ICE_AQC_NVM_ERASE_LEN 0xFFFF
- __le32 offset;
- __le32 addr_high;
- __le32 addr_low;
+ __le32 addr_high;
+ __le32 addr_low;
};
/* Get/Set RSS key (indirect 0x0B04/0x0B02) */
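
(Editor's note: the rework keeps struct ice_aqc_nvm inside the 16 bytes an AQ descriptor reserves for command-specific data — a 24-bit offset now occupies bytes 0-2, freeing room to widen module_typeid to 16 bits. A quick size check, offered as a sketch rather than something the driver itself carries:)

/* 2 (offset_low) + 1 (offset_high) + 1 (cmd_flags) + 2 (module_typeid)
 * + 2 (length) + 4 (addr_high) + 4 (addr_low) = 16 bytes. */
static_assert(sizeof(struct ice_aqc_nvm) == 16);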
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
index fa7a69ac92b0..92da0a626ce0 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.c
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.c
@@ -16,7 +16,7 @@
* Read the NVM using the admin queue commands (0x0701)
*/
static enum ice_status
-ice_aq_read_nvm(struct ice_hw *hw, u8 module_typeid, u32 offset, u16 length,
+ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length,
void *data, bool last_command, struct ice_sq_cd *cd)
{
struct ice_aq_desc desc;
@@ -33,8 +33,9 @@ ice_aq_read_nvm(struct ice_hw *hw, u8 module_typeid, u32 offset, u16 length,
/* If this is the last command in a series, set the proper flag. */
if (last_command)
cmd->cmd_flags |= ICE_AQC_NVM_LAST_CMD;
- cmd->module_typeid = module_typeid;
- cmd->offset = cpu_to_le32(offset);
+ cmd->module_typeid = cpu_to_le16(module_typeid);
+ cmd->offset_low = cpu_to_le16(offset & 0xFFFF);
+ cmd->offset_high = (offset >> 16) & 0xFF;
cmd->length = cpu_to_le16(length);
return ice_aq_send_cmd(hw, &desc, data, length, cd);
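
(Editor's note: since the offset field shrank from 32 to 24 bits, a single command can no longer address past 16 MB. A guard like the following — hypothetical, using the driver's existing ICE_ERR_PARAM status — would make the new limit explicit at the call site:)

/* Sketch: reject offsets that no longer fit in the 24-bit AQ field. */
if (offset > 0xFFFFFF)
	return ICE_ERR_PARAM;

cmd->offset_low  = cpu_to_le16(offset & 0xFFFF);
cmd->offset_high = (offset >> 16) & 0xFF;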
diff --git a/drivers/net/ethernet/intel/igb/Makefile b/drivers/net/ethernet/intel/igb/Makefile
index c48583e98ac1..394c1e0656b9 100644
--- a/drivers/net/ethernet/intel/igb/Makefile
+++ b/drivers/net/ethernet/intel/igb/Makefile
@@ -1,31 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-################################################################################
-#
-# Intel 82575 PCI-Express Ethernet Linux driver
-# Copyright(c) 1999 - 2014 Intel Corporation.
-#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, see <http://www.gnu.org/licenses/>.
-#
-# The full GNU General Public License is included in this distribution in
-# the file called "COPYING".
-#
-# Contact Information:
-# Linux NICS <linux.nics@intel.com>
-# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-#
-################################################################################
-
+# Copyright(c) 1999 - 2018 Intel Corporation.
#
# Makefile for the Intel(R) 82575 PCI-Express ethernet driver
#
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index dd9b6cac220d..b13b42e5a1d9 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -1,26 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Intel(R) Gigabit Ethernet Linux driver
- * Copyright(c) 2007-2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2007 - 2018 Intel Corporation. */
/* e1000_82575
* e1000_82576
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h
index e53ebe97d709..6ad775b1a4c5 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.h
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.h
@@ -1,26 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Intel(R) Gigabit Ethernet Linux driver
- * Copyright(c) 2007-2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2007 - 2018 Intel Corporation. */
#ifndef _E1000_82575_H_
#define _E1000_82575_H_
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index 98534f765e0e..252440a418dc 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -1,26 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Intel(R) Gigabit Ethernet Linux driver
- * Copyright(c) 2007-2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2007 - 2018 Intel Corporation. */
#ifndef _E1000_DEFINES_H_
#define _E1000_DEFINES_H_
@@ -491,6 +470,8 @@
* manageability enabled, allowing us room for 15 multicast addresses.
*/
#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */
+#define E1000_RAH_ASEL_SRC_ADDR 0x00010000
+#define E1000_RAH_QSEL_ENABLE 0x10000000
#define E1000_RAL_MAC_ADDR_LEN 4
#define E1000_RAH_MAC_ADDR_LEN 2
#define E1000_RAH_POOL_MASK 0x03FC0000
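
(Editor's note: the two new RAH bits extend igb's receive-address filters — E1000_RAH_ASEL_SRC_ADDR switches an entry to match on the source rather than the destination MAC, and E1000_RAH_QSEL_ENABLE makes the hardware honor the entry's queue-select field. A hedged sketch of enabling both on one entry; rd32/wr32 are the driver's register accessors, and the queue-number field itself is deliberately omitted:)

/* Sketch only: enable source-address matching and queue steering
 * on receive-address entry 'index'. */
u32 rah = rd32(E1000_RAH(index));

rah |= E1000_RAH_AV;		/* entry valid */
rah |= E1000_RAH_ASEL_SRC_ADDR;	/* compare against the source MAC */
rah |= E1000_RAH_QSEL_ENABLE;	/* steer matches to the selected queue */
wr32(E1000_RAH(index), rah);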
diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
index ff835e1e853d..5d87957b2627 100644
--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
@@ -1,25 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Intel(R) Gigabit Ethernet Linux driver
- * Copyright(c) 2007-2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2007 - 2018 Intel Corporation. */
#ifndef _E1000_HW_H_
#define _E1000_HW_H_
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index 6f548247e6d8..c54ebedca6da 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -1,26 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Intel(R) Gigabit Ethernet Linux driver
- * Copyright(c) 2007-2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2007 - 2018 Intel Corporation. */
/* e1000_i210
* e1000_i211
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h
index 56f015ccb206..5c437fdc49ee 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.h
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.h
@@ -1,26 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Intel(R) Gigabit Ethernet Linux driver
- * Copyright(c) 2007-2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2007 - 2018 Intel Corporation. */
#ifndef _E1000_I210_H_
#define _E1000_I210_H_
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index 298afa0d9159..79ee0a747260 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -1,26 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Intel(R) Gigabit Ethernet Linux driver
- * Copyright(c) 2007-2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2007 - 2018 Intel Corporation. */
#include <linux/if_ether.h>
#include <linux/delay.h>
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.h b/drivers/net/ethernet/intel/igb/e1000_mac.h
index 04d80c765aee..6e110f28f922 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.h
@@ -1,26 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Intel(R) Gigabit Ethernet Linux driver
- * Copyright(c) 2007-2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2007 - 2018 Intel Corporation. */
#ifndef _E1000_MAC_H_
#define _E1000_MAC_H_
diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.c b/drivers/net/ethernet/intel/igb/e1000_mbx.c
index ef42f1689b3b..46debd991bfe 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mbx.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.c
@@ -1,26 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Intel(R) Gigabit Ethernet Linux driver
- * Copyright(c) 2007-2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2007 - 2018 Intel Corporation. */
#include "e1000_mbx.h"
diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.h b/drivers/net/ethernet/intel/igb/e1000_mbx.h
index 4f0ecd28354d..178e60ec71d4 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mbx.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.h
@@ -1,26 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Intel(R) Gigabit Ethernet Linux driver
- * Copyright(c) 2007-2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2007 - 2018 Intel Corporation. */
#ifndef _E1000_MBX_H_
#define _E1000_MBX_H_
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c
index e4596f151cd4..09f4dcb09632 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.c
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c
@@ -1,25 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Intel(R) Gigabit Ethernet Linux driver
- * Copyright(c) 2007-2014 Intel Corporation.
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2007 - 2018 Intel Corporation. */
#include <linux/if_ether.h>
#include <linux/delay.h>
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.h b/drivers/net/ethernet/intel/igb/e1000_nvm.h
index dde68cd54a53..091cddf4ada8 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.h
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.h
@@ -1,26 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Intel(R) Gigabit Ethernet Linux driver
- * Copyright(c) 2007-2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2007 - 2018 Intel Corporation. */
#ifndef _E1000_NVM_H_
#define _E1000_NVM_H_
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index 4ec61243da82..2be0e762ec69 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -1,26 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Intel(R) Gigabit Ethernet Linux driver
- * Copyright(c) 2007-2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2007 - 2018 Intel Corporation. */
#include <linux/if_ether.h>
#include <linux/delay.h>
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h
index 856d2cda0643..5894e4b1d0a8 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.h
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.h
@@ -1,26 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Intel(R) Gigabit Ethernet Linux driver
- * Copyright(c) 2007-2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2007 - 2018 Intel Corporation. */
#ifndef _E1000_PHY_H_
#define _E1000_PHY_H_
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index e8fa8c6530e0..0ad737d2f289 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -1,26 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Intel(R) Gigabit Ethernet Linux driver
- * Copyright(c) 2007-2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2007 - 2018 Intel Corporation. */
#ifndef _E1000_REGS_H_
#define _E1000_REGS_H_
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 8dbc399b345e..9643b5b3d444 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -1,26 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Intel(R) Gigabit Ethernet Linux driver
- * Copyright(c) 2007-2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2007 - 2018 Intel Corporation. */
/* Linux PRO/1000 Ethernet Driver main header file */
@@ -442,6 +421,8 @@ struct hwmon_buff {
enum igb_filter_match_flags {
IGB_FILTER_FLAG_ETHER_TYPE = 0x1,
IGB_FILTER_FLAG_VLAN_TCI = 0x2,
+ IGB_FILTER_FLAG_SRC_MAC_ADDR = 0x4,
+ IGB_FILTER_FLAG_DST_MAC_ADDR = 0x8,
};
#define IGB_MAX_RXNFC_FILTERS 16
@@ -456,11 +437,14 @@ struct igb_nfc_input {
u8 match_flags;
__be16 etype;
__be16 vlan_tci;
+ u8 src_addr[ETH_ALEN];
+ u8 dst_addr[ETH_ALEN];
};
struct igb_nfc_filter {
struct hlist_node nfc_node;
struct igb_nfc_input filter;
+ unsigned long cookie;
u16 etype_reg_index;
u16 sw_idx;
u16 action;
@@ -474,6 +458,8 @@ struct igb_mac_addr {
#define IGB_MAC_STATE_DEFAULT 0x1
#define IGB_MAC_STATE_IN_USE 0x2
+#define IGB_MAC_STATE_SRC_ADDR 0x4
+#define IGB_MAC_STATE_QUEUE_STEERING 0x8
/* board specific private data structure */
struct igb_adapter {
@@ -598,6 +584,7 @@ struct igb_adapter {
/* RX network flow classification support */
struct hlist_head nfc_filter_list;
+ struct hlist_head cls_flower_list;
unsigned int nfc_filter_count;
/* lock for RX network flow classification filter */
spinlock_t nfc_lock;
@@ -739,4 +726,9 @@ int igb_add_filter(struct igb_adapter *adapter,
int igb_erase_filter(struct igb_adapter *adapter,
struct igb_nfc_filter *input);
+int igb_add_mac_steering_filter(struct igb_adapter *adapter,
+ const u8 *addr, u8 queue, u8 flags);
+int igb_del_mac_steering_filter(struct igb_adapter *adapter,
+ const u8 *addr, u8 queue, u8 flags);
+
#endif /* _IGB_H_ */
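The header changes above model each receive filter as a match_flags bitmask plus per-field values: alongside the existing EtherType and VLAN TCI bits there are now source/destination MAC bits, and MAC-table entries gain SRC_ADDR and QUEUE_STEERING state flags. As a minimal standalone sketch (plain C, not driver code), this is how the bitmask composes and how a "source-address-only" rule is detected, mirroring the i210 check added later in igb_add_filter():

#include <stdint.h>
#include <stdio.h>

#define IGB_FILTER_FLAG_ETHER_TYPE   0x1
#define IGB_FILTER_FLAG_VLAN_TCI     0x2
#define IGB_FILTER_FLAG_SRC_MAC_ADDR 0x4
#define IGB_FILTER_FLAG_DST_MAC_ADDR 0x8

/* True when no bit other than SRC_MAC_ADDR is set. */
static int src_addr_only(uint8_t flags)
{
	return !(flags & ~IGB_FILTER_FLAG_SRC_MAC_ADDR);
}

int main(void)
{
	uint8_t rule = IGB_FILTER_FLAG_SRC_MAC_ADDR;

	printf("src-only? %d\n", src_addr_only(rule));	/* 1 */
	rule |= IGB_FILTER_FLAG_VLAN_TCI;
	printf("src-only? %d\n", src_addr_only(rule));	/* 0 */
	return 0;
}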
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index e77ba0d5866d..2d798499d35e 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -1,26 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Intel(R) Gigabit Ethernet Linux driver
- * Copyright(c) 2007-2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2007 - 2018 Intel Corporation. */
/* ethtool support for igb */
@@ -2495,6 +2474,23 @@ static int igb_get_ethtool_nfc_entry(struct igb_adapter *adapter,
fsp->h_ext.vlan_tci = rule->filter.vlan_tci;
fsp->m_ext.vlan_tci = htons(VLAN_PRIO_MASK);
}
+ if (rule->filter.match_flags & IGB_FILTER_FLAG_DST_MAC_ADDR) {
+ ether_addr_copy(fsp->h_u.ether_spec.h_dest,
+ rule->filter.dst_addr);
+ /* As we only support matching by the full
+ * mask, return the mask to userspace
+ */
+ eth_broadcast_addr(fsp->m_u.ether_spec.h_dest);
+ }
+ if (rule->filter.match_flags & IGB_FILTER_FLAG_SRC_MAC_ADDR) {
+ ether_addr_copy(fsp->h_u.ether_spec.h_source,
+ rule->filter.src_addr);
+ /* As we only support matching by the full
+ * mask, return the mask to userspace
+ */
+ eth_broadcast_addr(fsp->m_u.ether_spec.h_source);
+ }
+
return 0;
}
return -EINVAL;
@@ -2768,14 +2764,41 @@ static int igb_rxnfc_write_vlan_prio_filter(struct igb_adapter *adapter,
int igb_add_filter(struct igb_adapter *adapter, struct igb_nfc_filter *input)
{
+ struct e1000_hw *hw = &adapter->hw;
int err = -EINVAL;
+ if (hw->mac.type == e1000_i210 &&
+ !(input->filter.match_flags & ~IGB_FILTER_FLAG_SRC_MAC_ADDR)) {
+ dev_err(&adapter->pdev->dev,
+ "i210 doesn't support flow classification rules specifying only source addresses.\n");
+ return -EOPNOTSUPP;
+ }
+
if (input->filter.match_flags & IGB_FILTER_FLAG_ETHER_TYPE) {
err = igb_rxnfc_write_etype_filter(adapter, input);
if (err)
return err;
}
+ if (input->filter.match_flags & IGB_FILTER_FLAG_DST_MAC_ADDR) {
+ err = igb_add_mac_steering_filter(adapter,
+ input->filter.dst_addr,
+ input->action, 0);
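+		/* A non-negative return is the RAR index that was used;
+		 * clamp it to zero so only real errors propagate.
+		 */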
+ err = min_t(int, err, 0);
+ if (err)
+ return err;
+ }
+
+ if (input->filter.match_flags & IGB_FILTER_FLAG_SRC_MAC_ADDR) {
+ err = igb_add_mac_steering_filter(adapter,
+ input->filter.src_addr,
+ input->action,
+ IGB_MAC_STATE_SRC_ADDR);
+ err = min_t(int, err, 0);
+ if (err)
+ return err;
+ }
+
if (input->filter.match_flags & IGB_FILTER_FLAG_VLAN_TCI)
err = igb_rxnfc_write_vlan_prio_filter(adapter, input);
@@ -2824,6 +2847,15 @@ int igb_erase_filter(struct igb_adapter *adapter, struct igb_nfc_filter *input)
igb_clear_vlan_prio_filter(adapter,
ntohs(input->filter.vlan_tci));
+ if (input->filter.match_flags & IGB_FILTER_FLAG_SRC_MAC_ADDR)
+ igb_del_mac_steering_filter(adapter, input->filter.src_addr,
+ input->action,
+ IGB_MAC_STATE_SRC_ADDR);
+
+ if (input->filter.match_flags & IGB_FILTER_FLAG_DST_MAC_ADDR)
+ igb_del_mac_steering_filter(adapter, input->filter.dst_addr,
+ input->action, 0);
+
return 0;
}
@@ -2865,7 +2897,7 @@ static int igb_update_ethtool_nfc_entry(struct igb_adapter *adapter,
/* add filter to the list */
if (parent)
- hlist_add_behind(&parent->nfc_node, &input->nfc_node);
+ hlist_add_behind(&input->nfc_node, &parent->nfc_node);
else
hlist_add_head(&input->nfc_node, &adapter->nfc_filter_list);
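The one-line change above fixes swapped arguments: in the kernel's list API the new node comes first, hlist_add_behind(n, prev), so the rule being inserted must be the first argument and the existing node it follows the second. A minimal userland re-implementation (illustration only, not the kernel's hlist) shows the ordering:

#include <stddef.h>
#include <stdio.h>

struct node { struct node *next; int val; };

/* Same argument order as the kernel helper: new node first. */
static void add_behind(struct node *n, struct node *prev)
{
	n->next = prev->next;
	prev->next = n;
}

int main(void)
{
	struct node a = { NULL, 1 }, b = { NULL, 2 };

	add_behind(&b, &a);		/* insert b after a */
	for (struct node *p = &a; p; p = p->next)
		printf("%d\n", p->val);	/* prints 1 then 2 */
	return 0;
}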
@@ -2905,10 +2937,6 @@ static int igb_add_ethtool_nfc_entry(struct igb_adapter *adapter,
if ((fsp->flow_type & ~FLOW_EXT) != ETHER_FLOW)
return -EINVAL;
- if (fsp->m_u.ether_spec.h_proto != ETHER_TYPE_FULL_MASK &&
- fsp->m_ext.vlan_tci != htons(VLAN_PRIO_MASK))
- return -EINVAL;
-
input = kzalloc(sizeof(*input), GFP_KERNEL);
if (!input)
return -ENOMEM;
@@ -2918,6 +2946,20 @@ static int igb_add_ethtool_nfc_entry(struct igb_adapter *adapter,
input->filter.match_flags = IGB_FILTER_FLAG_ETHER_TYPE;
}
+ /* Only support matching addresses by the full mask */
+ if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_source)) {
+ input->filter.match_flags |= IGB_FILTER_FLAG_SRC_MAC_ADDR;
+ ether_addr_copy(input->filter.src_addr,
+ fsp->h_u.ether_spec.h_source);
+ }
+
+ /* Only support matching addresses by the full mask */
+ if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_dest)) {
+ input->filter.match_flags |= IGB_FILTER_FLAG_DST_MAC_ADDR;
+ ether_addr_copy(input->filter.dst_addr,
+ fsp->h_u.ether_spec.h_dest);
+ }
+
if ((fsp->flow_type & FLOW_EXT) && fsp->m_ext.vlan_tci) {
if (fsp->m_ext.vlan_tci != htons(VLAN_PRIO_MASK)) {
err = -EINVAL;
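Both the ethtool path above and the cls_flower path added in igb_main.c apply the same rule: a MAC mask of all zeros means "ignore this field", all ones means "match exactly", and anything in between is rejected. A standalone sketch of that classification (plain C, assuming 6-byte addresses as in ETH_ALEN):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

/* Returns 0 = ignore field, 1 = match exactly, -1 = unsupported mask. */
static int classify_mask(const uint8_t mask[ETH_ALEN])
{
	static const uint8_t zero[ETH_ALEN];
	static const uint8_t bcast[ETH_ALEN] = {
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff
	};

	if (!memcmp(mask, zero, ETH_ALEN))
		return 0;
	if (!memcmp(mask, bcast, ETH_ALEN))
		return 1;
	return -1;
}

int main(void)
{
	uint8_t partial[ETH_ALEN] = { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 };
	uint8_t full[ETH_ALEN]    = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	printf("%d %d\n", classify_mask(partial), classify_mask(full)); /* -1 1 */
	return 0;
}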
diff --git a/drivers/net/ethernet/intel/igb/igb_hwmon.c b/drivers/net/ethernet/intel/igb/igb_hwmon.c
index bebe43b3a836..3b83747b2700 100644
--- a/drivers/net/ethernet/intel/igb/igb_hwmon.c
+++ b/drivers/net/ethernet/intel/igb/igb_hwmon.c
@@ -1,26 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Intel(R) Gigabit Ethernet Linux driver
- * Copyright(c) 2007-2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2007 - 2018 Intel Corporation. */
#include "igb.h"
#include "e1000_82575.h"
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index cce7ada89255..c33821d2afb3 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1,26 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Intel(R) Gigabit Ethernet Linux driver
- * Copyright(c) 2007-2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2007 - 2018 Intel Corporation. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -36,6 +15,7 @@
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/pkt_sched.h>
+#include <net/pkt_cls.h>
#include <linux/net_tstamp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
@@ -2078,6 +2058,7 @@ int igb_up(struct igb_adapter *adapter)
igb_assign_vector(adapter->q_vector[0], 0);
/* Clear any pending interrupts. */
+ rd32(E1000_TSICR);
rd32(E1000_ICR);
igb_irq_enable(adapter);
@@ -2513,6 +2494,250 @@ static int igb_offload_cbs(struct igb_adapter *adapter,
return 0;
}
+#define ETHER_TYPE_FULL_MASK ((__force __be16)~0)
+#define VLAN_PRIO_FULL_MASK (0x07)
+
+static int igb_parse_cls_flower(struct igb_adapter *adapter,
+ struct tc_cls_flower_offload *f,
+ int traffic_class,
+ struct igb_nfc_filter *input)
+{
+ struct netlink_ext_ack *extack = f->common.extack;
+
+ if (f->dissector->used_keys &
+ ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_VLAN))) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unsupported key used, only BASIC, CONTROL, ETH_ADDRS and VLAN are supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_dissector_key_eth_addrs *key, *mask;
+
+ key = skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_ETH_ADDRS,
+ f->key);
+ mask = skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_ETH_ADDRS,
+ f->mask);
+
+ if (!is_zero_ether_addr(mask->dst)) {
+ if (!is_broadcast_ether_addr(mask->dst)) {
+ NL_SET_ERR_MSG_MOD(extack, "Only full masks are supported for destination MAC address");
+ return -EINVAL;
+ }
+
+ input->filter.match_flags |=
+ IGB_FILTER_FLAG_DST_MAC_ADDR;
+ ether_addr_copy(input->filter.dst_addr, key->dst);
+ }
+
+ if (!is_zero_ether_addr(mask->src)) {
+ if (!is_broadcast_ether_addr(mask->src)) {
+ NL_SET_ERR_MSG_MOD(extack, "Only full masks are supported for source MAC address");
+ return -EINVAL;
+ }
+
+ input->filter.match_flags |=
+ IGB_FILTER_FLAG_SRC_MAC_ADDR;
+ ether_addr_copy(input->filter.src_addr, key->src);
+ }
+ }
+
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_dissector_key_basic *key, *mask;
+
+ key = skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_BASIC,
+ f->key);
+ mask = skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_BASIC,
+ f->mask);
+
+ if (mask->n_proto) {
+ if (mask->n_proto != ETHER_TYPE_FULL_MASK) {
+ NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for EtherType filter");
+ return -EINVAL;
+ }
+
+ input->filter.match_flags |= IGB_FILTER_FLAG_ETHER_TYPE;
+ input->filter.etype = key->n_proto;
+ }
+ }
+
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_dissector_key_vlan *key, *mask;
+
+ key = skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_VLAN,
+ f->key);
+ mask = skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_VLAN,
+ f->mask);
+
+ if (mask->vlan_priority) {
+ if (mask->vlan_priority != VLAN_PRIO_FULL_MASK) {
+ NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for VLAN priority");
+ return -EINVAL;
+ }
+
+ input->filter.match_flags |= IGB_FILTER_FLAG_VLAN_TCI;
+ input->filter.vlan_tci = key->vlan_priority;
+ }
+ }
+
+ input->action = traffic_class;
+ input->cookie = f->cookie;
+
+ return 0;
+}
+
+static int igb_configure_clsflower(struct igb_adapter *adapter,
+ struct tc_cls_flower_offload *cls_flower)
+{
+ struct netlink_ext_ack *extack = cls_flower->common.extack;
+ struct igb_nfc_filter *filter, *f;
+ int err, tc;
+
+ tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid);
+ if (tc < 0) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid traffic class");
+ return -EINVAL;
+ }
+
+ filter = kzalloc(sizeof(*filter), GFP_KERNEL);
+ if (!filter)
+ return -ENOMEM;
+
+ err = igb_parse_cls_flower(adapter, cls_flower, tc, filter);
+ if (err < 0)
+ goto err_parse;
+
+ spin_lock(&adapter->nfc_lock);
+
+ hlist_for_each_entry(f, &adapter->nfc_filter_list, nfc_node) {
+ if (!memcmp(&f->filter, &filter->filter, sizeof(f->filter))) {
+ err = -EEXIST;
+ NL_SET_ERR_MSG_MOD(extack,
+ "This filter is already set in ethtool");
+ goto err_locked;
+ }
+ }
+
+ hlist_for_each_entry(f, &adapter->cls_flower_list, nfc_node) {
+ if (!memcmp(&f->filter, &filter->filter, sizeof(f->filter))) {
+ err = -EEXIST;
+ NL_SET_ERR_MSG_MOD(extack,
+ "This filter is already set in cls_flower");
+ goto err_locked;
+ }
+ }
+
+ err = igb_add_filter(adapter, filter);
+ if (err < 0) {
+ NL_SET_ERR_MSG_MOD(extack, "Could not add filter to the adapter");
+ goto err_locked;
+ }
+
+ hlist_add_head(&filter->nfc_node, &adapter->cls_flower_list);
+
+ spin_unlock(&adapter->nfc_lock);
+
+ return 0;
+
+err_locked:
+ spin_unlock(&adapter->nfc_lock);
+
+err_parse:
+ kfree(filter);
+
+ return err;
+}
+
+static int igb_delete_clsflower(struct igb_adapter *adapter,
+ struct tc_cls_flower_offload *cls_flower)
+{
+ struct igb_nfc_filter *filter;
+ int err;
+
+ spin_lock(&adapter->nfc_lock);
+
+ hlist_for_each_entry(filter, &adapter->cls_flower_list, nfc_node)
+ if (filter->cookie == cls_flower->cookie)
+ break;
+
+ if (!filter) {
+ err = -ENOENT;
+ goto out;
+ }
+
+ err = igb_erase_filter(adapter, filter);
+ if (err < 0)
+ goto out;
+
+ hlist_del(&filter->nfc_node);
+ kfree(filter);
+
+out:
+ spin_unlock(&adapter->nfc_lock);
+
+ return err;
+}
+
+static int igb_setup_tc_cls_flower(struct igb_adapter *adapter,
+ struct tc_cls_flower_offload *cls_flower)
+{
+ switch (cls_flower->command) {
+ case TC_CLSFLOWER_REPLACE:
+ return igb_configure_clsflower(adapter, cls_flower);
+ case TC_CLSFLOWER_DESTROY:
+ return igb_delete_clsflower(adapter, cls_flower);
+ case TC_CLSFLOWER_STATS:
+ return -EOPNOTSUPP;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int igb_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
+{
+ struct igb_adapter *adapter = cb_priv;
+
+ if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data))
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return igb_setup_tc_cls_flower(adapter, type_data);
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int igb_setup_tc_block(struct igb_adapter *adapter,
+ struct tc_block_offload *f)
+{
+ if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ return -EOPNOTSUPP;
+
+ switch (f->command) {
+ case TC_BLOCK_BIND:
+ return tcf_block_cb_register(f->block, igb_setup_tc_block_cb,
+ adapter, adapter);
+ case TC_BLOCK_UNBIND:
+ tcf_block_cb_unregister(f->block, igb_setup_tc_block_cb,
+ adapter);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
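igb_configure_clsflower() above detects duplicates by memcmp() over the whole embedded igb_nfc_filter.filter struct, which is only sound because every filter is kzalloc'ed: unset fields and padding are zero in both operands. A standalone sketch of the idea (plain C; calloc stands in for kzalloc):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct demo_filter {
	unsigned char match_flags;
	unsigned char dst_addr[6];
};

int main(void)
{
	/* Zeroed allocation makes whole-struct memcmp a safe equality test. */
	struct demo_filter *a = calloc(1, sizeof(*a));
	struct demo_filter *b = calloc(1, sizeof(*b));

	a->match_flags = b->match_flags = 0x8;	/* DST_MAC_ADDR */
	memcpy(a->dst_addr, "\x02\x00\x00\x00\x00\x01", 6);
	memcpy(b->dst_addr, "\x02\x00\x00\x00\x00\x01", 6);

	printf("duplicate? %d\n", !memcmp(a, b, sizeof(*a)));	/* 1 */
	free(a);
	free(b);
	return 0;
}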
@@ -2521,6 +2746,8 @@ static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type,
switch (type) {
case TC_SETUP_QDISC_CBS:
return igb_offload_cbs(adapter, type_data);
+ case TC_SETUP_BLOCK:
+ return igb_setup_tc_block(adapter, type_data);
default:
return -EOPNOTSUPP;
@@ -2822,6 +3049,9 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (hw->mac.type >= e1000_82576)
netdev->features |= NETIF_F_SCTP_CRC;
+ if (hw->mac.type >= e1000_i350)
+ netdev->features |= NETIF_F_HW_TC;
+
#define IGB_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
NETIF_F_GSO_GRE_CSUM | \
NETIF_F_GSO_IPXIP4 | \
@@ -3636,6 +3866,7 @@ static int __igb_open(struct net_device *netdev, bool resuming)
napi_enable(&(adapter->q_vector[i]->napi));
/* Clear any pending interrupts. */
+ rd32(E1000_TSICR);
rd32(E1000_ICR);
igb_irq_enable(adapter);
@@ -3824,11 +4055,6 @@ void igb_configure_tx_ring(struct igb_adapter *adapter,
u64 tdba = ring->dma;
int reg_idx = ring->reg_idx;
- /* disable the queue */
- wr32(E1000_TXDCTL(reg_idx), 0);
- wrfl();
- mdelay(10);
-
wr32(E1000_TDLEN(reg_idx),
ring->count * sizeof(union e1000_adv_tx_desc));
wr32(E1000_TDBAL(reg_idx),
@@ -3859,8 +4085,16 @@ void igb_configure_tx_ring(struct igb_adapter *adapter,
**/
static void igb_configure_tx(struct igb_adapter *adapter)
{
+ struct e1000_hw *hw = &adapter->hw;
int i;
+ /* disable the queues */
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ wr32(E1000_TXDCTL(adapter->tx_ring[i]->reg_idx), 0);
+
+ wrfl();
+ usleep_range(10000, 20000);
+
for (i = 0; i < adapter->num_tx_queues; i++)
igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
}
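The two hunks above move the transmit-queue disable out of the per-ring path: instead of disabling each queue, flushing, and busy-waiting 10 ms per ring, all queues are disabled first, the write is flushed once, and a single 10-20 ms sleep follows; usleep_range() also yields the CPU where mdelay() spun. A trivial sketch of the saved time (illustrative arithmetic only):

#include <stdio.h>

int main(void)
{
	int queues = 8;	/* example multiqueue configuration */

	printf("old: %d ms of busy-waiting\n", queues * 10);	/* mdelay(10) per ring */
	printf("new: 10-20 ms, sleeping once\n");	/* usleep_range(10000, 20000) */
	return 0;
}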
@@ -6856,8 +7090,35 @@ static void igb_set_default_mac_filter(struct igb_adapter *adapter)
igb_rar_set_index(adapter, 0);
}
-static int igb_add_mac_filter(struct igb_adapter *adapter, const u8 *addr,
- const u8 queue)
+/* If the filter to be added and an already existing filter express
+ * the same address and address type, only the remaining configuration
+ * needs to be overridden, for example the queue used to steer
+ * traffic.
+ */
+static bool igb_mac_entry_can_be_used(const struct igb_mac_addr *entry,
+ const u8 *addr, const u8 flags)
+{
+ if (!(entry->state & IGB_MAC_STATE_IN_USE))
+ return true;
+
+ if ((entry->state & IGB_MAC_STATE_SRC_ADDR) !=
+ (flags & IGB_MAC_STATE_SRC_ADDR))
+ return false;
+
+ if (!ether_addr_equal(addr, entry->addr))
+ return false;
+
+ return true;
+}
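igb_mac_entry_can_be_used() above encodes the reuse rule: a RAR slot is usable if it is free, or if it already holds the same address with the same source/destination match type, in which case only the queue and flags are updated. A standalone sketch of the predicate (plain C, mirroring the state bits from igb.h):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define STATE_IN_USE   0x2	/* as IGB_MAC_STATE_IN_USE */
#define STATE_SRC_ADDR 0x4	/* as IGB_MAC_STATE_SRC_ADDR */

struct entry { uint8_t state; uint8_t addr[6]; };

static bool can_be_used(const struct entry *e, const uint8_t addr[6],
			uint8_t flags)
{
	if (!(e->state & STATE_IN_USE))
		return true;	/* free slot: always usable */
	if ((e->state & STATE_SRC_ADDR) != (flags & STATE_SRC_ADDR))
		return false;	/* src vs dst match type differs */
	return !memcmp(e->addr, addr, 6);	/* must be the same address */
}

int main(void)
{
	struct entry e = { STATE_IN_USE, { 2, 0, 0, 0, 0, 1 } };
	uint8_t same[6] = { 2, 0, 0, 0, 0, 1 };

	printf("%d\n", can_be_used(&e, same, 0));		/* 1: reuse */
	printf("%d\n", can_be_used(&e, same, STATE_SRC_ADDR));	/* 0 */
	return 0;
}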
+
+/* Add a MAC filter for 'addr', directing matching traffic to 'queue'.
+ * 'flags' indicates what kind of match is made: by default the match
+ * is on the destination address; pass IGB_MAC_STATE_SRC_ADDR to match
+ * on the source address instead.
+ */
+static int igb_add_mac_filter_flags(struct igb_adapter *adapter,
+ const u8 *addr, const u8 queue,
+ const u8 flags)
{
struct e1000_hw *hw = &adapter->hw;
int rar_entries = hw->mac.rar_entry_count -
@@ -6872,12 +7133,13 @@ static int igb_add_mac_filter(struct igb_adapter *adapter, const u8 *addr,
* addresses.
*/
for (i = 0; i < rar_entries; i++) {
- if (adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE)
+ if (!igb_mac_entry_can_be_used(&adapter->mac_table[i],
+ addr, flags))
continue;
ether_addr_copy(adapter->mac_table[i].addr, addr);
adapter->mac_table[i].queue = queue;
- adapter->mac_table[i].state |= IGB_MAC_STATE_IN_USE;
+ adapter->mac_table[i].state |= IGB_MAC_STATE_IN_USE | flags;
igb_rar_set_index(adapter, i);
return i;
@@ -6886,9 +7148,22 @@ static int igb_add_mac_filter(struct igb_adapter *adapter, const u8 *addr,
return -ENOSPC;
}
-static int igb_del_mac_filter(struct igb_adapter *adapter, const u8 *addr,
+static int igb_add_mac_filter(struct igb_adapter *adapter, const u8 *addr,
const u8 queue)
{
+ return igb_add_mac_filter_flags(adapter, addr, queue, 0);
+}
+
+/* Remove a MAC filter for 'addr' directing matching traffic to
+ * 'queue'. 'flags' indicates what kind of match is to be removed: by
+ * default a destination-address match; pass IGB_MAC_STATE_SRC_ADDR to
+ * remove a source-address match instead.
+ */
+static int igb_del_mac_filter_flags(struct igb_adapter *adapter,
+ const u8 *addr, const u8 queue,
+ const u8 flags)
+{
struct e1000_hw *hw = &adapter->hw;
int rar_entries = hw->mac.rar_entry_count -
adapter->vfs_allocated_count;
@@ -6904,14 +7179,26 @@ static int igb_del_mac_filter(struct igb_adapter *adapter, const u8 *addr,
for (i = 0; i < rar_entries; i++) {
if (!(adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE))
continue;
+ if ((adapter->mac_table[i].state & flags) != flags)
+ continue;
if (adapter->mac_table[i].queue != queue)
continue;
if (!ether_addr_equal(adapter->mac_table[i].addr, addr))
continue;
- adapter->mac_table[i].state &= ~IGB_MAC_STATE_IN_USE;
- memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
- adapter->mac_table[i].queue = 0;
+ /* When a filter for the default address is "deleted",
+ * we return it to its initial configuration
+ */
+ if (adapter->mac_table[i].state & IGB_MAC_STATE_DEFAULT) {
+ adapter->mac_table[i].state =
+ IGB_MAC_STATE_DEFAULT | IGB_MAC_STATE_IN_USE;
+ adapter->mac_table[i].queue =
+ adapter->vfs_allocated_count;
+ } else {
+ adapter->mac_table[i].state = 0;
+ adapter->mac_table[i].queue = 0;
+ memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
+ }
igb_rar_set_index(adapter, i);
return 0;
@@ -6920,6 +7207,34 @@ static int igb_del_mac_filter(struct igb_adapter *adapter, const u8 *addr,
return -ENOENT;
}
+static int igb_del_mac_filter(struct igb_adapter *adapter, const u8 *addr,
+ const u8 queue)
+{
+ return igb_del_mac_filter_flags(adapter, addr, queue, 0);
+}
+
+int igb_add_mac_steering_filter(struct igb_adapter *adapter,
+ const u8 *addr, u8 queue, u8 flags)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ /* In theory, this should be supported on 82575 as well, but
+ * that part wasn't easily accessible during development.
+ */
+ if (hw->mac.type != e1000_i210)
+ return -EOPNOTSUPP;
+
+ return igb_add_mac_filter_flags(adapter, addr, queue,
+ IGB_MAC_STATE_QUEUE_STEERING | flags);
+}
+
+int igb_del_mac_steering_filter(struct igb_adapter *adapter,
+ const u8 *addr, u8 queue, u8 flags)
+{
+ return igb_del_mac_filter_flags(adapter, addr, queue,
+ IGB_MAC_STATE_QUEUE_STEERING | flags);
+}
+
static int igb_uc_sync(struct net_device *netdev, const unsigned char *addr)
{
struct igb_adapter *adapter = netdev_priv(netdev);
@@ -8763,12 +9078,24 @@ static void igb_rar_set_index(struct igb_adapter *adapter, u32 index)
if (is_valid_ether_addr(addr))
rar_high |= E1000_RAH_AV;
- if (hw->mac.type == e1000_82575)
+ if (adapter->mac_table[index].state & IGB_MAC_STATE_SRC_ADDR)
+ rar_high |= E1000_RAH_ASEL_SRC_ADDR;
+
+ switch (hw->mac.type) {
+ case e1000_82575:
+ case e1000_i210:
+ if (adapter->mac_table[index].state &
+ IGB_MAC_STATE_QUEUE_STEERING)
+ rar_high |= E1000_RAH_QSEL_ENABLE;
+
rar_high |= E1000_RAH_POOL_1 *
adapter->mac_table[index].queue;
- else
+ break;
+ default:
rar_high |= E1000_RAH_POOL_1 <<
adapter->mac_table[index].queue;
+ break;
+ }
}
wr32(E1000_RAL(index), rar_low);
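igb_rar_set_index() above now folds the new state bits into the receive-address-high word: the ASEL bit selects source-address matching, QSEL_ENABLE turns on queue steering on 82575/i210, and the pool field holds a plain queue number on those parts but a bitmask on the others. A standalone sketch of the composition; the bit positions are illustrative placeholders, not the real E1000_RAH_* values:

#include <stdint.h>
#include <stdio.h>

#define RAH_AV          (1u << 31)	/* placeholder: address valid */
#define RAH_ASEL_SRC    (1u << 16)	/* placeholder: match source address */
#define RAH_QSEL_ENABLE (1u << 28)	/* placeholder: queue-select enable */
#define RAH_POOL_1      (1u << 18)	/* placeholder: pool/queue field LSB */

static uint32_t rar_high(int is_82575_or_i210, int src_match, int steer,
			 uint32_t queue)
{
	uint32_t rah = RAH_AV;

	if (src_match)
		rah |= RAH_ASEL_SRC;
	if (is_82575_or_i210) {
		if (steer)
			rah |= RAH_QSEL_ENABLE;
		rah |= RAH_POOL_1 * queue;	/* pool field holds a number */
	} else {
		rah |= RAH_POOL_1 << queue;	/* pool field is a bitmask */
	}
	return rah;
}

int main(void)
{
	printf("0x%08x\n", rar_high(1, 1, 1, 2));
	return 0;
}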
@@ -9206,6 +9533,9 @@ static void igb_nfc_filter_exit(struct igb_adapter *adapter)
hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node)
igb_erase_filter(adapter, rule);
+ hlist_for_each_entry(rule, &adapter->cls_flower_list, nfc_node)
+ igb_erase_filter(adapter, rule);
+
spin_unlock(&adapter->nfc_lock);
}
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 7454b9895a65..9f4d700e09df 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -1,21 +1,6 @@
// SPDX-License-Identifier: GPL-2.0+
-/* PTP Hardware Clock (PHC) driver for the Intel 82576 and 82580
- *
- * Copyright (C) 2011 Richard Cochran <richardcochran@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, see <http://www.gnu.org/licenses/>.
- */
+/* Copyright (C) 2011 Richard Cochran <richardcochran@gmail.com> */
+
#include <linux/module.h>
#include <linux/device.h>
#include <linux/pci.h>
diff --git a/drivers/net/ethernet/intel/igbvf/Makefile b/drivers/net/ethernet/intel/igbvf/Makefile
index efe29dae384a..afd3e36eae75 100644
--- a/drivers/net/ethernet/intel/igbvf/Makefile
+++ b/drivers/net/ethernet/intel/igbvf/Makefile
@@ -1,31 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-################################################################################
-#
-# Intel(R) 82576 Virtual Function Linux driver
-# Copyright(c) 2009 - 2012 Intel Corporation.
-#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# The full GNU General Public License is included in this distribution in
-# the file called "COPYING".
-#
-# Contact Information:
-# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-#
-################################################################################
-
+# Copyright(c) 2009 - 2018 Intel Corporation.
#
# Makefile for the Intel(R) 82576 VF ethernet driver
#
diff --git a/drivers/net/ethernet/intel/igbvf/defines.h b/drivers/net/ethernet/intel/igbvf/defines.h
index 04bcfec0641b..4437f832412d 100644
--- a/drivers/net/ethernet/intel/igbvf/defines.h
+++ b/drivers/net/ethernet/intel/igbvf/defines.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
-
- Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#ifndef _E1000_DEFINES_H_
#define _E1000_DEFINES_H_
diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c
index ca39e3cccaeb..3ae358b35227 100644
--- a/drivers/net/ethernet/intel/igbvf/ethtool.c
+++ b/drivers/net/ethernet/intel/igbvf/ethtool.c
@@ -1,29 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*******************************************************************************
-
- Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 2009 - 2012 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 2009 - 2018 Intel Corporation. */
/* ethtool support for igbvf */
diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h
index f5bf248e22eb..eee26a3be90b 100644
--- a/drivers/net/ethernet/intel/igbvf/igbvf.h
+++ b/drivers/net/ethernet/intel/igbvf/igbvf.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
-
- Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 2009 - 2012 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 2009 - 2018 Intel Corporation. */
/* Linux PRO/1000 Ethernet Driver main header file */
diff --git a/drivers/net/ethernet/intel/igbvf/mbx.c b/drivers/net/ethernet/intel/igbvf/mbx.c
index 9195884096f8..163e5838f7c2 100644
--- a/drivers/net/ethernet/intel/igbvf/mbx.c
+++ b/drivers/net/ethernet/intel/igbvf/mbx.c
@@ -1,29 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*******************************************************************************
-
- Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 2009 - 2012 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 2009 - 2018 Intel Corporation. */
#include "mbx.h"
diff --git a/drivers/net/ethernet/intel/igbvf/mbx.h b/drivers/net/ethernet/intel/igbvf/mbx.h
index 479b062fe9ee..e5b31818d565 100644
--- a/drivers/net/ethernet/intel/igbvf/mbx.h
+++ b/drivers/net/ethernet/intel/igbvf/mbx.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
-
- Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#ifndef _E1000_MBX_H_
#define _E1000_MBX_H_
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index e2b7502f1953..f818f060e5a7 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -1,29 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*******************************************************************************
-
- Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 2009 - 2012 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 2009 - 2018 Intel Corporation. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/net/ethernet/intel/igbvf/regs.h b/drivers/net/ethernet/intel/igbvf/regs.h
index 614e52409f11..625a309a3355 100644
--- a/drivers/net/ethernet/intel/igbvf/regs.h
+++ b/drivers/net/ethernet/intel/igbvf/regs.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
-
- Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 2009 - 2012 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 2009 - 2018 Intel Corporation. */
#ifndef _E1000_REGS_H_
#define _E1000_REGS_H_
diff --git a/drivers/net/ethernet/intel/igbvf/vf.c b/drivers/net/ethernet/intel/igbvf/vf.c
index bfe8d8297b2e..b8ba3f94c363 100644
--- a/drivers/net/ethernet/intel/igbvf/vf.c
+++ b/drivers/net/ethernet/intel/igbvf/vf.c
@@ -1,29 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*******************************************************************************
-
- Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 2009 - 2012 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 2009 - 2018 Intel Corporation. */
#include "vf.h"
diff --git a/drivers/net/ethernet/intel/igbvf/vf.h b/drivers/net/ethernet/intel/igbvf/vf.h
index 193b50026246..c71b0d7dbcee 100644
--- a/drivers/net/ethernet/intel/igbvf/vf.h
+++ b/drivers/net/ethernet/intel/igbvf/vf.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
-
- Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 2009 - 2012 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 2009 - 2018 Intel Corporation. */
#ifndef _E1000_VF_H_
#define _E1000_VF_H_
diff --git a/drivers/net/ethernet/intel/ixgb/Makefile b/drivers/net/ethernet/intel/ixgb/Makefile
index 1b42dd554dd2..2433e9300a33 100644
--- a/drivers/net/ethernet/intel/ixgb/Makefile
+++ b/drivers/net/ethernet/intel/ixgb/Makefile
@@ -1,33 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
-################################################################################
-#
-# Intel PRO/10GbE Linux driver
# Copyright(c) 1999 - 2008 Intel Corporation.
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# The full GNU General Public License is included in this distribution in
-# the file called "COPYING".
-#
-# Contact Information:
-# Linux NICS <linux.nics@intel.com>
-# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-#
-################################################################################
-
-#
# Makefile for the Intel(R) PRO/10GbE ethernet driver
#
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb.h b/drivers/net/ethernet/intel/ixgb/ixgb.h
index 92022841755f..e85271b68410 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb.h
+++ b/drivers/net/ethernet/intel/ixgb/ixgb.h
@@ -1,31 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
-
- Intel PRO/10GbE Linux driver
- Copyright(c) 1999 - 2008 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 1999 - 2008 Intel Corporation. */
#ifndef _IXGB_H_
#define _IXGB_H_
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ee.c b/drivers/net/ethernet/intel/ixgb/ixgb_ee.c
index eca216b9b859..129286fc1634 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_ee.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_ee.c
@@ -1,30 +1,5 @@
-/*******************************************************************************
-
- Intel PRO/10GbE Linux driver
- Copyright(c) 1999 - 2008 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 1999 - 2008 Intel Corporation. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ee.h b/drivers/net/ethernet/intel/ixgb/ixgb_ee.h
index 475297a810fe..3ee0a09e5d0a 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_ee.h
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_ee.h
@@ -1,31 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
-
- Intel PRO/10GbE Linux driver
- Copyright(c) 1999 - 2008 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 1999 - 2008 Intel Corporation. */
#ifndef _IXGB_EE_H_
#define _IXGB_EE_H_
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
index d10a0d242dda..43744bf0fc1c 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
@@ -1,30 +1,5 @@
-/*******************************************************************************
-
- Intel PRO/10GbE Linux driver
- Copyright(c) 1999 - 2008 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 1999 - 2008 Intel Corporation. */
/* ethtool support for ixgb */
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_hw.c b/drivers/net/ethernet/intel/ixgb/ixgb_hw.c
index bf9a220f71fb..cbaa933ef30d 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_hw.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_hw.c
@@ -1,30 +1,5 @@
-/*******************************************************************************
-
- Intel PRO/10GbE Linux driver
- Copyright(c) 1999 - 2008 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 1999 - 2008 Intel Corporation. */
/* ixgb_hw.c
* Shared functions for accessing and configuring the adapter
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_hw.h b/drivers/net/ethernet/intel/ixgb/ixgb_hw.h
index 19f36d87ef61..6064583095da 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_hw.h
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_hw.h
@@ -1,31 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
-
- Intel PRO/10GbE Linux driver
- Copyright(c) 1999 - 2008 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 1999 - 2008 Intel Corporation. */
#ifndef _IXGB_HW_H_
#define _IXGB_HW_H_
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ids.h b/drivers/net/ethernet/intel/ixgb/ixgb_ids.h
index 24e849902d60..9695b8215f01 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_ids.h
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_ids.h
@@ -1,31 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
-
- Intel PRO/10GbE Linux driver
- Copyright(c) 1999 - 2008 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 1999 - 2008 Intel Corporation. */
#ifndef _IXGB_IDS_H_
#define _IXGB_IDS_H_
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index 2353c383f0a7..62f2173bc20e 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -1,30 +1,5 @@
-/*******************************************************************************
-
- Intel PRO/10GbE Linux driver
- Copyright(c) 1999 - 2008 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 1999 - 2008 Intel Corporation. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_osdep.h b/drivers/net/ethernet/intel/ixgb/ixgb_osdep.h
index b1710379192e..7bd54efa698d 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_osdep.h
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_osdep.h
@@ -1,31 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
-
- Intel PRO/10GbE Linux driver
- Copyright(c) 1999 - 2008 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 1999 - 2008 Intel Corporation. */
/* glue for the OS independent part of ixgb
* includes register access macros
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_param.c b/drivers/net/ethernet/intel/ixgb/ixgb_param.c
index 04a60640ddda..f0cadd532c53 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_param.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_param.c
@@ -1,30 +1,5 @@
-/*******************************************************************************
-
- Intel PRO/10GbE Linux driver
- Copyright(c) 1999 - 2008 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 1999 - 2008 Intel Corporation. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile
index 4cd96c88cb5d..5414685189ce 100644
--- a/drivers/net/ethernet/intel/ixgbe/Makefile
+++ b/drivers/net/ethernet/intel/ixgbe/Makefile
@@ -1,32 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-################################################################################
-#
-# Intel 10 Gigabit PCI Express Linux driver
-# Copyright(c) 1999 - 2013 Intel Corporation.
-#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# The full GNU General Public License is included in this distribution in
-# the file called "COPYING".
-#
-# Contact Information:
-# Linux NICS <linux.nics@intel.com>
-# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-#
-################################################################################
-
+# Copyright(c) 1999 - 2018 Intel Corporation.
#
# Makefile for the Intel(R) 10GbE PCI Express ethernet driver
#
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 4f08c712e58e..fc534e91c6b2 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -1,31 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2016 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#ifndef _IXGBE_H_
#define _IXGBE_H_
@@ -241,8 +215,7 @@ struct ixgbe_tx_buffer {
unsigned long time_stamp;
union {
struct sk_buff *skb;
- /* XDP uses address ptr on irq_clean */
- void *data;
+ struct xdp_frame *xdpf;
};
unsigned int bytecount;
unsigned short gso_segs;
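[Annotation] The union change above replaces the untyped XDP address pointer with a typed struct xdp_frame *, so Tx cleanup can hand frames back through the XDP API instead of page_frag_free(). A minimal sketch of the consuming side (the wrapper function is hypothetical; the two calls mirror the ixgbe_clean_tx_irq hunk later in this diff):

static void sketch_free_tx_buffer(struct ixgbe_ring *tx_ring,
                                  struct ixgbe_tx_buffer *tx_buffer,
                                  int napi_budget)
{
        if (ring_is_xdp(tx_ring))
                xdp_return_frame(tx_buffer->xdpf);      /* typed XDP frame */
        else
                napi_consume_skb(tx_buffer->skb, napi_budget);  /* plain skb */
}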
@@ -306,7 +279,6 @@ enum ixgbe_ring_state_t {
struct ixgbe_fwd_adapter {
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
struct net_device *netdev;
- struct ixgbe_adapter *real_adapter;
unsigned int tx_base_queue;
unsigned int rx_base_queue;
int pool;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index cb0fe5fedb33..eee277c1bedf 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -1,31 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2016 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#include <linux/pci.h>
#include <linux/delay.h>
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 66a74f4651e8..1e49716f52bc 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -1,31 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2016 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#include <linux/pci.h>
#include <linux/delay.h>
@@ -1462,7 +1436,8 @@ void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
{
u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
- u32 bucket_hash = 0, hi_dword = 0;
+ u32 bucket_hash = 0;
+ __be32 hi_dword = 0;
int i;
/* Apply masks to input data */
@@ -1501,7 +1476,7 @@ void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
* Limit hash to 13 bits since max bucket count is 8K.
* Store result at the end of the input stream.
*/
- input->formatted.bkt_hash = bucket_hash & 0x1FFF;
+ input->formatted.bkt_hash = (__force __be16)(bucket_hash & 0x1FFF);
}
/**
@@ -1610,7 +1585,7 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
return IXGBE_ERR_CONFIG;
}
- switch (input_mask->formatted.flex_bytes & 0xFFFF) {
+ switch ((__force u16)input_mask->formatted.flex_bytes & 0xFFFF) {
case 0x0000:
/* Mask Flex Bytes */
fdirm |= IXGBE_FDIRM_FLEX;
@@ -1680,13 +1655,13 @@ s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
/* record vlan (little-endian) and flex_bytes(big-endian) */
- fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes);
+ fdirvlan = IXGBE_STORE_AS_BE16((__force u16)input->formatted.flex_bytes);
fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
fdirvlan |= ntohs(input->formatted.vlan_id);
IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
/* configure FDIRHASH register */
- fdirhash = input->formatted.bkt_hash;
+ fdirhash = (__force u32)input->formatted.bkt_hash;
fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
@@ -1724,7 +1699,7 @@ s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
s32 err;
/* configure FDIRHASH register */
- fdirhash = input->formatted.bkt_hash;
+ fdirhash = (__force u32)input->formatted.bkt_hash;
fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
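[Annotation] The (__force ...) casts in the hunks above are sparse annotations, not behavioral changes: __be16/__be32 are bitwise types, and reinterpreting their raw bits as host-order integers for register writes has to be marked as deliberate or sparse warns. The same convention recurs below in ixgbe_common.c, ixgbe_fcoe.c, ixgbe_ipsec.c, and ixgbe_main.c. A minimal illustration (helper name hypothetical):

#include <linux/types.h>

/* Reinterpret a big-endian value's bit pattern as u32. No byte swap
 * occurs; the __force cast only tells sparse the type pun is intended. */
static inline u32 be32_raw_bits(__be32 v)
{
        return (__force u32)v;
}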
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 633be93f3dbb..3f5c350716bb 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -1,31 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2016 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#include <linux/pci.h>
#include <linux/delay.h>
@@ -3652,7 +3626,7 @@ s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length,
*/
for (i = 0; i < dword_len; i++)
IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
- i, cpu_to_le32(buffer[i]));
+ i, (__force u32)cpu_to_le32(buffer[i]));
/* Setting this bit tells the ARC that a new command is pending. */
IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index 2b311382167a..4b531e8ae38a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -1,31 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2016 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#ifndef _IXGBE_COMMON_H_
#define _IXGBE_COMMON_H_
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
index aaea8282bfd2..d26cea5b43bd 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
@@ -1,31 +1,5 @@
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2016 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#include "ixgbe.h"
#include "ixgbe_type.h"
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h
index 73b6362d4327..60cd5863bf5e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h
@@ -1,31 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#ifndef _DCB_CONFIG_H_
#define _DCB_CONFIG_H_
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
index 085130626330..379ae747cdce 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
@@ -1,31 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#include "ixgbe.h"
#include "ixgbe_type.h"
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h
index 7edce607f901..fdca41abb44c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h
@@ -1,31 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#ifndef _DCB_82598_CONFIG_H_
#define _DCB_82598_CONFIG_H_
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
index 1eed6811e914..7948849840a5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
@@ -1,30 +1,5 @@
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#include "ixgbe.h"
#include "ixgbe_type.h"
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
index fa030f0abc18..c6f084883cab 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
@@ -1,31 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#ifndef _DCB_82599_CONFIG_H_
#define _DCB_82599_CONFIG_H_
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index b33f3f87e4b1..c00332d2e02a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -1,30 +1,5 @@
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#include "ixgbe.h"
#include <linux/dcbnl.h>
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
index ad54080488ee..50dfb02fa34c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
@@ -1,30 +1,6 @@
-/*******************************************************************************
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
#include <linux/debugfs.h>
#include <linux/module.h>
@@ -34,15 +10,9 @@ static struct dentry *ixgbe_dbg_root;
static char ixgbe_dbg_reg_ops_buf[256] = "";
-/**
- * ixgbe_dbg_reg_ops_read - read for reg_ops datum
- * @filp: the opened file
- * @buffer: where to write the data for the user to read
- * @count: the size of the user's buffer
- * @ppos: file position offset
- **/
-static ssize_t ixgbe_dbg_reg_ops_read(struct file *filp, char __user *buffer,
- size_t count, loff_t *ppos)
+static ssize_t ixgbe_dbg_common_ops_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos,
+ char *dbg_buf)
{
struct ixgbe_adapter *adapter = filp->private_data;
char *buf;
@@ -53,8 +23,7 @@ static ssize_t ixgbe_dbg_reg_ops_read(struct file *filp, char __user *buffer,
return 0;
buf = kasprintf(GFP_KERNEL, "%s: %s\n",
- adapter->netdev->name,
- ixgbe_dbg_reg_ops_buf);
+ adapter->netdev->name, dbg_buf);
if (!buf)
return -ENOMEM;
@@ -70,6 +39,20 @@ static ssize_t ixgbe_dbg_reg_ops_read(struct file *filp, char __user *buffer,
}
/**
+ * ixgbe_dbg_reg_ops_read - read for reg_ops datum
+ * @filp: the opened file
+ * @buffer: where to write the data for the user to read
+ * @count: the size of the user's buffer
+ * @ppos: file position offset
+ **/
+static ssize_t ixgbe_dbg_reg_ops_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return ixgbe_dbg_common_ops_read(filp, buffer, count, ppos,
+ ixgbe_dbg_reg_ops_buf);
+}
+
+/**
* ixgbe_dbg_reg_ops_write - write into reg_ops datum
* @filp: the opened file
* @buffer: where to find the user's data
@@ -145,33 +128,11 @@ static char ixgbe_dbg_netdev_ops_buf[256] = "";
* @count: the size of the user's buffer
* @ppos: file position offset
**/
-static ssize_t ixgbe_dbg_netdev_ops_read(struct file *filp,
- char __user *buffer,
+static ssize_t ixgbe_dbg_netdev_ops_read(struct file *filp, char __user *buffer,
size_t count, loff_t *ppos)
{
- struct ixgbe_adapter *adapter = filp->private_data;
- char *buf;
- int len;
-
- /* don't allow partial reads */
- if (*ppos != 0)
- return 0;
-
- buf = kasprintf(GFP_KERNEL, "%s: %s\n",
- adapter->netdev->name,
- ixgbe_dbg_netdev_ops_buf);
- if (!buf)
- return -ENOMEM;
-
- if (count < strlen(buf)) {
- kfree(buf);
- return -ENOSPC;
- }
-
- len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
-
- kfree(buf);
- return len;
+ return ixgbe_dbg_common_ops_read(filp, buffer, count, ppos,
+ ixgbe_dbg_netdev_ops_buf);
}
/**
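[Annotation] The debugfs refactor above folds two nearly identical read callbacks into ixgbe_dbg_common_ops_read(), which takes the backing buffer as a parameter; each debugfs file keeps a thin wrapper so its struct file_operations still gets a distinct .read. A self-contained sketch of the same pattern, with all names hypothetical rather than taken from the driver:

#include <linux/fs.h>
#include <linux/string.h>
#include <linux/uaccess.h>

/* One parameterized helper shared by several debugfs files; each file's
 * .read callback just passes in its own backing buffer. */
static ssize_t demo_common_read(struct file *filp, char __user *to,
                                size_t count, loff_t *ppos, const char *src)
{
        return simple_read_from_buffer(to, count, ppos, src, strlen(src));
}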
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index c0e6ab42e0e1..bdd179c29ea4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -1,30 +1,5 @@
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2016 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
/* ethtool support for ixgbe */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index 7a09a40e4472..94b3165ff543 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -1,30 +1,5 @@
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#include "ixgbe.h"
#include <linux/if_ether.h>
@@ -465,7 +440,7 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_FCPRSP):
dma_unmap_sg(&adapter->pdev->dev, ddp->sgl,
ddp->sgc, DMA_FROM_DEVICE);
- ddp->err = ddp_err;
+ ddp->err = (__force u32)ddp_err;
ddp->sgl = NULL;
ddp->sgc = 0;
/* fall through */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
index cf1919901514..724f5382329f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
@@ -1,31 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#ifndef _IXGBE_FCOE_H
#define _IXGBE_FCOE_H
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
index cead23e3db0c..344a1f213a5f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
@@ -1,29 +1,5 @@
-/*******************************************************************************
- *
- * Intel 10 Gigabit PCI Express Linux driver
- * Copyright(c) 2017 Oracle and/or its affiliates. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * Linux NICS <linux.nics@intel.com>
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2017 Oracle and/or its affiliates. All rights reserved. */
#include "ixgbe.h"
#include <net/xfrm.h>
@@ -43,8 +19,9 @@ static void ixgbe_ipsec_set_tx_sa(struct ixgbe_hw *hw, u16 idx,
int i;
for (i = 0; i < 4; i++)
- IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(i), cpu_to_be32(key[3 - i]));
- IXGBE_WRITE_REG(hw, IXGBE_IPSTXSALT, cpu_to_be32(salt));
+ IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(i),
+ (__force u32)cpu_to_be32(key[3 - i]));
+ IXGBE_WRITE_REG(hw, IXGBE_IPSTXSALT, (__force u32)cpu_to_be32(salt));
IXGBE_WRITE_FLUSH(hw);
reg = IXGBE_READ_REG(hw, IXGBE_IPSTXIDX);
@@ -93,7 +70,8 @@ static void ixgbe_ipsec_set_rx_sa(struct ixgbe_hw *hw, u16 idx, __be32 spi,
int i;
/* store the SPI (in bigendian) and IPidx */
- IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI, cpu_to_le32(spi));
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI,
+ (__force u32)cpu_to_le32((__force u32)spi));
IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPIDX, ip_idx);
IXGBE_WRITE_FLUSH(hw);
@@ -101,8 +79,9 @@ static void ixgbe_ipsec_set_rx_sa(struct ixgbe_hw *hw, u16 idx, __be32 spi,
/* store the key, salt, and mode */
for (i = 0; i < 4; i++)
- IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(i), cpu_to_be32(key[3 - i]));
- IXGBE_WRITE_REG(hw, IXGBE_IPSRXSALT, cpu_to_be32(salt));
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(i),
+ (__force u32)cpu_to_be32(key[3 - i]));
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXSALT, (__force u32)cpu_to_be32(salt));
IXGBE_WRITE_REG(hw, IXGBE_IPSRXMOD, mode);
IXGBE_WRITE_FLUSH(hw);
@@ -121,7 +100,8 @@ static void ixgbe_ipsec_set_rx_ip(struct ixgbe_hw *hw, u16 idx, __be32 addr[])
/* store the ip address */
for (i = 0; i < 4; i++)
- IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(i), cpu_to_le32(addr[i]));
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(i),
+ (__force u32)cpu_to_le32((__force u32)addr[i]));
IXGBE_WRITE_FLUSH(hw);
ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_ip_tbl);
@@ -391,7 +371,8 @@ static struct xfrm_state *ixgbe_ipsec_find_rx_state(struct ixgbe_ipsec *ipsec,
struct xfrm_state *ret = NULL;
rcu_read_lock();
- hash_for_each_possible_rcu(ipsec->rx_sa_list, rsa, hlist, spi)
+ hash_for_each_possible_rcu(ipsec->rx_sa_list, rsa, hlist,
+ (__force u32)spi) {
if (spi == rsa->xs->id.spi &&
((ip4 && *daddr == rsa->xs->id.daddr.a4) ||
(!ip4 && !memcmp(daddr, &rsa->xs->id.daddr.a6,
@@ -401,6 +382,7 @@ static struct xfrm_state *ixgbe_ipsec_find_rx_state(struct ixgbe_ipsec *ipsec,
xfrm_state_hold(ret);
break;
}
+ }
rcu_read_unlock();
return ret;
}
@@ -463,6 +445,89 @@ static int ixgbe_ipsec_parse_proto_keys(struct xfrm_state *xs,
}
/**
+ * ixgbe_ipsec_check_mgmt_ip - make sure there is no clash with mgmt IP filters
+ * @xs: pointer to transformer state struct
+ **/
+static int ixgbe_ipsec_check_mgmt_ip(struct xfrm_state *xs)
+{
+ struct net_device *dev = xs->xso.dev;
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 mfval, manc, reg;
+ int num_filters = 4;
+ bool manc_ipv4;
+ u32 bmcipval;
+ int i, j;
+
+#define MANC_EN_IPV4_FILTER BIT(24)
+#define MFVAL_IPV4_FILTER_SHIFT 16
+#define MFVAL_IPV6_FILTER_SHIFT 24
+#define MIPAF_ARR(_m, _n) (IXGBE_MIPAF + ((_m) * 0x10) + ((_n) * 4))
+
+#define IXGBE_BMCIP(_n) (0x5050 + ((_n) * 4))
+#define IXGBE_BMCIPVAL 0x5060
+#define BMCIP_V4 0x2
+#define BMCIP_V6 0x3
+#define BMCIP_MASK 0x3
+
+ manc = IXGBE_READ_REG(hw, IXGBE_MANC);
+ manc_ipv4 = !!(manc & MANC_EN_IPV4_FILTER);
+ mfval = IXGBE_READ_REG(hw, IXGBE_MFVAL);
+ bmcipval = IXGBE_READ_REG(hw, IXGBE_BMCIPVAL);
+
+ if (xs->props.family == AF_INET) {
+ /* are there any IPv4 filters to check? */
+ if (manc_ipv4) {
+ /* the 4 ipv4 filters are all in MIPAF(3, i) */
+ for (i = 0; i < num_filters; i++) {
+ if (!(mfval & BIT(MFVAL_IPV4_FILTER_SHIFT + i)))
+ continue;
+
+ reg = IXGBE_READ_REG(hw, MIPAF_ARR(3, i));
+ if (reg == xs->id.daddr.a4)
+ return 1;
+ }
+ }
+
+ if ((bmcipval & BMCIP_MASK) == BMCIP_V4) {
+ reg = IXGBE_READ_REG(hw, IXGBE_BMCIP(3));
+ if (reg == xs->id.daddr.a4)
+ return 1;
+ }
+
+ } else {
+ /* if there are ipv4 filters, they are in the last ipv6 slot */
+ if (manc_ipv4)
+ num_filters = 3;
+
+ for (i = 0; i < num_filters; i++) {
+ if (!(mfval & BIT(MFVAL_IPV6_FILTER_SHIFT + i)))
+ continue;
+
+ for (j = 0; j < 4; j++) {
+ reg = IXGBE_READ_REG(hw, MIPAF_ARR(i, j));
+ if (reg != xs->id.daddr.a6[j])
+ break;
+ }
+ if (j == 4) /* did we match all 4 words? */
+ return 1;
+ }
+
+ if ((bmcipval & BMCIP_MASK) == BMCIP_V6) {
+ for (j = 0; j < 4; j++) {
+ reg = IXGBE_READ_REG(hw, IXGBE_BMCIP(j));
+ if (reg != xs->id.daddr.a6[j])
+ break;
+ }
+ if (j == 4) /* did we match all 4 words? */
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+/**
* ixgbe_ipsec_add_sa - program device with a security association
* @xs: pointer to transformer state struct
**/
@@ -483,6 +548,11 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
return -EINVAL;
}
+ if (ixgbe_ipsec_check_mgmt_ip(xs)) {
+ netdev_err(dev, "IPsec IP addr clash with mgmt filters\n");
+ return -EINVAL;
+ }
+
if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
struct rx_sa rsa;
@@ -593,7 +663,7 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
/* hash the new entry for faster search in Rx path */
hash_add_rcu(ipsec->rx_sa_list, &ipsec->rx_tbl[sa_idx].hlist,
- rsa.xs->id.spi);
+ (__force u32)rsa.xs->id.spi);
} else {
struct tx_sa tsa;
@@ -677,7 +747,8 @@ static void ixgbe_ipsec_del_sa(struct xfrm_state *xs)
if (!ipsec->ip_tbl[ipi].ref_cnt) {
memset(&ipsec->ip_tbl[ipi], 0,
sizeof(struct rx_ip_sa));
- ixgbe_ipsec_set_rx_ip(hw, ipi, zerobuf);
+ ixgbe_ipsec_set_rx_ip(hw, ipi,
+ (__force __be32 *)zerobuf);
}
}
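[Annotation] Two functional points in the ixgbe_ipsec.c hunks above: the new ixgbe_ipsec_check_mgmt_ip() refuses to offload an SA whose destination address collides with a management-controller (MANC/BMC) IP filter, and the Rx SA hash now keys on the raw big-endian bits of xs->id.spi. The latter is correct because insertion and lookup apply the same (__force u32) cast, so byte order never affects matching. A minimal, self-contained sketch of that keying convention (all names hypothetical):

#include <linux/hashtable.h>
#include <linux/types.h>

static DEFINE_HASHTABLE(demo_sa_tbl, 8);

struct demo_sa {
        struct hlist_node hlist;
        __be32 spi;
};

static void demo_sa_add(struct demo_sa *sa)
{
        /* key by the SPI's raw bits; lookups must cast the same way */
        hash_add_rcu(demo_sa_tbl, &sa->hlist, (__force u32)sa->spi);
}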
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h
index 4f099f516645..9ef7faadda69 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h
@@ -1,30 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 2017 Oracle and/or its affiliates. All rights reserved.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program. If not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 2017 Oracle and/or its affiliates. All rights reserved. */
#ifndef _IXGBE_IPSEC_H_
#define _IXGBE_IPSEC_H_
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index ed4cbe94c355..893a9206e718 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -1,30 +1,5 @@
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2016 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#include "ixgbe.h"
#include "ixgbe_sriov.h"
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 2ecd55856c50..38b4e4899490 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1,30 +1,5 @@
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2016 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#include <linux/types.h>
#include <linux/module.h>
@@ -752,8 +727,8 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
ring_desc = "";
pr_info("T [0x%03X] %016llX %016llX %016llX %08X %p %016llX %p%s",
i,
- le64_to_cpu(u0->a),
- le64_to_cpu(u0->b),
+ le64_to_cpu((__force __le64)u0->a),
+ le64_to_cpu((__force __le64)u0->b),
(u64)dma_unmap_addr(tx_buffer, dma),
dma_unmap_len(tx_buffer, len),
tx_buffer->next_to_watch,
@@ -864,15 +839,15 @@ rx_ring_summary:
/* Descriptor Done */
pr_info("RWB[0x%03X] %016llX %016llX ---------------- %p%s\n",
i,
- le64_to_cpu(u0->a),
- le64_to_cpu(u0->b),
+ le64_to_cpu((__force __le64)u0->a),
+ le64_to_cpu((__force __le64)u0->b),
rx_buffer_info->skb,
ring_desc);
} else {
pr_info("R [0x%03X] %016llX %016llX %016llX %p%s\n",
i,
- le64_to_cpu(u0->a),
- le64_to_cpu(u0->b),
+ le64_to_cpu((__force __le64)u0->a),
+ le64_to_cpu((__force __le64)u0->b),
(u64)rx_buffer_info->dma,
rx_buffer_info->skb,
ring_desc);
@@ -1216,7 +1191,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
/* free the skb */
if (ring_is_xdp(tx_ring))
- page_frag_free(tx_buffer->data);
+ xdp_return_frame(tx_buffer->xdpf);
else
napi_consume_skb(tx_buffer->skb, napi_budget);
@@ -1768,15 +1743,14 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_SECP))
ixgbe_ipsec_rx(rx_ring, rx_desc, skb);
- skb->protocol = eth_type_trans(skb, dev);
-
/* record Rx queue, or update MACVLAN statistics */
if (netif_is_ixgbe(dev))
skb_record_rx_queue(skb, rx_ring->queue_index);
else
macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN, true,
- (skb->pkt_type == PACKET_BROADCAST) ||
- (skb->pkt_type == PACKET_MULTICAST));
+ false);
+
+ skb->protocol = eth_type_trans(skb, dev);
}
static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
@@ -2262,7 +2236,7 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
#define IXGBE_XDP_TX 2
static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
- struct xdp_buff *xdp);
+ struct xdp_frame *xdpf);
static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
struct ixgbe_ring *rx_ring,
@@ -2270,6 +2244,7 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
{
int err, result = IXGBE_XDP_PASS;
struct bpf_prog *xdp_prog;
+ struct xdp_frame *xdpf;
u32 act;
rcu_read_lock();
@@ -2278,12 +2253,19 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
if (!xdp_prog)
goto xdp_out;
+ prefetchw(xdp->data_hard_start); /* xdp_frame write */
+
act = bpf_prog_run_xdp(xdp_prog, xdp);
switch (act) {
case XDP_PASS:
break;
case XDP_TX:
- result = ixgbe_xmit_xdp_ring(adapter, xdp);
+ xdpf = convert_to_xdp_frame(xdp);
+ if (unlikely(!xdpf)) {
+ result = IXGBE_XDP_CONSUMED;
+ break;
+ }
+ result = ixgbe_xmit_xdp_ring(adapter, xdpf);
break;
case XDP_REDIRECT:
err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
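[Annotation] The XDP_TX path now detaches the packet from the driver's Rx view before transmit: convert_to_xdp_frame() turns the xdp_buff (the per-packet view the BPF program ran on) into an xdp_frame stored in the packet headroom, which is why data_hard_start is prefetched for write above, and a failed conversion is counted as consumed (dropped). A minimal sketch of that step (wrapper name hypothetical; calls as in the hunk):

static int sketch_xdp_tx(struct ixgbe_adapter *adapter, struct xdp_buff *xdp)
{
        struct xdp_frame *xdpf = convert_to_xdp_frame(xdp);

        if (unlikely(!xdpf))
                return IXGBE_XDP_CONSUMED;      /* conversion failed: drop */

        return ixgbe_xmit_xdp_ring(adapter, xdpf);
}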
@@ -4211,7 +4193,8 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
- u32 reg_offset, vf_shift;
+ u16 pool = adapter->num_rx_pools;
+ u32 reg_offset, vf_shift, vmolr;
u32 gcr_ext, vmdctl;
int i;
@@ -4225,6 +4208,13 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
vmdctl |= IXGBE_VT_CTL_REPLEN;
IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
+ /* accept untagged packets until a vlan tag is
+ * specifically set for the VMDQ queue/pool
+ */
+ vmolr = IXGBE_VMOLR_AUPE;
+ while (pool--)
+ IXGBE_WRITE_REG(hw, IXGBE_VMOLR(VMDQ_P(pool)), vmolr);
+
vf_shift = VMDQ_P(0) % 32;
reg_offset = (VMDQ_P(0) >= 32) ? 1 : 0;
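The new loop above programs a default VMOLR for every pool before any VLAN is configured. The `while (pool--)` idiom visits pools num_rx_pools-1 down to 0 exactly once each; a tiny stand-alone sketch of the pattern, with a plain array in place of IXGBE_WRITE_REG/IXGBE_VMOLR and an example bit value:

#include <stdint.h>
#include <stdio.h>

#define NUM_POOLS 8
#define VMOLR_AUPE 0x01000000u /* "accept untagged packets" example bit */

static uint32_t vmolr_regs[NUM_POOLS];

static void program_default_vmolr(uint16_t pool)
{
	/* while (pool--) walks pool-1 .. 0, writing each register once */
	while (pool--)
		vmolr_regs[pool] = VMOLR_AUPE;
}

int main(void)
{
	program_default_vmolr(NUM_POOLS);
	printf("pool 0 VMOLR = 0x%08x\n", vmolr_regs[0]);
	return 0;
}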
@@ -4892,36 +4882,6 @@ int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter,
return -ENOMEM;
}
-/**
- * ixgbe_write_uc_addr_list - write unicast addresses to RAR table
- * @netdev: network interface device structure
- * @vfn: pool to associate with unicast addresses
- *
- * Writes unicast address list to the RAR table.
- * Returns: -ENOMEM on failure/insufficient address space
- * 0 on no addresses written
- * X on writing X addresses to the RAR table
- **/
-static int ixgbe_write_uc_addr_list(struct net_device *netdev, int vfn)
-{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
- int count = 0;
-
- /* return ENOMEM indicating insufficient memory for addresses */
- if (netdev_uc_count(netdev) > ixgbe_available_rars(adapter, vfn))
- return -ENOMEM;
-
- if (!netdev_uc_empty(netdev)) {
- struct netdev_hw_addr *ha;
- netdev_for_each_uc_addr(ha, netdev) {
- ixgbe_del_mac_filter(adapter, ha->addr, vfn);
- ixgbe_add_mac_filter(adapter, ha->addr, vfn);
- count++;
- }
- }
- return count;
-}
-
static int ixgbe_uc_sync(struct net_device *netdev, const unsigned char *addr)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -5301,29 +5261,6 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
spin_unlock(&adapter->fdir_perfect_lock);
}
-static void ixgbe_macvlan_set_rx_mode(struct net_device *dev, unsigned int pool,
- struct ixgbe_adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- u32 vmolr;
-
- /* No unicast promiscuous support for VMDQ devices. */
- vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool));
- vmolr |= (IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE);
-
- /* clear the affected bit */
- vmolr &= ~IXGBE_VMOLR_MPE;
-
- if (dev->flags & IFF_ALLMULTI) {
- vmolr |= IXGBE_VMOLR_MPE;
- } else {
- vmolr |= IXGBE_VMOLR_ROMPE;
- hw->mac.ops.update_mc_addr_list(hw, dev);
- }
- ixgbe_write_uc_addr_list(adapter->netdev, pool);
- IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr);
-}
-
/**
* ixgbe_clean_rx_ring - Free Rx Buffers per Queue
* @rx_ring: ring to free buffers from
@@ -5376,21 +5313,17 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
rx_ring->next_to_use = 0;
}
-static int ixgbe_fwd_ring_up(struct net_device *vdev,
+static int ixgbe_fwd_ring_up(struct ixgbe_adapter *adapter,
struct ixgbe_fwd_adapter *accel)
{
- struct ixgbe_adapter *adapter = accel->real_adapter;
+ struct net_device *vdev = accel->netdev;
int i, baseq, err;
- if (!test_bit(accel->pool, adapter->fwd_bitmask))
- return 0;
-
baseq = accel->pool * adapter->num_rx_queues_per_pool;
netdev_dbg(vdev, "pool %i:%i queues %i:%i\n",
accel->pool, adapter->num_rx_pools,
baseq, baseq + adapter->num_rx_queues_per_pool);
- accel->netdev = vdev;
accel->rx_base_queue = baseq;
accel->tx_base_queue = baseq;
@@ -5407,26 +5340,36 @@ static int ixgbe_fwd_ring_up(struct net_device *vdev,
*/
err = ixgbe_add_mac_filter(adapter, vdev->dev_addr,
VMDQ_P(accel->pool));
- if (err >= 0) {
- ixgbe_macvlan_set_rx_mode(vdev, accel->pool, adapter);
+ if (err >= 0)
return 0;
- }
+
+ /* if we cannot add the MAC rule then disable the offload */
+ macvlan_release_l2fw_offload(vdev);
for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
adapter->rx_ring[baseq + i]->netdev = NULL;
+ netdev_err(vdev, "L2FW offload disabled due to L2 filter error\n");
+
+ clear_bit(accel->pool, adapter->fwd_bitmask);
+ kfree(accel);
+
return err;
}
-static int ixgbe_upper_dev_walk(struct net_device *upper, void *data)
+static int ixgbe_macvlan_up(struct net_device *vdev, void *data)
{
- if (netif_is_macvlan(upper)) {
- struct macvlan_dev *dfwd = netdev_priv(upper);
- struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv;
+ struct ixgbe_adapter *adapter = data;
+ struct ixgbe_fwd_adapter *accel;
- if (dfwd->fwd_priv)
- ixgbe_fwd_ring_up(upper, vadapter);
- }
+ if (!netif_is_macvlan(vdev))
+ return 0;
+
+ accel = macvlan_accel_priv(vdev);
+ if (!accel)
+ return 0;
+
+ ixgbe_fwd_ring_up(adapter, accel);
return 0;
}
@@ -5434,7 +5377,7 @@ static int ixgbe_upper_dev_walk(struct net_device *upper, void *data)
static void ixgbe_configure_dfwd(struct ixgbe_adapter *adapter)
{
netdev_walk_all_upper_dev_rcu(adapter->netdev,
- ixgbe_upper_dev_walk, NULL);
+ ixgbe_macvlan_up, adapter);
}
static void ixgbe_configure(struct ixgbe_adapter *adapter)
@@ -5797,7 +5740,7 @@ static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
/* Free all the Tx ring sk_buffs */
if (ring_is_xdp(tx_ring))
- page_frag_free(tx_buffer->data);
+ xdp_return_frame(tx_buffer->xdpf);
else
dev_kfree_skb_any(tx_buffer->skb);
@@ -6370,7 +6313,7 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
struct device *dev = rx_ring->dev;
int orig_node = dev_to_node(dev);
int ring_node = -1;
- int size;
+ int size, err;
size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
@@ -6407,6 +6350,13 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
rx_ring->queue_index) < 0)
goto err;
+ err = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq,
+ MEM_TYPE_PAGE_SHARED, NULL);
+ if (err) {
+ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
+ goto err;
+ }
+
rx_ring->xdp_prog = adapter->xdp_prog;
return 0;
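The added xdp_rxq_info_reg_mem_model() call introduces a second registration step into setup, so the error path must now explicitly unregister the first step before jumping to the common cleanup. A hedged sketch of that register-then-unwind shape, with stand-in functions rather than the kernel API:

#include <stdio.h>

static int register_rxq_info(void)    { return 0; }
static int register_mem_model(void)   { return -1; /* simulate failure */ }
static void unregister_rxq_info(void) { puts("rxq info unregistered"); }

static int setup_rx_resources(void)
{
	int err;

	err = register_rxq_info();
	if (err)
		goto fail;

	err = register_mem_model();
	if (err) {
		unregister_rxq_info(); /* undo step one before bailing out */
		goto fail;
	}
	return 0;

fail:
	puts("setup failed, nothing left registered");
	return err;
}

int main(void)
{
	return setup_rx_resources() ? 1 : 0;
}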
@@ -7671,17 +7621,19 @@ static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
if (!test_and_clear_bit(__IXGBE_RESET_REQUESTED, &adapter->state))
return;
+ rtnl_lock();
/* If we're already down, removing or resetting, just bail */
if (test_bit(__IXGBE_DOWN, &adapter->state) ||
test_bit(__IXGBE_REMOVING, &adapter->state) ||
- test_bit(__IXGBE_RESETTING, &adapter->state))
+ test_bit(__IXGBE_RESETTING, &adapter->state)) {
+ rtnl_unlock();
return;
+ }
ixgbe_dump(adapter);
netdev_err(adapter->netdev, "Reset adapter\n");
adapter->tx_timeout_count++;
- rtnl_lock();
ixgbe_reinit_locked(adapter);
rtnl_unlock();
}
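This hunk moves rtnl_lock() ahead of the state-bit checks: testing __IXGBE_DOWN and friends before taking the lock left a window in which another path could flip them between the check and the reinit. A userspace pthread sketch of the corrected check-under-lock ordering, with illustrative names only:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool device_down;

static void reset_subtask(void)
{
	pthread_mutex_lock(&lock);

	/* the state is only meaningful while we hold the lock */
	if (device_down) {
		pthread_mutex_unlock(&lock);
		return;
	}

	puts("reinitializing under lock");
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	reset_subtask();
	return 0;
}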
@@ -7801,7 +7753,7 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
/* remove payload length from inner checksum */
paylen = skb->len - l4_offset;
- csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
+ csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
/* update gso size and bytecount with header size */
first->gso_segs = skb_shinfo(skb)->gso_segs;
@@ -8336,7 +8288,7 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
}
static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
- struct xdp_buff *xdp)
+ struct xdp_frame *xdpf)
{
struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
struct ixgbe_tx_buffer *tx_buffer;
@@ -8345,12 +8297,12 @@ static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
dma_addr_t dma;
u16 i;
- len = xdp->data_end - xdp->data;
+ len = xdpf->len;
if (unlikely(!ixgbe_desc_unused(ring)))
return IXGBE_XDP_CONSUMED;
- dma = dma_map_single(ring->dev, xdp->data, len, DMA_TO_DEVICE);
+ dma = dma_map_single(ring->dev, xdpf->data, len, DMA_TO_DEVICE);
if (dma_mapping_error(ring->dev, dma))
return IXGBE_XDP_CONSUMED;
@@ -8365,7 +8317,8 @@ static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
dma_unmap_len_set(tx_buffer, len, len);
dma_unmap_addr_set(tx_buffer, dma, dma);
- tx_buffer->data = xdp->data;
+ tx_buffer->xdpf = xdpf;
+
tx_desc->read.buffer_addr = cpu_to_le64(dma);
/* put descriptor type bits */
@@ -8827,6 +8780,49 @@ static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter)
}
#endif /* CONFIG_IXGBE_DCB */
+static int ixgbe_reassign_macvlan_pool(struct net_device *vdev, void *data)
+{
+ struct ixgbe_adapter *adapter = data;
+ struct ixgbe_fwd_adapter *accel;
+ int pool;
+
+ /* we only care about macvlans... */
+ if (!netif_is_macvlan(vdev))
+ return 0;
+
+ /* that have hardware offload enabled... */
+ accel = macvlan_accel_priv(vdev);
+ if (!accel)
+ return 0;
+
+ /* If we can relocate to a different bit do so */
+ pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
+ if (pool < adapter->num_rx_pools) {
+ set_bit(pool, adapter->fwd_bitmask);
+ accel->pool = pool;
+ return 0;
+ }
+
+ /* if we cannot find a free pool then disable the offload */
+ netdev_err(vdev, "L2FW offload disabled due to lack of queue resources\n");
+ macvlan_release_l2fw_offload(vdev);
+ kfree(accel);
+
+ return 0;
+}
+
+static void ixgbe_defrag_macvlan_pools(struct net_device *dev)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+
+ /* flush any stale bits out of the fwd bitmask */
+ bitmap_clear(adapter->fwd_bitmask, 1, 63);
+
+ /* walk through upper devices reassigning pools */
+ netdev_walk_all_upper_dev_rcu(dev, ixgbe_reassign_macvlan_pool,
+ adapter);
+}
+
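ixgbe_defrag_macvlan_pools() above clears every bit except pool 0 (the PF's) and then re-grants the lowest free bit to each offloaded macvlan, disabling the offload when none is left. A compact stand-alone sketch of the same bitmap walk, with a plain uint64_t standing in for the kernel bitmap helpers:

#include <stdint.h>
#include <stdio.h>

static int find_first_zero_bit64(uint64_t map, int nbits)
{
	for (int i = 0; i < nbits; i++)
		if (!(map & (1ULL << i)))
			return i;
	return nbits; /* none free */
}

int main(void)
{
	uint64_t fwd_bitmask = 0x1; /* pool 0 always belongs to the PF */
	int num_rx_pools = 8;
	int macvlans = 3;

	for (int i = 0; i < macvlans; i++) {
		int pool = find_first_zero_bit64(fwd_bitmask, num_rx_pools);

		if (pool == num_rx_pools) {
			printf("macvlan %d: no free pool, offload disabled\n", i);
			continue;
		}
		fwd_bitmask |= 1ULL << pool;
		printf("macvlan %d -> pool %d\n", i, pool);
	}
	return 0;
}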
/**
* ixgbe_setup_tc - configure net_device for multiple traffic classes
*
@@ -8894,6 +8890,8 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
#endif /* CONFIG_IXGBE_DCB */
ixgbe_init_interrupt_scheme(adapter);
+ ixgbe_defrag_macvlan_pools(dev);
+
if (netif_running(dev))
return ixgbe_open(dev);
@@ -8998,13 +8996,12 @@ struct upper_walk_data {
static int get_macvlan_queue(struct net_device *upper, void *_data)
{
if (netif_is_macvlan(upper)) {
- struct macvlan_dev *dfwd = netdev_priv(upper);
- struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv;
+ struct ixgbe_fwd_adapter *vadapter = macvlan_accel_priv(upper);
struct upper_walk_data *data = _data;
struct ixgbe_adapter *adapter = data->adapter;
int ifindex = data->ifindex;
- if (vadapter && vadapter->netdev->ifindex == ifindex) {
+ if (vadapter && upper->ifindex == ifindex) {
data->queue = adapter->rx_ring[vadapter->rx_base_queue]->reg_idx;
data->action = data->queue;
return 1;
@@ -9108,7 +9105,8 @@ static int ixgbe_clsu32_build_input(struct ixgbe_fdir_filter *input,
for (j = 0; field_ptr[j].val; j++) {
if (field_ptr[j].off == off) {
- field_ptr[j].val(input, mask, val, m);
+ field_ptr[j].val(input, mask, (__force u32)val,
+ (__force u32)m);
input->filter.formatted.flow_type |=
field_ptr[j].type;
found_entry = true;
@@ -9117,8 +9115,10 @@ static int ixgbe_clsu32_build_input(struct ixgbe_fdir_filter *input,
}
if (nexthdr) {
if (nexthdr->off == cls->knode.sel->keys[i].off &&
- nexthdr->val == cls->knode.sel->keys[i].val &&
- nexthdr->mask == cls->knode.sel->keys[i].mask)
+ nexthdr->val ==
+ (__force u32)cls->knode.sel->keys[i].val &&
+ nexthdr->mask ==
+ (__force u32)cls->knode.sel->keys[i].mask)
found_jump_field = true;
else
continue;
@@ -9222,7 +9222,8 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
for (i = 0; nexthdr[i].jump; i++) {
if (nexthdr[i].o != cls->knode.sel->offoff ||
nexthdr[i].s != cls->knode.sel->offshift ||
- nexthdr[i].m != cls->knode.sel->offmask)
+ nexthdr[i].m !=
+ (__force u32)cls->knode.sel->offmask)
return err;
jump = kzalloc(sizeof(*jump), GFP_KERNEL);
@@ -9443,6 +9444,22 @@ static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
return features;
}
+static void ixgbe_reset_l2fw_offload(struct ixgbe_adapter *adapter)
+{
+ int rss = min_t(int, ixgbe_max_rss_indices(adapter),
+ num_online_cpus());
+
+ /* go back to full RSS if we're not running SR-IOV */
+ if (!adapter->ring_feature[RING_F_VMDQ].offset)
+ adapter->flags &= ~(IXGBE_FLAG_VMDQ_ENABLED |
+ IXGBE_FLAG_SRIOV_ENABLED);
+
+ adapter->ring_feature[RING_F_RSS].limit = rss;
+ adapter->ring_feature[RING_F_VMDQ].limit = 1;
+
+ ixgbe_setup_tc(adapter->netdev, adapter->hw_tcs);
+}
+
static int ixgbe_set_features(struct net_device *netdev,
netdev_features_t features)
{
@@ -9523,7 +9540,9 @@ static int ixgbe_set_features(struct net_device *netdev,
}
}
- if (need_reset)
+ if ((changed & NETIF_F_HW_L2FW_DOFFLOAD) && adapter->num_rx_pools > 1)
+ ixgbe_reset_l2fw_offload(adapter);
+ else if (need_reset)
ixgbe_do_reset(netdev);
else if (changed & (NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_FILTER))
@@ -9786,71 +9805,98 @@ static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
{
- struct ixgbe_fwd_adapter *fwd_adapter = NULL;
struct ixgbe_adapter *adapter = netdev_priv(pdev);
- int used_pools = adapter->num_vfs + adapter->num_rx_pools;
+ struct ixgbe_fwd_adapter *accel;
int tcs = adapter->hw_tcs ? : 1;
- unsigned int limit;
int pool, err;
- /* Hardware has a limited number of available pools. Each VF, and the
- * PF require a pool. Check to ensure we don't attempt to use more
- * then the available number of pools.
+ /* The hardware supported by ixgbe only filters on the destination MAC
+ * address. In order to avoid issues we only support offloading modes
+ * where the hardware can actually provide the functionality.
*/
- if (used_pools >= IXGBE_MAX_VF_FUNCTIONS)
- return ERR_PTR(-EINVAL);
+ if (!macvlan_supports_dest_filter(vdev))
+ return ERR_PTR(-EMEDIUMTYPE);
+
+ pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
+ if (pool == adapter->num_rx_pools) {
+ u16 used_pools = adapter->num_vfs + adapter->num_rx_pools;
+ u16 reserved_pools;
+
+ if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
+ adapter->num_rx_pools >= (MAX_TX_QUEUES / tcs)) ||
+ adapter->num_rx_pools > IXGBE_MAX_MACVLANS)
+ return ERR_PTR(-EBUSY);
+
+		/* Hardware has a limited number of available pools. Each VF
+		 * and the PF require a pool. Check to ensure we don't
+		 * attempt to use more than the available number of pools.
+ */
+ if (used_pools >= IXGBE_MAX_VF_FUNCTIONS)
+ return ERR_PTR(-EBUSY);
- if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
- adapter->num_rx_pools >= (MAX_TX_QUEUES / tcs)) ||
- (adapter->num_rx_pools > IXGBE_MAX_MACVLANS))
- return ERR_PTR(-EBUSY);
+ /* Enable VMDq flag so device will be set in VM mode */
+ adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED |
+ IXGBE_FLAG_SRIOV_ENABLED;
- fwd_adapter = kzalloc(sizeof(*fwd_adapter), GFP_KERNEL);
- if (!fwd_adapter)
- return ERR_PTR(-ENOMEM);
+	/* Try to reserve as many queues per pool as possible. We start
+	 * with the configurations that support 4 queues per pool,
+	 * followed by 2, and then by just 1 per pool.
+ */
+ if (used_pools < 32 && adapter->num_rx_pools < 16)
+ reserved_pools = min_t(u16,
+ 32 - used_pools,
+ 16 - adapter->num_rx_pools);
+ else if (adapter->num_rx_pools < 32)
+ reserved_pools = min_t(u16,
+ 64 - used_pools,
+ 32 - adapter->num_rx_pools);
+ else
+ reserved_pools = 64 - used_pools;
- pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
- set_bit(pool, adapter->fwd_bitmask);
- limit = find_last_bit(adapter->fwd_bitmask, adapter->num_rx_pools + 1);
- /* Enable VMDq flag so device will be set in VM mode */
- adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED | IXGBE_FLAG_SRIOV_ENABLED;
- adapter->ring_feature[RING_F_VMDQ].limit = limit + 1;
+ if (!reserved_pools)
+ return ERR_PTR(-EBUSY);
- fwd_adapter->pool = pool;
- fwd_adapter->real_adapter = adapter;
+ adapter->ring_feature[RING_F_VMDQ].limit += reserved_pools;
- /* Force reinit of ring allocation with VMDQ enabled */
- err = ixgbe_setup_tc(pdev, adapter->hw_tcs);
+ /* Force reinit of ring allocation with VMDQ enabled */
+ err = ixgbe_setup_tc(pdev, adapter->hw_tcs);
+ if (err)
+ return ERR_PTR(err);
- if (!err && netif_running(pdev))
- err = ixgbe_fwd_ring_up(vdev, fwd_adapter);
+ if (pool >= adapter->num_rx_pools)
+ return ERR_PTR(-ENOMEM);
+ }
- if (!err)
- return fwd_adapter;
+ accel = kzalloc(sizeof(*accel), GFP_KERNEL);
+ if (!accel)
+ return ERR_PTR(-ENOMEM);
- /* unwind counter and free adapter struct */
- netdev_info(pdev,
- "%s: dfwd hardware acceleration failed\n", vdev->name);
- clear_bit(pool, adapter->fwd_bitmask);
- kfree(fwd_adapter);
- return ERR_PTR(err);
+ set_bit(pool, adapter->fwd_bitmask);
+ accel->pool = pool;
+ accel->netdev = vdev;
+
+ if (!netif_running(pdev))
+ return accel;
+
+ err = ixgbe_fwd_ring_up(adapter, accel);
+ if (err)
+ return ERR_PTR(err);
+
+ return accel;
}
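The reservation staircase above grows the VMDq pool count in three regimes: toward 16 pools while fewer than 32 are used (4 queues per pool), then toward 32 (2 queues per pool), then out to the 64-pool hardware limit (1 queue per pool). A worked sketch of just that arithmetic, using the hunk's constants — this is illustration, not driver code:

#include <stdio.h>

static unsigned int reserve_pools(unsigned int used_pools,
				  unsigned int num_rx_pools)
{
	unsigned int reserved;

	if (used_pools < 32 && num_rx_pools < 16)
		reserved = (32 - used_pools < 16 - num_rx_pools) ?
			   32 - used_pools : 16 - num_rx_pools;
	else if (num_rx_pools < 32)
		reserved = (64 - used_pools < 32 - num_rx_pools) ?
			   64 - used_pools : 32 - num_rx_pools;
	else
		reserved = 64 - used_pools;

	return reserved;
}

int main(void)
{
	/* one PF pool in use, no VFs: first expansion reserves 15 pools,
	 * keeping the configuration in the 4-queues-per-pool regime */
	printf("reserved = %u\n", reserve_pools(1, 1));
	return 0;
}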
static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
{
struct ixgbe_fwd_adapter *accel = priv;
- struct ixgbe_adapter *adapter = accel->real_adapter;
+ struct ixgbe_adapter *adapter = netdev_priv(pdev);
unsigned int rxbase = accel->rx_base_queue;
- unsigned int limit, i;
+ unsigned int i;
/* delete unicast filter associated with offloaded interface */
ixgbe_del_mac_filter(adapter, accel->netdev->dev_addr,
VMDQ_P(accel->pool));
- /* disable ability to receive packets for this pool */
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_VMOLR(accel->pool), 0);
-
/* Allow remaining Rx packets to get flushed out of the
* Rx FIFO before we drop the netdev for the ring.
*/
@@ -9869,25 +9915,6 @@ static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
}
clear_bit(accel->pool, adapter->fwd_bitmask);
- limit = find_last_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
- adapter->ring_feature[RING_F_VMDQ].limit = limit + 1;
-
- /* go back to full RSS if we're done with our VMQs */
- if (adapter->ring_feature[RING_F_VMDQ].limit == 1) {
- int rss = min_t(int, ixgbe_max_rss_indices(adapter),
- num_online_cpus());
-
- adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
- adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
- adapter->ring_feature[RING_F_RSS].limit = rss;
- }
-
- ixgbe_setup_tc(pdev, adapter->hw_tcs);
- netdev_dbg(pdev, "pool %i:%i queues %i:%i\n",
- accel->pool, adapter->num_rx_pools,
- accel->rx_base_queue,
- accel->rx_base_queue +
- adapter->num_rx_queues_per_pool);
kfree(accel);
}
@@ -9969,7 +9996,8 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
}
} else {
for (i = 0; i < adapter->num_rx_queues; i++)
- xchg(&adapter->rx_ring[i]->xdp_prog, adapter->xdp_prog);
+ (void)xchg(&adapter->rx_ring[i]->xdp_prog,
+ adapter->xdp_prog);
}
if (old_prog)
@@ -9995,15 +10023,29 @@ static int ixgbe_xdp(struct net_device *dev, struct netdev_bpf *xdp)
}
}
-static int ixgbe_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)
+static void ixgbe_xdp_ring_update_tail(struct ixgbe_ring *ring)
+{
+ /* Force memory writes to complete before letting h/w know there
+ * are new descriptors to fetch.
+ */
+ wmb();
+ writel(ring->next_to_use, ring->tail);
+}
+
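ixgbe_xdp_ring_update_tail() factors out the publish step: every descriptor write must be globally visible before the tail register moves, hence the wmb() ahead of writel(). A userspace analogue using a C11 release store in place of the barrier-plus-MMIO pair — the ordering guarantee is the same idea, though real MMIO needs the kernel primitives:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct ring {
	uint64_t desc[16];
	_Atomic uint32_t tail; /* the consumer polls this index */
};

static void post_descriptor(struct ring *r, uint32_t idx, uint64_t d)
{
	r->desc[idx] = d;
	/* release ordering: descriptor contents become visible no later
	 * than the tail update that advertises them */
	atomic_store_explicit(&r->tail, idx + 1, memory_order_release);
}

int main(void)
{
	struct ring r = { .tail = 0 };

	post_descriptor(&r, 0, 0xdeadbeefULL);
	printf("tail = %u\n", atomic_load(&r.tail));
	return 0;
}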
+static int ixgbe_xdp_xmit(struct net_device *dev, int n,
+ struct xdp_frame **frames, u32 flags)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
struct ixgbe_ring *ring;
- int err;
+ int drops = 0;
+ int i;
if (unlikely(test_bit(__IXGBE_DOWN, &adapter->state)))
return -ENETDOWN;
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
	/* During program transitions it's possible adapter->xdp_prog is assigned
* but ring has not been configured yet. In this case simply abort xmit.
*/
@@ -10011,35 +10053,21 @@ static int ixgbe_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)
if (unlikely(!ring))
return -ENXIO;
- err = ixgbe_xmit_xdp_ring(adapter, xdp);
- if (err != IXGBE_XDP_TX)
- return -ENOSPC;
-
- return 0;
-}
-
-static void ixgbe_xdp_flush(struct net_device *dev)
-{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
- struct ixgbe_ring *ring;
-
- /* Its possible the device went down between xdp xmit and flush so
- * we need to ensure device is still up.
- */
- if (unlikely(test_bit(__IXGBE_DOWN, &adapter->state)))
- return;
+ for (i = 0; i < n; i++) {
+ struct xdp_frame *xdpf = frames[i];
+ int err;
- ring = adapter->xdp_prog ? adapter->xdp_ring[smp_processor_id()] : NULL;
- if (unlikely(!ring))
- return;
+ err = ixgbe_xmit_xdp_ring(adapter, xdpf);
+ if (err != IXGBE_XDP_TX) {
+ xdp_return_frame_rx_napi(xdpf);
+ drops++;
+ }
+ }
- /* Force memory writes to complete before letting h/w know there
- * are new descriptors to fetch.
- */
- wmb();
- writel(ring->next_to_use, ring->tail);
+ if (unlikely(flags & XDP_XMIT_FLUSH))
+ ixgbe_xdp_ring_update_tail(ring);
- return;
+ return n - drops;
}
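The reworked ixgbe_xdp_xmit() implements the batched ndo_xdp_xmit contract: attempt every frame, free the ones the ring rejects (via xdp_return_frame_rx_napi() above), and return how many were actually queued. A minimal sketch of that bookkeeping, with send_one() as a hypothetical stand-in for ixgbe_xmit_xdp_ring():

#include <stdio.h>

#define RING_SPACE 2 /* pretend only two descriptors are free */

static int send_one(int i)
{
	return i < RING_SPACE ? 0 : -1; /* fail once the ring is full */
}

static int xdp_xmit(int n)
{
	int drops = 0;

	for (int i = 0; i < n; i++) {
		if (send_one(i) != 0)
			drops++; /* a rejected frame is freed here */
	}
	return n - drops; /* frames actually handed to hardware */
}

int main(void)
{
	printf("queued %d of 4 frames\n", xdp_xmit(4));
	return 0;
}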
static const struct net_device_ops ixgbe_netdev_ops = {
@@ -10089,7 +10117,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_features_check = ixgbe_features_check,
.ndo_bpf = ixgbe_xdp,
.ndo_xdp_xmit = ixgbe_xdp_xmit,
- .ndo_xdp_flush = ixgbe_xdp_flush,
};
/**
@@ -10908,14 +10935,14 @@ skip_bad_vf_detection:
rtnl_lock();
netif_device_detach(netdev);
+ if (netif_running(netdev))
+ ixgbe_close_suspend(adapter);
+
if (state == pci_channel_io_perm_failure) {
rtnl_unlock();
return PCI_ERS_RESULT_DISCONNECT;
}
- if (netif_running(netdev))
- ixgbe_close_suspend(adapter);
-
if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
pci_disable_device(pdev);
rtnl_unlock();
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
index a0cb84381cd0..5679293e53f7 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
@@ -1,30 +1,5 @@
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2016 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#include <linux/pci.h>
#include <linux/delay.h>
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
index c4628b663590..e085b6520dac 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
@@ -1,31 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2016 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#ifndef _IXGBE_MBX_H_
#define _IXGBE_MBX_H_
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h
index 72446644f9fa..1e6cf220f543 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
- *
- * Intel 10 Gigabit PCI Express Linux drive
- * Copyright(c) 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#ifndef _IXGBE_MODEL_H_
#define _IXGBE_MODEL_H_
@@ -53,8 +29,8 @@ static inline int ixgbe_mat_prgm_sip(struct ixgbe_fdir_filter *input,
union ixgbe_atr_input *mask,
u32 val, u32 m)
{
- input->filter.formatted.src_ip[0] = val;
- mask->formatted.src_ip[0] = m;
+ input->filter.formatted.src_ip[0] = (__force __be32)val;
+ mask->formatted.src_ip[0] = (__force __be32)m;
return 0;
}
@@ -62,8 +38,8 @@ static inline int ixgbe_mat_prgm_dip(struct ixgbe_fdir_filter *input,
union ixgbe_atr_input *mask,
u32 val, u32 m)
{
- input->filter.formatted.dst_ip[0] = val;
- mask->formatted.dst_ip[0] = m;
+ input->filter.formatted.dst_ip[0] = (__force __be32)val;
+ mask->formatted.dst_ip[0] = (__force __be32)m;
return 0;
}
@@ -79,10 +55,10 @@ static inline int ixgbe_mat_prgm_ports(struct ixgbe_fdir_filter *input,
union ixgbe_atr_input *mask,
u32 val, u32 m)
{
- input->filter.formatted.src_port = val & 0xffff;
- mask->formatted.src_port = m & 0xffff;
- input->filter.formatted.dst_port = val >> 16;
- mask->formatted.dst_port = m >> 16;
+ input->filter.formatted.src_port = (__force __be16)(val & 0xffff);
+ mask->formatted.src_port = (__force __be16)(m & 0xffff);
+ input->filter.formatted.dst_port = (__force __be16)(val >> 16);
+ mask->formatted.dst_port = (__force __be16)(m >> 16);
return 0;
};
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 91bde90f9265..919a7af84b42 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -1,30 +1,5 @@
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#include <linux/pci.h>
#include <linux/delay.h>
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index d6a7e77348c5..64e44e01c973 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -1,31 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2016 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#ifndef _IXGBE_PHY_H_
#define _IXGBE_PHY_H_
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index f6cc9166082a..b3e0d8bb5cbd 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -1,30 +1,6 @@
-/*******************************************************************************
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2016 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
#include "ixgbe.h"
#include <linux/ptp_classify.h>
#include <linux/clocksource.h>
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 008aa073a679..6f59933cdff7 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -1,30 +1,5 @@
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2015 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#include <linux/types.h>
#include <linux/module.h>
@@ -266,7 +241,7 @@ int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
#endif
/* Disable VMDq flag so device will be set in VM mode */
- if (adapter->ring_feature[RING_F_VMDQ].limit == 1) {
+ if (bitmap_weight(adapter->fwd_bitmask, adapter->num_rx_pools) == 1) {
adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
rss = min_t(int, ixgbe_max_rss_indices(adapter),
@@ -312,7 +287,8 @@ static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs)
* other values out of range.
*/
num_tc = adapter->hw_tcs;
- num_rx_pools = adapter->num_rx_pools;
+ num_rx_pools = bitmap_weight(adapter->fwd_bitmask,
+ adapter->num_rx_pools);
limit = (num_tc > 4) ? IXGBE_MAX_VFS_8TC :
(num_tc > 1) ? IXGBE_MAX_VFS_4TC : IXGBE_MAX_VFS_1TC;
@@ -878,14 +854,11 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
/* reply to reset with ack and vf mac address */
msgbuf[0] = IXGBE_VF_RESET;
- if (!is_zero_ether_addr(vf_mac)) {
+ if (!is_zero_ether_addr(vf_mac) && adapter->vfinfo[vf].pf_set_mac) {
msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK;
memcpy(addr, vf_mac, ETH_ALEN);
} else {
msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
- dev_warn(&adapter->pdev->dev,
- "VF %d has no MAC address assigned, you may have to assign one manually\n",
- vf);
}
/*
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
index e30d1f07e891..3ec21923c89c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
@@ -1,31 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#ifndef _IXGBE_SRIOV_H_
#define _IXGBE_SRIOV_H_
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
index 24766e125592..204844288c16 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
@@ -1,30 +1,5 @@
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#include "ixgbe.h"
#include "ixgbe_common.h"
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 2daa81e6e9b2..e8ed37749ab1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -1,31 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2016 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#ifndef _IXGBE_TYPE_H_
#define _IXGBE_TYPE_H_
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index b8c5fd2a2115..de563cfd294d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -1,30 +1,5 @@
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2016 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#include <linux/pci.h>
#include <linux/delay.h>
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h
index 182d640e9f7a..e246c0d2a427 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h
@@ -1,27 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
- *
- * Intel 10 Gigabit PCI Express Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * Linux NICS <linux.nics@intel.com>
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#include "ixgbe_type.h"
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index 9592f3e3e42e..a8148c7126e5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -1,26 +1,6 @@
-/*******************************************************************************
- *
- * Intel 10 Gigabit PCI Express Linux driver
- * Copyright(c) 1999 - 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * Linux NICS <linux.nics@intel.com>
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
+
#include "ixgbe_x540.h"
#include "ixgbe_type.h"
#include "ixgbe_common.h"
@@ -898,8 +878,9 @@ static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
/* convert offset from words to bytes */
- buffer.address = cpu_to_be32((offset + current_word) * 2);
- buffer.length = cpu_to_be16(words_to_read * 2);
+ buffer.address = (__force u32)cpu_to_be32((offset +
+ current_word) * 2);
+ buffer.length = (__force u16)cpu_to_be16(words_to_read * 2);
buffer.pad2 = 0;
buffer.pad3 = 0;
@@ -1109,9 +1090,9 @@ static s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data)
buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
/* convert offset from words to bytes */
- buffer.address = cpu_to_be32(offset * 2);
+ buffer.address = (__force u32)cpu_to_be32(offset * 2);
/* one word */
- buffer.length = cpu_to_be16(sizeof(u16));
+ buffer.length = (__force u16)cpu_to_be16(sizeof(u16));
status = hw->mac.ops.acquire_swfw_sync(hw, mask);
if (status)
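The __force casts above silence sparse while the values are still genuinely converted: word offsets are doubled into byte offsets and stored big-endian, since the host-interface firmware expects big-endian fields regardless of CPU byte order. A small sketch with htonl()/htons() standing in for cpu_to_be32()/cpu_to_be16(); the struct is a hypothetical stand-in for the driver's buffer:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

struct eeprom_req {
	uint32_t address; /* big-endian byte address */
	uint16_t length;  /* big-endian byte count */
};

int main(void)
{
	uint16_t offset_words = 0x10, words_to_read = 4;
	struct eeprom_req req = {
		/* convert word offset to byte offset, then to wire order */
		.address = htonl((uint32_t)offset_words * 2),
		.length  = htons((uint16_t)(words_to_read * 2)),
	};

	printf("address as stored: 0x%08x\n", (unsigned)req.address);
	printf("length  as stored: 0x%04x\n", (unsigned)req.length);
	return 0;
}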
diff --git a/drivers/net/ethernet/intel/ixgbevf/Makefile b/drivers/net/ethernet/intel/ixgbevf/Makefile
index bb47814cfa90..aba1e6a37a6a 100644
--- a/drivers/net/ethernet/intel/ixgbevf/Makefile
+++ b/drivers/net/ethernet/intel/ixgbevf/Makefile
@@ -1,31 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-################################################################################
-#
-# Intel 82599 Virtual Function driver
-# Copyright(c) 1999 - 2012 Intel Corporation.
-#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# The full GNU General Public License is included in this distribution in
-# the file called "COPYING".
-#
-# Contact Information:
-# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-#
-################################################################################
-
+# Copyright(c) 1999 - 2018 Intel Corporation.
#
# Makefile for the Intel(R) 82599 VF ethernet driver
#
diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
index 71c828842b11..700d8eb2f6f8 100644
--- a/drivers/net/ethernet/intel/ixgbevf/defines.h
+++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
-
- Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2015 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#ifndef _IXGBEVF_DEFINES_H_
#define _IXGBEVF_DEFINES_H_
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index 8e7d6c6f5c92..e7813d76527c 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -1,28 +1,5 @@
-/*******************************************************************************
-
- Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2018 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
/* ethtool support for ixgbevf */
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 447ce1d5e0e3..56a1031dcc07 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
-
- Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2018 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#ifndef _IXGBEVF_H_
#define _IXGBEVF_H_
@@ -100,6 +76,7 @@ enum ixgbevf_ring_state_t {
__IXGBEVF_TX_DETECT_HANG,
__IXGBEVF_HANG_CHECK_ARMED,
__IXGBEVF_TX_XDP_RING,
+ __IXGBEVF_TX_XDP_RING_PRIMED,
};
#define ring_is_xdp(ring) \
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 850f8af95e49..59416eddd840 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1,28 +1,5 @@
-/*******************************************************************************
-
- Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2018 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
/******************************************************************************
Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
@@ -1014,24 +991,45 @@ static int ixgbevf_xmit_xdp_ring(struct ixgbevf_ring *ring,
return IXGBEVF_XDP_CONSUMED;
/* record the location of the first descriptor for this packet */
- tx_buffer = &ring->tx_buffer_info[ring->next_to_use];
- tx_buffer->bytecount = len;
- tx_buffer->gso_segs = 1;
- tx_buffer->protocol = 0;
-
i = ring->next_to_use;
- tx_desc = IXGBEVF_TX_DESC(ring, i);
+ tx_buffer = &ring->tx_buffer_info[i];
dma_unmap_len_set(tx_buffer, len, len);
dma_unmap_addr_set(tx_buffer, dma, dma);
tx_buffer->data = xdp->data;
- tx_desc->read.buffer_addr = cpu_to_le64(dma);
+ tx_buffer->bytecount = len;
+ tx_buffer->gso_segs = 1;
+ tx_buffer->protocol = 0;
+
+	/* Populate a minimal context descriptor so the hardware knows it
+	 * is expected to process Ethernet frames.
+	 */
+ if (!test_bit(__IXGBEVF_TX_XDP_RING_PRIMED, &ring->state)) {
+ struct ixgbe_adv_tx_context_desc *context_desc;
+
+ set_bit(__IXGBEVF_TX_XDP_RING_PRIMED, &ring->state);
+
+ context_desc = IXGBEVF_TX_CTXTDESC(ring, 0);
+ context_desc->vlan_macip_lens =
+ cpu_to_le32(ETH_HLEN << IXGBE_ADVTXD_MACLEN_SHIFT);
+ context_desc->seqnum_seed = 0;
+ context_desc->type_tucmd_mlhl =
+ cpu_to_le32(IXGBE_TXD_CMD_DEXT |
+ IXGBE_ADVTXD_DTYP_CTXT);
+ context_desc->mss_l4len_idx = 0;
+
+ i = 1;
+ }
/* put descriptor type bits */
cmd_type = IXGBE_ADVTXD_DTYP_DATA |
IXGBE_ADVTXD_DCMD_DEXT |
IXGBE_ADVTXD_DCMD_IFCS;
cmd_type |= len | IXGBE_TXD_CMD;
+
+ tx_desc = IXGBEVF_TX_DESC(ring, i);
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
+
tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
tx_desc->read.olinfo_status =
cpu_to_le32((len << IXGBE_ADVTXD_PAYLEN_SHIFT) |
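The priming logic above writes one context descriptor per ring, gated by the new __IXGBEVF_TX_XDP_RING_PRIMED bit, which ixgbevf_configure_tx_ring() clears (see the hunk below) so a reconfigured ring gets primed again. A simplified sketch of the prime-once pattern — the flag here is a plain bool, not the kernel's atomic bitop:

#include <stdbool.h>
#include <stdio.h>

struct ring {
	bool xdp_primed;
	int next_desc;
};

static void xmit_xdp(struct ring *r)
{
	/* first transmit after (re)configuration emits the one-time
	 * context descriptor before the data descriptor */
	if (!r->xdp_primed) {
		r->xdp_primed = true;
		printf("desc %d: context descriptor\n", r->next_desc++);
	}
	printf("desc %d: data descriptor\n", r->next_desc++);
}

int main(void)
{
	struct ring r = { 0 };

	xmit_xdp(&r); /* emits context + data */
	xmit_xdp(&r); /* emits data only */
	return 0;
}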
@@ -1711,6 +1709,7 @@ static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
sizeof(struct ixgbevf_tx_buffer) * ring->count);
clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &ring->state);
+ clear_bit(__IXGBEVF_TX_XDP_RING_PRIMED, &ring->state);
IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl);
@@ -3142,15 +3141,17 @@ static void ixgbevf_reset_subtask(struct ixgbevf_adapter *adapter)
if (!test_and_clear_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state))
return;
+ rtnl_lock();
/* If we're already down or resetting, just bail */
if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
test_bit(__IXGBEVF_REMOVING, &adapter->state) ||
- test_bit(__IXGBEVF_RESETTING, &adapter->state))
+ test_bit(__IXGBEVF_RESETTING, &adapter->state)) {
+ rtnl_unlock();
return;
+ }
adapter->tx_timeout_count++;
- rtnl_lock();
ixgbevf_reinit_locked(adapter);
rtnl_unlock();
}
@@ -4187,6 +4188,7 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p)
return -EPERM;
ether_addr_copy(hw->mac.addr, addr->sa_data);
+ ether_addr_copy(hw->mac.perm_addr, addr->sa_data);
ether_addr_copy(netdev->dev_addr, addr->sa_data);
return 0;
@@ -4770,14 +4772,14 @@ static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
rtnl_lock();
netif_device_detach(netdev);
+ if (netif_running(netdev))
+ ixgbevf_close_suspend(adapter);
+
if (state == pci_channel_io_perm_failure) {
rtnl_unlock();
return PCI_ERS_RESULT_DISCONNECT;
}
- if (netif_running(netdev))
- ixgbevf_close_suspend(adapter);
-
if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
pci_disable_device(pdev);
rtnl_unlock();
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.c b/drivers/net/ethernet/intel/ixgbevf/mbx.c
index 2819abc454c7..6bc1953263b9 100644
--- a/drivers/net/ethernet/intel/ixgbevf/mbx.c
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.c
@@ -1,28 +1,5 @@
-/*******************************************************************************
-
- Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2015 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#include "mbx.h"
#include "ixgbevf.h"
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.h b/drivers/net/ethernet/intel/ixgbevf/mbx.h
index 5ec947fe3d09..bfd9ae150808 100644
--- a/drivers/net/ethernet/intel/ixgbevf/mbx.h
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
-
- Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2015 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#ifndef _IXGBE_MBX_H_
#define _IXGBE_MBX_H_
diff --git a/drivers/net/ethernet/intel/ixgbevf/regs.h b/drivers/net/ethernet/intel/ixgbevf/regs.h
index 278f73980501..68d16ae5b65a 100644
--- a/drivers/net/ethernet/intel/ixgbevf/regs.h
+++ b/drivers/net/ethernet/intel/ixgbevf/regs.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
-
- Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2015 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#ifndef _IXGBEVF_REGS_H_
#define _IXGBEVF_REGS_H_
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index 38d3a327c1bc..bf0577e819e1 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -1,28 +1,5 @@
-/*******************************************************************************
-
- Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2015 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#include "vf.h"
#include "ixgbevf.h"
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
index 194fbdaa4519..d1e9e306653b 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
-
- Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2015 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#ifndef __IXGBE_VF_H__
#define __IXGBE_VF_H__
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index ebe5c9148935..cc2f7701e71e 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -86,6 +86,7 @@ config MVPP2
depends on ARCH_MVEBU || COMPILE_TEST
depends on HAS_DMA
select MVMDIO
+ select PHYLINK
---help---
This driver supports the network interface units in the
Marvell ARMADA 375, 7K and 8K SoCs.
diff --git a/drivers/net/ethernet/marvell/Makefile b/drivers/net/ethernet/marvell/Makefile
index 9498ed26dbe5..55d4d10aa7d3 100644
--- a/drivers/net/ethernet/marvell/Makefile
+++ b/drivers/net/ethernet/marvell/Makefile
@@ -7,7 +7,7 @@ obj-$(CONFIG_MVMDIO) += mvmdio.o
obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o
obj-$(CONFIG_MVNETA_BM) += mvneta_bm.o
obj-$(CONFIG_MVNETA) += mvneta.o
-obj-$(CONFIG_MVPP2) += mvpp2.o
+obj-$(CONFIG_MVPP2) += mvpp2/
obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o
obj-$(CONFIG_SKGE) += skge.o
obj-$(CONFIG_SKY2) += sky2.o
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index 0495487f7b42..c5dac6bd2be4 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -348,10 +348,7 @@ static int orion_mdio_probe(struct platform_device *pdev)
goto out_mdio;
}
- if (pdev->dev.of_node)
- ret = of_mdiobus_register(bus, pdev->dev.of_node);
- else
- ret = mdiobus_register(bus);
+ ret = of_mdiobus_register(bus, pdev->dev.of_node);
if (ret < 0) {
dev_err(&pdev->dev, "Cannot register MDIO bus (%d)\n", ret);
goto out_mdio;
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
deleted file mode 100644
index 6f410235987c..000000000000
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ /dev/null
@@ -1,8956 +0,0 @@
-/*
- * Driver for Marvell PPv2 network controller for Armada 375 SoC.
- *
- * Copyright (C) 2014 Marvell
- *
- * Marcin Wojtas <mw@semihalf.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#include <linux/acpi.h>
-#include <linux/kernel.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/platform_device.h>
-#include <linux/skbuff.h>
-#include <linux/inetdevice.h>
-#include <linux/mbus.h>
-#include <linux/module.h>
-#include <linux/mfd/syscon.h>
-#include <linux/interrupt.h>
-#include <linux/cpumask.h>
-#include <linux/of.h>
-#include <linux/of_irq.h>
-#include <linux/of_mdio.h>
-#include <linux/of_net.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
-#include <linux/phy.h>
-#include <linux/phy/phy.h>
-#include <linux/clk.h>
-#include <linux/hrtimer.h>
-#include <linux/ktime.h>
-#include <linux/regmap.h>
-#include <uapi/linux/ppp_defs.h>
-#include <net/ip.h>
-#include <net/ipv6.h>
-#include <net/tso.h>
-
-/* Fifo Registers */
-#define MVPP2_RX_DATA_FIFO_SIZE_REG(port) (0x00 + 4 * (port))
-#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port) (0x20 + 4 * (port))
-#define MVPP2_RX_MIN_PKT_SIZE_REG 0x60
-#define MVPP2_RX_FIFO_INIT_REG 0x64
-#define MVPP22_TX_FIFO_THRESH_REG(port) (0x8840 + 4 * (port))
-#define MVPP22_TX_FIFO_SIZE_REG(port) (0x8860 + 4 * (port))
-
-/* RX DMA Top Registers */
-#define MVPP2_RX_CTRL_REG(port) (0x140 + 4 * (port))
-#define MVPP2_RX_LOW_LATENCY_PKT_SIZE(s) (((s) & 0xfff) << 16)
-#define MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK BIT(31)
-#define MVPP2_POOL_BUF_SIZE_REG(pool) (0x180 + 4 * (pool))
-#define MVPP2_POOL_BUF_SIZE_OFFSET 5
-#define MVPP2_RXQ_CONFIG_REG(rxq) (0x800 + 4 * (rxq))
-#define MVPP2_SNOOP_PKT_SIZE_MASK 0x1ff
-#define MVPP2_SNOOP_BUF_HDR_MASK BIT(9)
-#define MVPP2_RXQ_POOL_SHORT_OFFS 20
-#define MVPP21_RXQ_POOL_SHORT_MASK 0x700000
-#define MVPP22_RXQ_POOL_SHORT_MASK 0xf00000
-#define MVPP2_RXQ_POOL_LONG_OFFS 24
-#define MVPP21_RXQ_POOL_LONG_MASK 0x7000000
-#define MVPP22_RXQ_POOL_LONG_MASK 0xf000000
-#define MVPP2_RXQ_PACKET_OFFSET_OFFS 28
-#define MVPP2_RXQ_PACKET_OFFSET_MASK 0x70000000
-#define MVPP2_RXQ_DISABLE_MASK BIT(31)
-
-/* Top Registers */
-#define MVPP2_MH_REG(port) (0x5040 + 4 * (port))
-#define MVPP2_DSA_EXTENDED BIT(5)
-
-/* Parser Registers */
-#define MVPP2_PRS_INIT_LOOKUP_REG 0x1000
-#define MVPP2_PRS_PORT_LU_MAX 0xf
-#define MVPP2_PRS_PORT_LU_MASK(port) (0xff << ((port) * 4))
-#define MVPP2_PRS_PORT_LU_VAL(port, val) ((val) << ((port) * 4))
-#define MVPP2_PRS_INIT_OFFS_REG(port) (0x1004 + ((port) & 4))
-#define MVPP2_PRS_INIT_OFF_MASK(port) (0x3f << (((port) % 4) * 8))
-#define MVPP2_PRS_INIT_OFF_VAL(port, val) ((val) << (((port) % 4) * 8))
-#define MVPP2_PRS_MAX_LOOP_REG(port) (0x100c + ((port) & 4))
-#define MVPP2_PRS_MAX_LOOP_MASK(port) (0xff << (((port) % 4) * 8))
-#define MVPP2_PRS_MAX_LOOP_VAL(port, val) ((val) << (((port) % 4) * 8))
-#define MVPP2_PRS_TCAM_IDX_REG 0x1100
-#define MVPP2_PRS_TCAM_DATA_REG(idx) (0x1104 + (idx) * 4)
-#define MVPP2_PRS_TCAM_INV_MASK BIT(31)
-#define MVPP2_PRS_SRAM_IDX_REG 0x1200
-#define MVPP2_PRS_SRAM_DATA_REG(idx) (0x1204 + (idx) * 4)
-#define MVPP2_PRS_TCAM_CTRL_REG 0x1230
-#define MVPP2_PRS_TCAM_EN_MASK BIT(0)
-
-/* RSS Registers */
-#define MVPP22_RSS_INDEX 0x1500
-#define MVPP22_RSS_INDEX_TABLE_ENTRY(idx) (idx)
-#define MVPP22_RSS_INDEX_TABLE(idx) ((idx) << 8)
-#define MVPP22_RSS_INDEX_QUEUE(idx) ((idx) << 16)
-#define MVPP22_RSS_TABLE_ENTRY 0x1508
-#define MVPP22_RSS_TABLE 0x1510
-#define MVPP22_RSS_TABLE_POINTER(p) (p)
-#define MVPP22_RSS_WIDTH 0x150c
-
-/* Classifier Registers */
-#define MVPP2_CLS_MODE_REG 0x1800
-#define MVPP2_CLS_MODE_ACTIVE_MASK BIT(0)
-#define MVPP2_CLS_PORT_WAY_REG 0x1810
-#define MVPP2_CLS_PORT_WAY_MASK(port) (1 << (port))
-#define MVPP2_CLS_LKP_INDEX_REG 0x1814
-#define MVPP2_CLS_LKP_INDEX_WAY_OFFS 6
-#define MVPP2_CLS_LKP_TBL_REG 0x1818
-#define MVPP2_CLS_LKP_TBL_RXQ_MASK 0xff
-#define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK BIT(25)
-#define MVPP2_CLS_FLOW_INDEX_REG 0x1820
-#define MVPP2_CLS_FLOW_TBL0_REG 0x1824
-#define MVPP2_CLS_FLOW_TBL1_REG 0x1828
-#define MVPP2_CLS_FLOW_TBL2_REG 0x182c
-#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port) (0x1980 + ((port) * 4))
-#define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS 3
-#define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK 0x7
-#define MVPP2_CLS_SWFWD_P2HQ_REG(port) (0x19b0 + ((port) * 4))
-#define MVPP2_CLS_SWFWD_PCTRL_REG 0x19d0
-#define MVPP2_CLS_SWFWD_PCTRL_MASK(port) (1 << (port))
-
-/* Descriptor Manager Top Registers */
-#define MVPP2_RXQ_NUM_REG 0x2040
-#define MVPP2_RXQ_DESC_ADDR_REG 0x2044
-#define MVPP22_DESC_ADDR_OFFS 8
-#define MVPP2_RXQ_DESC_SIZE_REG 0x2048
-#define MVPP2_RXQ_DESC_SIZE_MASK 0x3ff0
-#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq) (0x3000 + 4 * (rxq))
-#define MVPP2_RXQ_NUM_PROCESSED_OFFSET 0
-#define MVPP2_RXQ_NUM_NEW_OFFSET 16
-#define MVPP2_RXQ_STATUS_REG(rxq) (0x3400 + 4 * (rxq))
-#define MVPP2_RXQ_OCCUPIED_MASK 0x3fff
-#define MVPP2_RXQ_NON_OCCUPIED_OFFSET 16
-#define MVPP2_RXQ_NON_OCCUPIED_MASK 0x3fff0000
-#define MVPP2_RXQ_THRESH_REG 0x204c
-#define MVPP2_OCCUPIED_THRESH_OFFSET 0
-#define MVPP2_OCCUPIED_THRESH_MASK 0x3fff
-#define MVPP2_RXQ_INDEX_REG 0x2050
-#define MVPP2_TXQ_NUM_REG 0x2080
-#define MVPP2_TXQ_DESC_ADDR_REG 0x2084
-#define MVPP2_TXQ_DESC_SIZE_REG 0x2088
-#define MVPP2_TXQ_DESC_SIZE_MASK 0x3ff0
-#define MVPP2_TXQ_THRESH_REG 0x2094
-#define MVPP2_TXQ_THRESH_OFFSET 16
-#define MVPP2_TXQ_THRESH_MASK 0x3fff
-#define MVPP2_AGGR_TXQ_UPDATE_REG 0x2090
-#define MVPP2_TXQ_INDEX_REG 0x2098
-#define MVPP2_TXQ_PREF_BUF_REG 0x209c
-#define MVPP2_PREF_BUF_PTR(desc) ((desc) & 0xfff)
-#define MVPP2_PREF_BUF_SIZE_4 (BIT(12) | BIT(13))
-#define MVPP2_PREF_BUF_SIZE_16 (BIT(12) | BIT(14))
-#define MVPP2_PREF_BUF_THRESH(val) ((val) << 17)
-#define MVPP2_TXQ_DRAIN_EN_MASK BIT(31)
-#define MVPP2_TXQ_PENDING_REG 0x20a0
-#define MVPP2_TXQ_PENDING_MASK 0x3fff
-#define MVPP2_TXQ_INT_STATUS_REG 0x20a4
-#define MVPP2_TXQ_SENT_REG(txq) (0x3c00 + 4 * (txq))
-#define MVPP2_TRANSMITTED_COUNT_OFFSET 16
-#define MVPP2_TRANSMITTED_COUNT_MASK 0x3fff0000
-#define MVPP2_TXQ_RSVD_REQ_REG 0x20b0
-#define MVPP2_TXQ_RSVD_REQ_Q_OFFSET 16
-#define MVPP2_TXQ_RSVD_RSLT_REG 0x20b4
-#define MVPP2_TXQ_RSVD_RSLT_MASK 0x3fff
-#define MVPP2_TXQ_RSVD_CLR_REG 0x20b8
-#define MVPP2_TXQ_RSVD_CLR_OFFSET 16
-#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu) (0x2100 + 4 * (cpu))
-#define MVPP22_AGGR_TXQ_DESC_ADDR_OFFS 8
-#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu) (0x2140 + 4 * (cpu))
-#define MVPP2_AGGR_TXQ_DESC_SIZE_MASK 0x3ff0
-#define MVPP2_AGGR_TXQ_STATUS_REG(cpu) (0x2180 + 4 * (cpu))
-#define MVPP2_AGGR_TXQ_PENDING_MASK 0x3fff
-#define MVPP2_AGGR_TXQ_INDEX_REG(cpu) (0x21c0 + 4 * (cpu))
-
-/* MBUS bridge registers */
-#define MVPP2_WIN_BASE(w) (0x4000 + ((w) << 2))
-#define MVPP2_WIN_SIZE(w) (0x4020 + ((w) << 2))
-#define MVPP2_WIN_REMAP(w) (0x4040 + ((w) << 2))
-#define MVPP2_BASE_ADDR_ENABLE 0x4060
-
-/* AXI Bridge Registers */
-#define MVPP22_AXI_BM_WR_ATTR_REG 0x4100
-#define MVPP22_AXI_BM_RD_ATTR_REG 0x4104
-#define MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG 0x4110
-#define MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG 0x4114
-#define MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG 0x4118
-#define MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG 0x411c
-#define MVPP22_AXI_RX_DATA_WR_ATTR_REG 0x4120
-#define MVPP22_AXI_TX_DATA_RD_ATTR_REG 0x4130
-#define MVPP22_AXI_RD_NORMAL_CODE_REG 0x4150
-#define MVPP22_AXI_RD_SNOOP_CODE_REG 0x4154
-#define MVPP22_AXI_WR_NORMAL_CODE_REG 0x4160
-#define MVPP22_AXI_WR_SNOOP_CODE_REG 0x4164
-
-/* Values for AXI Bridge registers */
-#define MVPP22_AXI_ATTR_CACHE_OFFS 0
-#define MVPP22_AXI_ATTR_DOMAIN_OFFS 12
-
-#define MVPP22_AXI_CODE_CACHE_OFFS 0
-#define MVPP22_AXI_CODE_DOMAIN_OFFS 4
-
-#define MVPP22_AXI_CODE_CACHE_NON_CACHE 0x3
-#define MVPP22_AXI_CODE_CACHE_WR_CACHE 0x7
-#define MVPP22_AXI_CODE_CACHE_RD_CACHE 0xb
-
-#define MVPP22_AXI_CODE_DOMAIN_OUTER_DOM 2
-#define MVPP22_AXI_CODE_DOMAIN_SYSTEM 3
-
-/* Interrupt Cause and Mask registers */
-#define MVPP2_ISR_TX_THRESHOLD_REG(port) (0x5140 + 4 * (port))
-#define MVPP2_MAX_ISR_TX_THRESHOLD 0xfffff0
-
-#define MVPP2_ISR_RX_THRESHOLD_REG(rxq) (0x5200 + 4 * (rxq))
-#define MVPP2_MAX_ISR_RX_THRESHOLD 0xfffff0
-#define MVPP21_ISR_RXQ_GROUP_REG(port) (0x5400 + 4 * (port))
-
-#define MVPP22_ISR_RXQ_GROUP_INDEX_REG 0x5400
-#define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf
-#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK 0x380
-#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET 7
-
-#define MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG 0x5404
-#define MVPP22_ISR_RXQ_SUB_GROUP_STARTQ_MASK 0x1f
-#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_MASK 0xf00
-#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET 8
-
-#define MVPP2_ISR_ENABLE_REG(port) (0x5420 + 4 * (port))
-#define MVPP2_ISR_ENABLE_INTERRUPT(mask) ((mask) & 0xffff)
-#define MVPP2_ISR_DISABLE_INTERRUPT(mask) (((mask) << 16) & 0xffff0000)
-#define MVPP2_ISR_RX_TX_CAUSE_REG(port) (0x5480 + 4 * (port))
-#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff
-#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK 0xff0000
-#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET 16
-#define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK BIT(24)
-#define MVPP2_CAUSE_FCS_ERR_MASK BIT(25)
-#define MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK BIT(26)
-#define MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK BIT(29)
-#define MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK BIT(30)
-#define MVPP2_CAUSE_MISC_SUM_MASK BIT(31)
-#define MVPP2_ISR_RX_TX_MASK_REG(port) (0x54a0 + 4 * (port))
-#define MVPP2_ISR_PON_RX_TX_MASK_REG 0x54bc
-#define MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff
-#define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK 0x3fc00000
-#define MVPP2_PON_CAUSE_MISC_SUM_MASK BIT(31)
-#define MVPP2_ISR_MISC_CAUSE_REG 0x55b0
-
-/* Buffer Manager registers */
-#define MVPP2_BM_POOL_BASE_REG(pool) (0x6000 + ((pool) * 4))
-#define MVPP2_BM_POOL_BASE_ADDR_MASK 0xfffff80
-#define MVPP2_BM_POOL_SIZE_REG(pool) (0x6040 + ((pool) * 4))
-#define MVPP2_BM_POOL_SIZE_MASK 0xfff0
-#define MVPP2_BM_POOL_READ_PTR_REG(pool) (0x6080 + ((pool) * 4))
-#define MVPP2_BM_POOL_GET_READ_PTR_MASK 0xfff0
-#define MVPP2_BM_POOL_PTRS_NUM_REG(pool) (0x60c0 + ((pool) * 4))
-#define MVPP2_BM_POOL_PTRS_NUM_MASK 0xfff0
-#define MVPP2_BM_BPPI_READ_PTR_REG(pool) (0x6100 + ((pool) * 4))
-#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool) (0x6140 + ((pool) * 4))
-#define MVPP2_BM_BPPI_PTR_NUM_MASK 0x7ff
-#define MVPP22_BM_POOL_PTRS_NUM_MASK 0xfff8
-#define MVPP2_BM_BPPI_PREFETCH_FULL_MASK BIT(16)
-#define MVPP2_BM_POOL_CTRL_REG(pool) (0x6200 + ((pool) * 4))
-#define MVPP2_BM_START_MASK BIT(0)
-#define MVPP2_BM_STOP_MASK BIT(1)
-#define MVPP2_BM_STATE_MASK BIT(4)
-#define MVPP2_BM_LOW_THRESH_OFFS 8
-#define MVPP2_BM_LOW_THRESH_MASK 0x7f00
-#define MVPP2_BM_LOW_THRESH_VALUE(val) ((val) << \
- MVPP2_BM_LOW_THRESH_OFFS)
-#define MVPP2_BM_HIGH_THRESH_OFFS 16
-#define MVPP2_BM_HIGH_THRESH_MASK 0x7f0000
-#define MVPP2_BM_HIGH_THRESH_VALUE(val) ((val) << \
- MVPP2_BM_HIGH_THRESH_OFFS)
-#define MVPP2_BM_INTR_CAUSE_REG(pool) (0x6240 + ((pool) * 4))
-#define MVPP2_BM_RELEASED_DELAY_MASK BIT(0)
-#define MVPP2_BM_ALLOC_FAILED_MASK BIT(1)
-#define MVPP2_BM_BPPE_EMPTY_MASK BIT(2)
-#define MVPP2_BM_BPPE_FULL_MASK BIT(3)
-#define MVPP2_BM_AVAILABLE_BP_LOW_MASK BIT(4)
-#define MVPP2_BM_INTR_MASK_REG(pool) (0x6280 + ((pool) * 4))
-#define MVPP2_BM_PHY_ALLOC_REG(pool) (0x6400 + ((pool) * 4))
-#define MVPP2_BM_PHY_ALLOC_GRNTD_MASK BIT(0)
-#define MVPP2_BM_VIRT_ALLOC_REG 0x6440
-#define MVPP22_BM_ADDR_HIGH_ALLOC 0x6444
-#define MVPP22_BM_ADDR_HIGH_PHYS_MASK 0xff
-#define MVPP22_BM_ADDR_HIGH_VIRT_MASK 0xff00
-#define MVPP22_BM_ADDR_HIGH_VIRT_SHIFT 8
-#define MVPP2_BM_PHY_RLS_REG(pool) (0x6480 + ((pool) * 4))
-#define MVPP2_BM_PHY_RLS_MC_BUFF_MASK BIT(0)
-#define MVPP2_BM_PHY_RLS_PRIO_EN_MASK BIT(1)
-#define MVPP2_BM_PHY_RLS_GRNTD_MASK BIT(2)
-#define MVPP2_BM_VIRT_RLS_REG 0x64c0
-#define MVPP22_BM_ADDR_HIGH_RLS_REG 0x64c4
-#define MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK 0xff
-#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK 0xff00
-#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT 8
-
-/* TX Scheduler registers */
-#define MVPP2_TXP_SCHED_PORT_INDEX_REG 0x8000
-#define MVPP2_TXP_SCHED_Q_CMD_REG 0x8004
-#define MVPP2_TXP_SCHED_ENQ_MASK 0xff
-#define MVPP2_TXP_SCHED_DISQ_OFFSET 8
-#define MVPP2_TXP_SCHED_CMD_1_REG 0x8010
-#define MVPP2_TXP_SCHED_PERIOD_REG 0x8018
-#define MVPP2_TXP_SCHED_MTU_REG 0x801c
-#define MVPP2_TXP_MTU_MAX 0x7FFFF
-#define MVPP2_TXP_SCHED_REFILL_REG 0x8020
-#define MVPP2_TXP_REFILL_TOKENS_ALL_MASK 0x7ffff
-#define MVPP2_TXP_REFILL_PERIOD_ALL_MASK 0x3ff00000
-#define MVPP2_TXP_REFILL_PERIOD_MASK(v) ((v) << 20)
-#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG 0x8024
-#define MVPP2_TXP_TOKEN_SIZE_MAX 0xffffffff
-#define MVPP2_TXQ_SCHED_REFILL_REG(q) (0x8040 + ((q) << 2))
-#define MVPP2_TXQ_REFILL_TOKENS_ALL_MASK 0x7ffff
-#define MVPP2_TXQ_REFILL_PERIOD_ALL_MASK 0x3ff00000
-#define MVPP2_TXQ_REFILL_PERIOD_MASK(v) ((v) << 20)
-#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q) (0x8060 + ((q) << 2))
-#define MVPP2_TXQ_TOKEN_SIZE_MAX 0x7fffffff
-#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q) (0x8080 + ((q) << 2))
-#define MVPP2_TXQ_TOKEN_CNTR_MAX 0xffffffff
-
-/* TX general registers */
-#define MVPP2_TX_SNOOP_REG 0x8800
-#define MVPP2_TX_PORT_FLUSH_REG 0x8810
-#define MVPP2_TX_PORT_FLUSH_MASK(port) (1 << (port))
-
-/* LMS registers */
-#define MVPP2_SRC_ADDR_MIDDLE 0x24
-#define MVPP2_SRC_ADDR_HIGH 0x28
-#define MVPP2_PHY_AN_CFG0_REG 0x34
-#define MVPP2_PHY_AN_STOP_SMI0_MASK BIT(7)
-#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG 0x305c
-#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT 0x27
-
-/* Per-port registers */
-#define MVPP2_GMAC_CTRL_0_REG 0x0
-#define MVPP2_GMAC_PORT_EN_MASK BIT(0)
-#define MVPP2_GMAC_PORT_TYPE_MASK BIT(1)
-#define MVPP2_GMAC_MAX_RX_SIZE_OFFS 2
-#define MVPP2_GMAC_MAX_RX_SIZE_MASK 0x7ffc
-#define MVPP2_GMAC_MIB_CNTR_EN_MASK BIT(15)
-#define MVPP2_GMAC_CTRL_1_REG 0x4
-#define MVPP2_GMAC_PERIODIC_XON_EN_MASK BIT(1)
-#define MVPP2_GMAC_GMII_LB_EN_MASK BIT(5)
-#define MVPP2_GMAC_PCS_LB_EN_BIT 6
-#define MVPP2_GMAC_PCS_LB_EN_MASK BIT(6)
-#define MVPP2_GMAC_SA_LOW_OFFS 7
-#define MVPP2_GMAC_CTRL_2_REG 0x8
-#define MVPP2_GMAC_INBAND_AN_MASK BIT(0)
-#define MVPP2_GMAC_FLOW_CTRL_MASK GENMASK(2, 1)
-#define MVPP2_GMAC_PCS_ENABLE_MASK BIT(3)
-#define MVPP2_GMAC_INTERNAL_CLK_MASK BIT(4)
-#define MVPP2_GMAC_DISABLE_PADDING BIT(5)
-#define MVPP2_GMAC_PORT_RESET_MASK BIT(6)
-#define MVPP2_GMAC_AUTONEG_CONFIG 0xc
-#define MVPP2_GMAC_FORCE_LINK_DOWN BIT(0)
-#define MVPP2_GMAC_FORCE_LINK_PASS BIT(1)
-#define MVPP2_GMAC_IN_BAND_AUTONEG BIT(2)
-#define MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS BIT(3)
-#define MVPP2_GMAC_CONFIG_MII_SPEED BIT(5)
-#define MVPP2_GMAC_CONFIG_GMII_SPEED BIT(6)
-#define MVPP2_GMAC_AN_SPEED_EN BIT(7)
-#define MVPP2_GMAC_FC_ADV_EN BIT(9)
-#define MVPP2_GMAC_FLOW_CTRL_AUTONEG BIT(11)
-#define MVPP2_GMAC_CONFIG_FULL_DUPLEX BIT(12)
-#define MVPP2_GMAC_AN_DUPLEX_EN BIT(13)
-#define MVPP2_GMAC_STATUS0 0x10
-#define MVPP2_GMAC_STATUS0_LINK_UP BIT(0)
-#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG 0x1c
-#define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS 6
-#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK 0x1fc0
-#define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v) (((v) << 6) & \
- MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)
-#define MVPP22_GMAC_INT_STAT 0x20
-#define MVPP22_GMAC_INT_STAT_LINK BIT(1)
-#define MVPP22_GMAC_INT_MASK 0x24
-#define MVPP22_GMAC_INT_MASK_LINK_STAT BIT(1)
-#define MVPP22_GMAC_CTRL_4_REG 0x90
-#define MVPP22_CTRL4_EXT_PIN_GMII_SEL BIT(0)
-#define MVPP22_CTRL4_DP_CLK_SEL BIT(5)
-#define MVPP22_CTRL4_SYNC_BYPASS_DIS BIT(6)
-#define MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE BIT(7)
-#define MVPP22_GMAC_INT_SUM_MASK 0xa4
-#define MVPP22_GMAC_INT_SUM_MASK_LINK_STAT BIT(1)
-
-/* Per-port XGMAC registers. PPv2.2 only, used only by GOP port 0,
- * relative to port->base.
- */
-#define MVPP22_XLG_CTRL0_REG 0x100
-#define MVPP22_XLG_CTRL0_PORT_EN BIT(0)
-#define MVPP22_XLG_CTRL0_MAC_RESET_DIS BIT(1)
-#define MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN BIT(7)
-#define MVPP22_XLG_CTRL0_MIB_CNT_DIS BIT(14)
-#define MVPP22_XLG_CTRL1_REG 0x104
-#define MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS 0
-#define MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK 0x1fff
-#define MVPP22_XLG_STATUS 0x10c
-#define MVPP22_XLG_STATUS_LINK_UP BIT(0)
-#define MVPP22_XLG_INT_STAT 0x114
-#define MVPP22_XLG_INT_STAT_LINK BIT(1)
-#define MVPP22_XLG_INT_MASK 0x118
-#define MVPP22_XLG_INT_MASK_LINK BIT(1)
-#define MVPP22_XLG_CTRL3_REG 0x11c
-#define MVPP22_XLG_CTRL3_MACMODESELECT_MASK (7 << 13)
-#define MVPP22_XLG_CTRL3_MACMODESELECT_GMAC (0 << 13)
-#define MVPP22_XLG_CTRL3_MACMODESELECT_10G (1 << 13)
-#define MVPP22_XLG_EXT_INT_MASK 0x15c
-#define MVPP22_XLG_EXT_INT_MASK_XLG BIT(1)
-#define MVPP22_XLG_EXT_INT_MASK_GIG BIT(2)
-#define MVPP22_XLG_CTRL4_REG 0x184
-#define MVPP22_XLG_CTRL4_FWD_FC BIT(5)
-#define MVPP22_XLG_CTRL4_FWD_PFC BIT(6)
-#define MVPP22_XLG_CTRL4_MACMODSELECT_GMAC BIT(12)
-
-/* SMI registers. PPv2.2 only, relative to priv->iface_base. */
-#define MVPP22_SMI_MISC_CFG_REG 0x1204
-#define MVPP22_SMI_POLLING_EN BIT(10)
-
-#define MVPP22_GMAC_BASE(port) (0x7000 + (port) * 0x1000 + 0xe00)
-
-#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff
-
-/* Descriptor ring Macros */
-#define MVPP2_QUEUE_NEXT_DESC(q, index) \
- (((index) < (q)->last_desc) ? ((index) + 1) : 0)
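MVPP2_QUEUE_NEXT_DESC() advances a ring index with a compare instead of a modulo. A worked check (illustrative), assuming q->last_desc == 255:

/* MVPP2_QUEUE_NEXT_DESC(q, 254) -> 255
 * MVPP2_QUEUE_NEXT_DESC(q, 255) -> 0	(wrap-around)
 */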
-
-/* MPCS registers. PPv2.2 only */
-#define MVPP22_MPCS_BASE(port) (0x7000 + (port) * 0x1000)
-#define MVPP22_MPCS_CTRL 0x14
-#define MVPP22_MPCS_CTRL_FWD_ERR_CONN BIT(10)
-#define MVPP22_MPCS_CLK_RESET 0x14c
-#define MAC_CLK_RESET_SD_TX BIT(0)
-#define MAC_CLK_RESET_SD_RX BIT(1)
-#define MAC_CLK_RESET_MAC BIT(2)
-#define MVPP22_MPCS_CLK_RESET_DIV_RATIO(n) ((n) << 4)
-#define MVPP22_MPCS_CLK_RESET_DIV_SET BIT(11)
-
-/* XPCS registers. PPv2.2 only */
-#define MVPP22_XPCS_BASE(port) (0x7400 + (port) * 0x1000)
-#define MVPP22_XPCS_CFG0 0x0
-#define MVPP22_XPCS_CFG0_PCS_MODE(n) ((n) << 3)
-#define MVPP22_XPCS_CFG0_ACTIVE_LANE(n) ((n) << 5)
-
-/* System controller registers. Accessed through a regmap. */
-#define GENCONF_SOFT_RESET1 0x1108
-#define GENCONF_SOFT_RESET1_GOP BIT(6)
-#define GENCONF_PORT_CTRL0 0x1110
-#define GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT BIT(1)
-#define GENCONF_PORT_CTRL0_RX_DATA_SAMPLE BIT(29)
-#define GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR BIT(31)
-#define GENCONF_PORT_CTRL1 0x1114
-#define GENCONF_PORT_CTRL1_EN(p) BIT(p)
-#define GENCONF_PORT_CTRL1_RESET(p) (BIT(p) << 28)
-#define GENCONF_CTRL0 0x1120
-#define GENCONF_CTRL0_PORT0_RGMII BIT(0)
-#define GENCONF_CTRL0_PORT1_RGMII_MII BIT(1)
-#define GENCONF_CTRL0_PORT1_RGMII BIT(2)
-
-/* Various constants */
-
-/* Coalescing */
-#define MVPP2_TXDONE_COAL_PKTS_THRESH 64
-#define MVPP2_TXDONE_HRTIMER_PERIOD_NS 1000000UL
-#define MVPP2_TXDONE_COAL_USEC 1000
-#define MVPP2_RX_COAL_PKTS 32
-#define MVPP2_RX_COAL_USEC 64
-
-/* The two-byte Marvell header either contains a special value used
- * by Marvell switches when a specific hardware mode is enabled (not
- * supported by this driver) or is automatically filled with zeroes on
- * the RX side. Since those two bytes sit at the front of the Ethernet
- * header, they allow the IP header to be aligned on a 4-byte boundary
- * automatically: the hardware skips those two bytes on its own.
- */
-#define MVPP2_MH_SIZE 2
-#define MVPP2_ETH_TYPE_LEN 2
-#define MVPP2_PPPOE_HDR_SIZE 8
-#define MVPP2_VLAN_TAG_LEN 4
-#define MVPP2_VLAN_TAG_EDSA_LEN 8
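These sizes are what make the alignment trick described above work: the 2-byte MH plus the 14-byte Ethernet header put the IP header at offset 16. A hedged compile-time check (a sketch, not driver code; 14 is the usual ETH_HLEN):

/* Sketch only: IP header offset behind the Marvell header. */
_Static_assert((MVPP2_MH_SIZE + 14) % 4 == 0,
	       "IP header is 4-byte aligned behind the MH");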
-
-/* Lbtd 802.3 type */
-#define MVPP2_IP_LBDT_TYPE 0xfffa
-
-#define MVPP2_TX_CSUM_MAX_SIZE 9800
-
-/* Timeout constants */
-#define MVPP2_TX_DISABLE_TIMEOUT_MSEC 1000
-#define MVPP2_TX_PENDING_TIMEOUT_MSEC 1000
-
-#define MVPP2_TX_MTU_MAX 0x7ffff
-
-/* Maximum number of T-CONTs of PON port */
-#define MVPP2_MAX_TCONT 16
-
-/* Maximum number of supported ports */
-#define MVPP2_MAX_PORTS 4
-
-/* Maximum number of TXQs used by single port */
-#define MVPP2_MAX_TXQ 8
-
-/* MVPP2_MAX_TSO_SEGS is the maximum number of fragments to allow in the GSO
- * skb. As we need a maximum of two descriptors per fragment (1 header, 1 data),
- * multiply this value by two to count the maximum number of skb descs needed.
- */
-#define MVPP2_MAX_TSO_SEGS 300
-#define MVPP2_MAX_SKB_DESCS (MVPP2_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
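A worked bound (illustrative; MAX_SKB_FRAGS depends on PAGE_SIZE and is assumed to be 17 here, its usual value with 4 KiB pages):

/* MVPP2_MAX_SKB_DESCS = 300 * 2 + 17 = 617 descriptors: one header and
 * one data descriptor per TSO segment, plus one per page fragment.
 */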
-
-/* Default number of RXQs in use */
-#define MVPP2_DEFAULT_RXQ 4
-
-/* Max number of Rx descriptors */
-#define MVPP2_MAX_RXD_MAX 1024
-#define MVPP2_MAX_RXD_DFLT 128
-
-/* Max number of Tx descriptors */
-#define MVPP2_MAX_TXD_MAX 2048
-#define MVPP2_MAX_TXD_DFLT 1024
-
-/* Amount of Tx descriptors that can be reserved at once by CPU */
-#define MVPP2_CPU_DESC_CHUNK 64
-
-/* Max number of Tx descriptors in each aggregated queue */
-#define MVPP2_AGGR_TXQ_SIZE 256
-
-/* Descriptor aligned size */
-#define MVPP2_DESC_ALIGNED_SIZE 32
-
-/* Descriptor alignment mask */
-#define MVPP2_TX_DESC_ALIGN (MVPP2_DESC_ALIGNED_SIZE - 1)
-
-/* RX FIFO constants */
-#define MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB 0x8000
-#define MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB 0x2000
-#define MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB 0x1000
-#define MVPP2_RX_FIFO_PORT_ATTR_SIZE_32KB 0x200
-#define MVPP2_RX_FIFO_PORT_ATTR_SIZE_8KB 0x80
-#define MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB 0x40
-#define MVPP2_RX_FIFO_PORT_MIN_PKT 0x80
-
-/* TX FIFO constants */
-#define MVPP22_TX_FIFO_DATA_SIZE_10KB 0xa
-#define MVPP22_TX_FIFO_DATA_SIZE_3KB 0x3
-#define MVPP2_TX_FIFO_THRESHOLD_MIN 256
-#define MVPP2_TX_FIFO_THRESHOLD_10KB \
- (MVPP22_TX_FIFO_DATA_SIZE_10KB * 1024 - MVPP2_TX_FIFO_THRESHOLD_MIN)
-#define MVPP2_TX_FIFO_THRESHOLD_3KB \
- (MVPP22_TX_FIFO_DATA_SIZE_3KB * 1024 - MVPP2_TX_FIFO_THRESHOLD_MIN)
-
-/* RX buffer constants */
-#define MVPP2_SKB_SHINFO_SIZE \
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
-
-#define MVPP2_RX_PKT_SIZE(mtu) \
- ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
- ETH_HLEN + ETH_FCS_LEN, cache_line_size())
-
-#define MVPP2_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)
-#define MVPP2_RX_TOTAL_SIZE(buf_size) ((buf_size) + MVPP2_SKB_SHINFO_SIZE)
-#define MVPP2_RX_MAX_PKT_SIZE(total_size) \
- ((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)
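A worked example of the buffer sizing (illustrative; assumes cache_line_size() == 64 and NET_SKB_PAD == 64):

/* MVPP2_RX_PKT_SIZE(1500)  = ALIGN(1500 + 2 + 4 + 14 + 4, 64) = 1536
 * MVPP2_RX_BUF_SIZE(1536)  = 1536 + 64 = 1600
 * MVPP2_RX_TOTAL_SIZE(1600) adds the aligned skb_shared_info on top.
 */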
-
-#define MVPP2_BIT_TO_BYTE(bit) ((bit) / 8)
-
-/* IPv6 max L3 address size */
-#define MVPP2_MAX_L3_ADDR_SIZE 16
-
-/* Port flags */
-#define MVPP2_F_LOOPBACK BIT(0)
-
-/* Marvell tag types */
-enum mvpp2_tag_type {
- MVPP2_TAG_TYPE_NONE = 0,
- MVPP2_TAG_TYPE_MH = 1,
- MVPP2_TAG_TYPE_DSA = 2,
- MVPP2_TAG_TYPE_EDSA = 3,
- MVPP2_TAG_TYPE_VLAN = 4,
- MVPP2_TAG_TYPE_LAST = 5
-};
-
-/* Parser constants */
-#define MVPP2_PRS_TCAM_SRAM_SIZE 256
-#define MVPP2_PRS_TCAM_WORDS 6
-#define MVPP2_PRS_SRAM_WORDS 4
-#define MVPP2_PRS_FLOW_ID_SIZE 64
-#define MVPP2_PRS_FLOW_ID_MASK 0x3f
-#define MVPP2_PRS_TCAM_ENTRY_INVALID 1
-#define MVPP2_PRS_TCAM_DSA_TAGGED_BIT BIT(5)
-#define MVPP2_PRS_IPV4_HEAD 0x40
-#define MVPP2_PRS_IPV4_HEAD_MASK 0xf0
-#define MVPP2_PRS_IPV4_MC 0xe0
-#define MVPP2_PRS_IPV4_MC_MASK 0xf0
-#define MVPP2_PRS_IPV4_BC_MASK 0xff
-#define MVPP2_PRS_IPV4_IHL 0x5
-#define MVPP2_PRS_IPV4_IHL_MASK 0xf
-#define MVPP2_PRS_IPV6_MC 0xff
-#define MVPP2_PRS_IPV6_MC_MASK 0xff
-#define MVPP2_PRS_IPV6_HOP_MASK 0xff
-#define MVPP2_PRS_TCAM_PROTO_MASK 0xff
-#define MVPP2_PRS_TCAM_PROTO_MASK_L 0x3f
-#define MVPP2_PRS_DBL_VLANS_MAX 100
-#define MVPP2_PRS_CAST_MASK BIT(0)
-#define MVPP2_PRS_MCAST_VAL BIT(0)
-#define MVPP2_PRS_UCAST_VAL 0x0
-
-/* Tcam structure:
- * - lookup ID - 4 bits
- * - port ID - 1 byte
- * - additional information - 1 byte
- * - header data - 8 bytes
- * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0).
- */
-#define MVPP2_PRS_AI_BITS 8
-#define MVPP2_PRS_PORT_MASK 0xff
-#define MVPP2_PRS_LU_MASK 0xf
-#define MVPP2_PRS_TCAM_DATA_BYTE(offs) \
- (((offs) - ((offs) % 2)) * 2 + ((offs) % 2))
-#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs) \
- (((offs) * 2) - ((offs) % 2) + 2)
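The two swizzle macros encode how header-data bytes and their enable (mask) bytes share each 32-bit TCAM word. A worked mapping (illustrative, using the byte[] view of the entry):

/* MVPP2_PRS_TCAM_DATA_BYTE(0..3)    -> bytes 0, 1, 4, 5
 * MVPP2_PRS_TCAM_DATA_BYTE_EN(0..3) -> bytes 2, 3, 6, 7
 * i.e. each word holds two data bytes in its low half and the matching
 * two enable bytes in its high half.
 */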
-#define MVPP2_PRS_TCAM_AI_BYTE 16
-#define MVPP2_PRS_TCAM_PORT_BYTE 17
-#define MVPP2_PRS_TCAM_LU_BYTE 20
-#define MVPP2_PRS_TCAM_EN_OFFS(offs) ((offs) + 2)
-#define MVPP2_PRS_TCAM_INV_WORD 5
-
-#define MVPP2_PRS_VID_TCAM_BYTE 2
-
-/* TCAM range for unicast and multicast filtering. We have 25 entries per port,
- * with 4 dedicated to UC filtering and the rest to multicast filtering.
- * Additionally, we reserve one entry for the broadcast address and one for
- * each port's own address.
- */
-#define MVPP2_PRS_MAC_UC_MC_FILT_MAX 25
-#define MVPP2_PRS_MAC_RANGE_SIZE 80
-
-/* Number of entries per port dedicated to UC and MC filtering */
-#define MVPP2_PRS_MAC_UC_FILT_MAX 4
-#define MVPP2_PRS_MAC_MC_FILT_MAX (MVPP2_PRS_MAC_UC_MC_FILT_MAX - \
- MVPP2_PRS_MAC_UC_FILT_MAX)
-
-/* A TCAM range is reserved for VLAN filtering entries; its size is 33:
- * 10 VLAN ID filter entries per port
- * 1 default VLAN filter entry per port
- * It is assumed that there are 3 filtered ports, not including the
- * loopback port.
- */
-#define MVPP2_PRS_VLAN_FILT_MAX 11
-#define MVPP2_PRS_VLAN_FILT_RANGE_SIZE 33
-
-#define MVPP2_PRS_VLAN_FILT_MAX_ENTRY (MVPP2_PRS_VLAN_FILT_MAX - 2)
-#define MVPP2_PRS_VLAN_FILT_DFLT_ENTRY (MVPP2_PRS_VLAN_FILT_MAX - 1)
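A worked check of the range size (illustrative):

/* MVPP2_PRS_VLAN_FILT_MAX = 11 entries per port (10 VID + 1 default);
 * 3 filtered ports * 11 = 33 = MVPP2_PRS_VLAN_FILT_RANGE_SIZE.
 */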
-
-/* Tcam entries ID */
-#define MVPP2_PE_DROP_ALL 0
-#define MVPP2_PE_FIRST_FREE_TID 1
-
-/* MAC filtering range */
-#define MVPP2_PE_MAC_RANGE_END (MVPP2_PE_VID_FILT_RANGE_START - 1)
-#define MVPP2_PE_MAC_RANGE_START (MVPP2_PE_MAC_RANGE_END - \
- MVPP2_PRS_MAC_RANGE_SIZE + 1)
-/* VLAN filtering range */
-#define MVPP2_PE_VID_FILT_RANGE_END (MVPP2_PRS_TCAM_SRAM_SIZE - 31)
-#define MVPP2_PE_VID_FILT_RANGE_START (MVPP2_PE_VID_FILT_RANGE_END - \
- MVPP2_PRS_VLAN_FILT_RANGE_SIZE + 1)
-#define MVPP2_PE_LAST_FREE_TID (MVPP2_PE_MAC_RANGE_START - 1)
-#define MVPP2_PE_IP6_EXT_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 30)
-#define MVPP2_PE_IP6_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 29)
-#define MVPP2_PE_IP4_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 28)
-#define MVPP2_PE_LAST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 27)
-#define MVPP2_PE_FIRST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 22)
-#define MVPP2_PE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 21)
-#define MVPP2_PE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 20)
-#define MVPP2_PE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 19)
-#define MVPP2_PE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 18)
-#define MVPP2_PE_ETYPE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 17)
-#define MVPP2_PE_ETYPE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 16)
-#define MVPP2_PE_ETYPE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 15)
-#define MVPP2_PE_ETYPE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 14)
-#define MVPP2_PE_MH_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 13)
-#define MVPP2_PE_DSA_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 12)
-#define MVPP2_PE_IP6_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 11)
-#define MVPP2_PE_IP4_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 10)
-#define MVPP2_PE_ETH_TYPE_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 9)
-#define MVPP2_PE_VID_FLTR_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 8)
-#define MVPP2_PE_VID_EDSA_FLTR_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 7)
-#define MVPP2_PE_VLAN_DBL (MVPP2_PRS_TCAM_SRAM_SIZE - 6)
-#define MVPP2_PE_VLAN_NONE (MVPP2_PRS_TCAM_SRAM_SIZE - 5)
-/* reserved */
-#define MVPP2_PE_MAC_MC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 3)
-#define MVPP2_PE_MAC_UC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 2)
-#define MVPP2_PE_MAC_NON_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 1)
-
-#define MVPP2_PRS_VID_PORT_FIRST(port) (MVPP2_PE_VID_FILT_RANGE_START + \
- ((port) * MVPP2_PRS_VLAN_FILT_MAX))
-#define MVPP2_PRS_VID_PORT_LAST(port) (MVPP2_PRS_VID_PORT_FIRST(port) \
- + MVPP2_PRS_VLAN_FILT_MAX_ENTRY)
-/* Index of default vid filter for given port */
-#define MVPP2_PRS_VID_PORT_DFLT(port) (MVPP2_PRS_VID_PORT_FIRST(port) \
- + MVPP2_PRS_VLAN_FILT_DFLT_ENTRY)
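A worked index computation (illustrative) for port 1:

/* MVPP2_PRS_VID_PORT_FIRST(1) = MVPP2_PE_VID_FILT_RANGE_START + 11
 * MVPP2_PRS_VID_PORT_LAST(1)  = FIRST(1) + 9   (last VID filter entry)
 * MVPP2_PRS_VID_PORT_DFLT(1)  = FIRST(1) + 10  (per-port default entry)
 */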
-
-/* Sram structure
- * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(3)->(0).
- */
-#define MVPP2_PRS_SRAM_RI_OFFS 0
-#define MVPP2_PRS_SRAM_RI_WORD 0
-#define MVPP2_PRS_SRAM_RI_CTRL_OFFS 32
-#define MVPP2_PRS_SRAM_RI_CTRL_WORD 1
-#define MVPP2_PRS_SRAM_RI_CTRL_BITS 32
-#define MVPP2_PRS_SRAM_SHIFT_OFFS 64
-#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT 72
-#define MVPP2_PRS_SRAM_UDF_OFFS 73
-#define MVPP2_PRS_SRAM_UDF_BITS 8
-#define MVPP2_PRS_SRAM_UDF_MASK 0xff
-#define MVPP2_PRS_SRAM_UDF_SIGN_BIT 81
-#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS 82
-#define MVPP2_PRS_SRAM_UDF_TYPE_MASK 0x7
-#define MVPP2_PRS_SRAM_UDF_TYPE_L3 1
-#define MVPP2_PRS_SRAM_UDF_TYPE_L4 4
-#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS 85
-#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK 0x3
-#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD 1
-#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD 2
-#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD 3
-#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS 87
-#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS 2
-#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK 0x3
-#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD 0
-#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD 2
-#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD 3
-#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS 89
-#define MVPP2_PRS_SRAM_AI_OFFS 90
-#define MVPP2_PRS_SRAM_AI_CTRL_OFFS 98
-#define MVPP2_PRS_SRAM_AI_CTRL_BITS 8
-#define MVPP2_PRS_SRAM_AI_MASK 0xff
-#define MVPP2_PRS_SRAM_NEXT_LU_OFFS 106
-#define MVPP2_PRS_SRAM_NEXT_LU_MASK 0xf
-#define MVPP2_PRS_SRAM_LU_DONE_BIT 110
-#define MVPP2_PRS_SRAM_LU_GEN_BIT 111
-
-/* Sram result info bits assignment */
-#define MVPP2_PRS_RI_MAC_ME_MASK 0x1
-#define MVPP2_PRS_RI_DSA_MASK 0x2
-#define MVPP2_PRS_RI_VLAN_MASK (BIT(2) | BIT(3))
-#define MVPP2_PRS_RI_VLAN_NONE 0x0
-#define MVPP2_PRS_RI_VLAN_SINGLE BIT(2)
-#define MVPP2_PRS_RI_VLAN_DOUBLE BIT(3)
-#define MVPP2_PRS_RI_VLAN_TRIPLE (BIT(2) | BIT(3))
-#define MVPP2_PRS_RI_CPU_CODE_MASK 0x70
-#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC BIT(4)
-#define MVPP2_PRS_RI_L2_CAST_MASK (BIT(9) | BIT(10))
-#define MVPP2_PRS_RI_L2_UCAST 0x0
-#define MVPP2_PRS_RI_L2_MCAST BIT(9)
-#define MVPP2_PRS_RI_L2_BCAST BIT(10)
-#define MVPP2_PRS_RI_PPPOE_MASK 0x800
-#define MVPP2_PRS_RI_L3_PROTO_MASK (BIT(12) | BIT(13) | BIT(14))
-#define MVPP2_PRS_RI_L3_UN 0x0
-#define MVPP2_PRS_RI_L3_IP4 BIT(12)
-#define MVPP2_PRS_RI_L3_IP4_OPT BIT(13)
-#define MVPP2_PRS_RI_L3_IP4_OTHER (BIT(12) | BIT(13))
-#define MVPP2_PRS_RI_L3_IP6 BIT(14)
-#define MVPP2_PRS_RI_L3_IP6_EXT (BIT(12) | BIT(14))
-#define MVPP2_PRS_RI_L3_ARP (BIT(13) | BIT(14))
-#define MVPP2_PRS_RI_L3_ADDR_MASK (BIT(15) | BIT(16))
-#define MVPP2_PRS_RI_L3_UCAST 0x0
-#define MVPP2_PRS_RI_L3_MCAST BIT(15)
-#define MVPP2_PRS_RI_L3_BCAST (BIT(15) | BIT(16))
-#define MVPP2_PRS_RI_IP_FRAG_MASK 0x20000
-#define MVPP2_PRS_RI_IP_FRAG_TRUE BIT(17)
-#define MVPP2_PRS_RI_UDF3_MASK 0x300000
-#define MVPP2_PRS_RI_UDF3_RX_SPECIAL BIT(21)
-#define MVPP2_PRS_RI_L4_PROTO_MASK 0x1c00000
-#define MVPP2_PRS_RI_L4_TCP BIT(22)
-#define MVPP2_PRS_RI_L4_UDP BIT(23)
-#define MVPP2_PRS_RI_L4_OTHER (BIT(22) | BIT(23))
-#define MVPP2_PRS_RI_UDF7_MASK 0x60000000
-#define MVPP2_PRS_RI_UDF7_IP6_LITE BIT(29)
-#define MVPP2_PRS_RI_DROP_MASK 0x80000000
-
-/* Sram additional info bits assignment */
-#define MVPP2_PRS_IPV4_DIP_AI_BIT BIT(0)
-#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT BIT(0)
-#define MVPP2_PRS_IPV6_EXT_AI_BIT BIT(1)
-#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT BIT(2)
-#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT BIT(3)
-#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT BIT(4)
-#define MVPP2_PRS_SINGLE_VLAN_AI 0
-#define MVPP2_PRS_DBL_VLAN_AI_BIT BIT(7)
-#define MVPP2_PRS_EDSA_VID_AI_BIT BIT(0)
-
-/* DSA/EDSA type */
-#define MVPP2_PRS_TAGGED true
-#define MVPP2_PRS_UNTAGGED false
-#define MVPP2_PRS_EDSA true
-#define MVPP2_PRS_DSA false
-
-/* MAC entries, shadow udf */
-enum mvpp2_prs_udf {
- MVPP2_PRS_UDF_MAC_DEF,
- MVPP2_PRS_UDF_MAC_RANGE,
- MVPP2_PRS_UDF_L2_DEF,
- MVPP2_PRS_UDF_L2_DEF_COPY,
- MVPP2_PRS_UDF_L2_USER,
-};
-
-/* Lookup ID */
-enum mvpp2_prs_lookup {
- MVPP2_PRS_LU_MH,
- MVPP2_PRS_LU_MAC,
- MVPP2_PRS_LU_DSA,
- MVPP2_PRS_LU_VLAN,
- MVPP2_PRS_LU_VID,
- MVPP2_PRS_LU_L2,
- MVPP2_PRS_LU_PPPOE,
- MVPP2_PRS_LU_IP4,
- MVPP2_PRS_LU_IP6,
- MVPP2_PRS_LU_FLOWS,
- MVPP2_PRS_LU_LAST,
-};
-
-/* L2 cast enum */
-enum mvpp2_prs_l2_cast {
- MVPP2_PRS_L2_UNI_CAST,
- MVPP2_PRS_L2_MULTI_CAST,
-};
-
-/* L3 cast enum */
-enum mvpp2_prs_l3_cast {
- MVPP2_PRS_L3_UNI_CAST,
- MVPP2_PRS_L3_MULTI_CAST,
- MVPP2_PRS_L3_BROAD_CAST
-};
-
-/* Classifier constants */
-#define MVPP2_CLS_FLOWS_TBL_SIZE 512
-#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS 3
-#define MVPP2_CLS_LKP_TBL_SIZE 64
-#define MVPP2_CLS_RX_QUEUES 256
-
-/* RSS constants */
-#define MVPP22_RSS_TABLE_ENTRIES 32
-
-/* BM constants */
-#define MVPP2_BM_JUMBO_BUF_NUM 512
-#define MVPP2_BM_LONG_BUF_NUM 1024
-#define MVPP2_BM_SHORT_BUF_NUM 2048
-#define MVPP2_BM_POOL_SIZE_MAX (16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4)
-#define MVPP2_BM_POOL_PTR_ALIGN 128
-
-/* BM cookie (32 bits) definition */
-#define MVPP2_BM_COOKIE_POOL_OFFS 8
-#define MVPP2_BM_COOKIE_CPU_OFFS 24
-
-#define MVPP2_BM_SHORT_FRAME_SIZE 512
-#define MVPP2_BM_LONG_FRAME_SIZE 2048
-#define MVPP2_BM_JUMBO_FRAME_SIZE 10240
-/* BM short pool packet size
- * These values ensure that for SWF the total number
- * of bytes allocated for each buffer will be 512
- */
-#define MVPP2_BM_SHORT_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(MVPP2_BM_SHORT_FRAME_SIZE)
-#define MVPP2_BM_LONG_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(MVPP2_BM_LONG_FRAME_SIZE)
-#define MVPP2_BM_JUMBO_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(MVPP2_BM_JUMBO_FRAME_SIZE)
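The pool packet sizes are derived backwards from the target buffer footprint. A rough worked example (illustrative; sizeof(struct skb_shared_info) varies by architecture and kernel version, assumed ~320 bytes after SKB_DATA_ALIGN() here):

/* MVPP2_BM_SHORT_PKT_SIZE = 512 - NET_SKB_PAD(64) - shinfo(~320) ~ 128
 * bytes of packet data, so a short-pool buffer stays within the
 * 512-byte total that the comment above calls out.
 */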
-
-#define MVPP21_ADDR_SPACE_SZ 0
-#define MVPP22_ADDR_SPACE_SZ SZ_64K
-
-#define MVPP2_MAX_THREADS 8
-#define MVPP2_MAX_QVECS MVPP2_MAX_THREADS
-
-enum mvpp2_bm_pool_log_num {
- MVPP2_BM_SHORT,
- MVPP2_BM_LONG,
- MVPP2_BM_JUMBO,
- MVPP2_BM_POOLS_NUM
-};
-
-static struct {
- int pkt_size;
- int buf_num;
-} mvpp2_pools[MVPP2_BM_POOLS_NUM];
-
-/* GMAC MIB Counters register definitions */
-#define MVPP21_MIB_COUNTERS_OFFSET 0x1000
-#define MVPP21_MIB_COUNTERS_PORT_SZ 0x400
-#define MVPP22_MIB_COUNTERS_OFFSET 0x0
-#define MVPP22_MIB_COUNTERS_PORT_SZ 0x100
-
-#define MVPP2_MIB_GOOD_OCTETS_RCVD 0x0
-#define MVPP2_MIB_BAD_OCTETS_RCVD 0x8
-#define MVPP2_MIB_CRC_ERRORS_SENT 0xc
-#define MVPP2_MIB_UNICAST_FRAMES_RCVD 0x10
-#define MVPP2_MIB_BROADCAST_FRAMES_RCVD 0x18
-#define MVPP2_MIB_MULTICAST_FRAMES_RCVD 0x1c
-#define MVPP2_MIB_FRAMES_64_OCTETS 0x20
-#define MVPP2_MIB_FRAMES_65_TO_127_OCTETS 0x24
-#define MVPP2_MIB_FRAMES_128_TO_255_OCTETS 0x28
-#define MVPP2_MIB_FRAMES_256_TO_511_OCTETS 0x2c
-#define MVPP2_MIB_FRAMES_512_TO_1023_OCTETS 0x30
-#define MVPP2_MIB_FRAMES_1024_TO_MAX_OCTETS 0x34
-#define MVPP2_MIB_GOOD_OCTETS_SENT 0x38
-#define MVPP2_MIB_UNICAST_FRAMES_SENT 0x40
-#define MVPP2_MIB_MULTICAST_FRAMES_SENT 0x48
-#define MVPP2_MIB_BROADCAST_FRAMES_SENT 0x4c
-#define MVPP2_MIB_FC_SENT 0x54
-#define MVPP2_MIB_FC_RCVD 0x58
-#define MVPP2_MIB_RX_FIFO_OVERRUN 0x5c
-#define MVPP2_MIB_UNDERSIZE_RCVD 0x60
-#define MVPP2_MIB_FRAGMENTS_RCVD 0x64
-#define MVPP2_MIB_OVERSIZE_RCVD 0x68
-#define MVPP2_MIB_JABBER_RCVD 0x6c
-#define MVPP2_MIB_MAC_RCV_ERROR 0x70
-#define MVPP2_MIB_BAD_CRC_EVENT 0x74
-#define MVPP2_MIB_COLLISION 0x78
-#define MVPP2_MIB_LATE_COLLISION 0x7c
-
-#define MVPP2_MIB_COUNTERS_STATS_DELAY (1 * HZ)
-
-#define MVPP2_DESC_DMA_MASK DMA_BIT_MASK(40)
-
-/* Definitions */
-
-/* Shared Packet Processor resources */
-struct mvpp2 {
- /* Shared registers' base addresses */
- void __iomem *lms_base;
- void __iomem *iface_base;
-
- /* On PPv2.2, each "software thread" can access the base
- * register through a separate address space, each 64 KB apart
- * from each other. Typically, such address spaces will be
- * used per CPU.
- */
- void __iomem *swth_base[MVPP2_MAX_THREADS];
-
-	/* On PPv2.2, some port control registers are located in the system
-	 * controller space. These registers are accessible through a regmap.
-	 */
- struct regmap *sysctrl_base;
-
- /* Common clocks */
- struct clk *pp_clk;
- struct clk *gop_clk;
- struct clk *mg_clk;
- struct clk *mg_core_clk;
- struct clk *axi_clk;
-
- /* List of pointers to port structures */
- int port_count;
- struct mvpp2_port *port_list[MVPP2_MAX_PORTS];
-
- /* Aggregated TXQs */
- struct mvpp2_tx_queue *aggr_txqs;
-
- /* BM pools */
- struct mvpp2_bm_pool *bm_pools;
-
- /* PRS shadow table */
- struct mvpp2_prs_shadow *prs_shadow;
- /* PRS auxiliary table for double vlan entries control */
- bool *prs_double_vlans;
-
- /* Tclk value */
- u32 tclk;
-
- /* HW version */
- enum { MVPP21, MVPP22 } hw_version;
-
- /* Maximum number of RXQs per port */
- unsigned int max_port_rxqs;
-
- /* Workqueue to gather hardware statistics */
- char queue_name[30];
- struct workqueue_struct *stats_queue;
-};
-
-struct mvpp2_pcpu_stats {
- struct u64_stats_sync syncp;
- u64 rx_packets;
- u64 rx_bytes;
- u64 tx_packets;
- u64 tx_bytes;
-};
-
-/* Per-CPU port control */
-struct mvpp2_port_pcpu {
- struct hrtimer tx_done_timer;
- bool timer_scheduled;
- /* Tasklet for egress finalization */
- struct tasklet_struct tx_done_tasklet;
-};
-
-struct mvpp2_queue_vector {
- int irq;
- struct napi_struct napi;
- enum { MVPP2_QUEUE_VECTOR_SHARED, MVPP2_QUEUE_VECTOR_PRIVATE } type;
- int sw_thread_id;
- u16 sw_thread_mask;
- int first_rxq;
- int nrxqs;
- u32 pending_cause_rx;
- struct mvpp2_port *port;
-};
-
-struct mvpp2_port {
- u8 id;
-
-	/* Index of the port within the "group of ports" (GOP) complex */
- int gop_id;
-
- int link_irq;
-
- struct mvpp2 *priv;
-
- /* Firmware node associated to the port */
- struct fwnode_handle *fwnode;
-
- /* Per-port registers' base address */
- void __iomem *base;
- void __iomem *stats_base;
-
- struct mvpp2_rx_queue **rxqs;
- unsigned int nrxqs;
- struct mvpp2_tx_queue **txqs;
- unsigned int ntxqs;
- struct net_device *dev;
-
- int pkt_size;
-
- /* Per-CPU port control */
- struct mvpp2_port_pcpu __percpu *pcpu;
-
- /* Flags */
- unsigned long flags;
-
- u16 tx_ring_size;
- u16 rx_ring_size;
- struct mvpp2_pcpu_stats __percpu *stats;
- u64 *ethtool_stats;
-
- /* Per-port work and its lock to gather hardware statistics */
- struct mutex gather_stats_lock;
- struct delayed_work stats_work;
-
- phy_interface_t phy_interface;
- struct device_node *phy_node;
- struct phy *comphy;
- unsigned int link;
- unsigned int duplex;
- unsigned int speed;
-
- struct mvpp2_bm_pool *pool_long;
- struct mvpp2_bm_pool *pool_short;
-
- /* Index of first port's physical RXQ */
- u8 first_rxq;
-
- struct mvpp2_queue_vector qvecs[MVPP2_MAX_QVECS];
- unsigned int nqvecs;
- bool has_tx_irqs;
-
- u32 tx_time_coal;
-};
-
-/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the layout
- * of the transmit and receive DMA descriptors; that layout is defined
- * by the hardware design.
- */
-
-#define MVPP2_TXD_L3_OFF_SHIFT 0
-#define MVPP2_TXD_IP_HLEN_SHIFT 8
-#define MVPP2_TXD_L4_CSUM_FRAG BIT(13)
-#define MVPP2_TXD_L4_CSUM_NOT BIT(14)
-#define MVPP2_TXD_IP_CSUM_DISABLE BIT(15)
-#define MVPP2_TXD_PADDING_DISABLE BIT(23)
-#define MVPP2_TXD_L4_UDP BIT(24)
-#define MVPP2_TXD_L3_IP6 BIT(26)
-#define MVPP2_TXD_L_DESC BIT(28)
-#define MVPP2_TXD_F_DESC BIT(29)
-
-#define MVPP2_RXD_ERR_SUMMARY BIT(15)
-#define MVPP2_RXD_ERR_CODE_MASK (BIT(13) | BIT(14))
-#define MVPP2_RXD_ERR_CRC 0x0
-#define MVPP2_RXD_ERR_OVERRUN BIT(13)
-#define MVPP2_RXD_ERR_RESOURCE (BIT(13) | BIT(14))
-#define MVPP2_RXD_BM_POOL_ID_OFFS 16
-#define MVPP2_RXD_BM_POOL_ID_MASK (BIT(16) | BIT(17) | BIT(18))
-#define MVPP2_RXD_HWF_SYNC BIT(21)
-#define MVPP2_RXD_L4_CSUM_OK BIT(22)
-#define MVPP2_RXD_IP4_HEADER_ERR BIT(24)
-#define MVPP2_RXD_L4_TCP BIT(25)
-#define MVPP2_RXD_L4_UDP BIT(26)
-#define MVPP2_RXD_L3_IP4 BIT(28)
-#define MVPP2_RXD_L3_IP6 BIT(30)
-#define MVPP2_RXD_BUF_HDR BIT(31)
-
-/* HW TX descriptor for PPv2.1 */
-struct mvpp21_tx_desc {
-	u32 command;		/* Options used by HW for packet transmission */
- u8 packet_offset; /* the offset from the buffer beginning */
- u8 phys_txq; /* destination queue ID */
- u16 data_size; /* data size of transmitted packet in bytes */
- u32 buf_dma_addr; /* physical addr of transmitted buffer */
- u32 buf_cookie; /* cookie for access to TX buffer in tx path */
- u32 reserved1[3]; /* hw_cmd (for future use, BM, PON, PNC) */
- u32 reserved2; /* reserved (for future use) */
-};
-
-/* HW RX descriptor for PPv2.1 */
-struct mvpp21_rx_desc {
- u32 status; /* info about received packet */
- u16 reserved1; /* parser_info (for future use, PnC) */
- u16 data_size; /* size of received packet in bytes */
- u32 buf_dma_addr; /* physical address of the buffer */
- u32 buf_cookie; /* cookie for access to RX buffer in rx path */
- u16 reserved2; /* gem_port_id (for future use, PON) */
- u16 reserved3; /* csum_l4 (for future use, PnC) */
- u8 reserved4; /* bm_qset (for future use, BM) */
- u8 reserved5;
- u16 reserved6; /* classify_info (for future use, PnC) */
- u32 reserved7; /* flow_id (for future use, PnC) */
- u32 reserved8;
-};
-
-/* HW TX descriptor for PPv2.2 */
-struct mvpp22_tx_desc {
- u32 command;
- u8 packet_offset;
- u8 phys_txq;
- u16 data_size;
- u64 reserved1;
- u64 buf_dma_addr_ptp;
- u64 buf_cookie_misc;
-};
-
-/* HW RX descriptor for PPv2.2 */
-struct mvpp22_rx_desc {
- u32 status;
- u16 reserved1;
- u16 data_size;
- u32 reserved2;
- u32 reserved3;
- u64 buf_dma_addr_key_hash;
- u64 buf_cookie_misc;
-};
-
-/* Opaque type used by the driver to manipulate the HW TX and RX
- * descriptors
- */
-struct mvpp2_tx_desc {
- union {
- struct mvpp21_tx_desc pp21;
- struct mvpp22_tx_desc pp22;
- };
-};
-
-struct mvpp2_rx_desc {
- union {
- struct mvpp21_rx_desc pp21;
- struct mvpp22_rx_desc pp22;
- };
-};
-
-struct mvpp2_txq_pcpu_buf {
- /* Transmitted SKB */
- struct sk_buff *skb;
-
- /* Physical address of transmitted buffer */
- dma_addr_t dma;
-
- /* Size transmitted */
- size_t size;
-};
-
-/* Per-CPU Tx queue control */
-struct mvpp2_txq_pcpu {
- int cpu;
-
- /* Number of Tx DMA descriptors in the descriptor ring */
- int size;
-
-	/* Number of currently used Tx DMA descriptors in the
-	 * descriptor ring
-	 */
- int count;
-
- int wake_threshold;
- int stop_threshold;
-
- /* Number of Tx DMA descriptors reserved for each CPU */
- int reserved_num;
-
-	/* Info about transmitted buffers */
- struct mvpp2_txq_pcpu_buf *buffs;
-
- /* Index of last TX DMA descriptor that was inserted */
- int txq_put_index;
-
- /* Index of the TX DMA descriptor to be cleaned up */
- int txq_get_index;
-
- /* DMA buffer for TSO headers */
- char *tso_headers;
- dma_addr_t tso_headers_dma;
-};
-
-struct mvpp2_tx_queue {
- /* Physical number of this Tx queue */
- u8 id;
-
- /* Logical number of this Tx queue */
- u8 log_id;
-
- /* Number of Tx DMA descriptors in the descriptor ring */
- int size;
-
-	/* Number of currently used Tx DMA descriptors in the descriptor ring */
- int count;
-
- /* Per-CPU control of physical Tx queues */
- struct mvpp2_txq_pcpu __percpu *pcpu;
-
- u32 done_pkts_coal;
-
-	/* Virtual address of the Tx DMA descriptors array */
- struct mvpp2_tx_desc *descs;
-
- /* DMA address of the Tx DMA descriptors array */
- dma_addr_t descs_dma;
-
- /* Index of the last Tx DMA descriptor */
- int last_desc;
-
- /* Index of the next Tx DMA descriptor to process */
- int next_desc_to_proc;
-};
-
-struct mvpp2_rx_queue {
- /* RX queue number, in the range 0-31 for physical RXQs */
- u8 id;
-
- /* Num of rx descriptors in the rx descriptor ring */
- int size;
-
- u32 pkts_coal;
- u32 time_coal;
-
- /* Virtual address of the RX DMA descriptors array */
- struct mvpp2_rx_desc *descs;
-
- /* DMA address of the RX DMA descriptors array */
- dma_addr_t descs_dma;
-
- /* Index of the last RX DMA descriptor */
- int last_desc;
-
- /* Index of the next RX DMA descriptor to process */
- int next_desc_to_proc;
-
- /* ID of port to which physical RXQ is mapped */
- int port;
-
-	/* Port's logical RXQ number to which physical RXQ is mapped */
- int logic_rxq;
-};
-
-union mvpp2_prs_tcam_entry {
- u32 word[MVPP2_PRS_TCAM_WORDS];
- u8 byte[MVPP2_PRS_TCAM_WORDS * 4];
-};
-
-union mvpp2_prs_sram_entry {
- u32 word[MVPP2_PRS_SRAM_WORDS];
- u8 byte[MVPP2_PRS_SRAM_WORDS * 4];
-};
-
-struct mvpp2_prs_entry {
- u32 index;
- union mvpp2_prs_tcam_entry tcam;
- union mvpp2_prs_sram_entry sram;
-};
-
-struct mvpp2_prs_shadow {
- bool valid;
- bool finish;
-
- /* Lookup ID */
- int lu;
-
- /* User defined offset */
- int udf;
-
- /* Result info */
- u32 ri;
- u32 ri_mask;
-};
-
-struct mvpp2_cls_flow_entry {
- u32 index;
- u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
-};
-
-struct mvpp2_cls_lookup_entry {
- u32 lkpid;
- u32 way;
- u32 data;
-};
-
-struct mvpp2_bm_pool {
- /* Pool number in the range 0-7 */
- int id;
-
- /* Buffer Pointers Pool External (BPPE) size */
- int size;
- /* BPPE size in bytes */
- int size_bytes;
- /* Number of buffers for this pool */
- int buf_num;
- /* Pool buffer size */
- int buf_size;
- /* Packet size */
- int pkt_size;
- int frag_size;
-
- /* BPPE virtual base address */
- u32 *virt_addr;
- /* BPPE DMA base address */
- dma_addr_t dma_addr;
-
- /* Ports using BM pool */
- u32 port_map;
-};
-
-#define IS_TSO_HEADER(txq_pcpu, addr) \
- ((addr) >= (txq_pcpu)->tso_headers_dma && \
- (addr) < (txq_pcpu)->tso_headers_dma + \
- (txq_pcpu)->size * TSO_HEADER_SIZE)
-
-/* Queue modes */
-#define MVPP2_QDIST_SINGLE_MODE 0
-#define MVPP2_QDIST_MULTI_MODE 1
-
-static int queue_mode = MVPP2_QDIST_SINGLE_MODE;
-
-module_param(queue_mode, int, 0444);
-MODULE_PARM_DESC(queue_mode, "Set queue_mode (single=0, multi=1)");
-
-#define MVPP2_DRIVER_NAME "mvpp2"
-#define MVPP2_DRIVER_VERSION "1.0"
-
-/* Utility/helper methods */
-
-static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
-{
- writel(data, priv->swth_base[0] + offset);
-}
-
-static u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
-{
- return readl(priv->swth_base[0] + offset);
-}
-
-static u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset)
-{
- return readl_relaxed(priv->swth_base[0] + offset);
-}
-/* These accessors should be used to access:
- *
- * - per-CPU registers, where each CPU has its own copy of the
- * register.
- *
- * MVPP2_BM_VIRT_ALLOC_REG
- * MVPP2_BM_ADDR_HIGH_ALLOC
- * MVPP22_BM_ADDR_HIGH_RLS_REG
- * MVPP2_BM_VIRT_RLS_REG
- * MVPP2_ISR_RX_TX_CAUSE_REG
- * MVPP2_ISR_RX_TX_MASK_REG
- * MVPP2_TXQ_NUM_REG
- * MVPP2_AGGR_TXQ_UPDATE_REG
- * MVPP2_TXQ_RSVD_REQ_REG
- * MVPP2_TXQ_RSVD_RSLT_REG
- * MVPP2_TXQ_SENT_REG
- * MVPP2_RXQ_NUM_REG
- *
- * - global registers that must be accessed through a specific CPU
- * window, because they are related to an access to a per-CPU
- * register
- *
- * MVPP2_BM_PHY_ALLOC_REG (related to MVPP2_BM_VIRT_ALLOC_REG)
- * MVPP2_BM_PHY_RLS_REG (related to MVPP2_BM_VIRT_RLS_REG)
- * MVPP2_RXQ_THRESH_REG (related to MVPP2_RXQ_NUM_REG)
- * MVPP2_RXQ_DESC_ADDR_REG (related to MVPP2_RXQ_NUM_REG)
- * MVPP2_RXQ_DESC_SIZE_REG (related to MVPP2_RXQ_NUM_REG)
- * MVPP2_RXQ_INDEX_REG (related to MVPP2_RXQ_NUM_REG)
- * MVPP2_TXQ_PENDING_REG (related to MVPP2_TXQ_NUM_REG)
- * MVPP2_TXQ_DESC_ADDR_REG (related to MVPP2_TXQ_NUM_REG)
- * MVPP2_TXQ_DESC_SIZE_REG (related to MVPP2_TXQ_NUM_REG)
- * MVPP2_TXQ_INDEX_REG (related to MVPP2_TXQ_NUM_REG)
- * MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG)
- */
-static void mvpp2_percpu_write(struct mvpp2 *priv, int cpu,
- u32 offset, u32 data)
-{
- writel(data, priv->swth_base[cpu] + offset);
-}
-
-static u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu,
- u32 offset)
-{
- return readl(priv->swth_base[cpu] + offset);
-}
-
-static void mvpp2_percpu_write_relaxed(struct mvpp2 *priv, int cpu,
- u32 offset, u32 data)
-{
- writel_relaxed(data, priv->swth_base[cpu] + offset);
-}
-
-static u32 mvpp2_percpu_read_relaxed(struct mvpp2 *priv, int cpu,
- u32 offset)
-{
- return readl_relaxed(priv->swth_base[cpu] + offset);
-}
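The _relaxed variants skip the memory barriers implied by readl()/writel() and are meant for hot-path registers whose ordering against descriptor memory is enforced separately. An illustrative hot-path read (a sketch under those assumptions):

/* Sketch only: poll-loop read of the per-CPU cause register; ordering
 * vs. descriptor contents is provided elsewhere (e.g. dma_rmb()).
 */
u32 cause = mvpp2_percpu_read_relaxed(port->priv, smp_processor_id(),
				      MVPP2_ISR_RX_TX_CAUSE_REG(port->id));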
-
-static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
- struct mvpp2_tx_desc *tx_desc)
-{
- if (port->priv->hw_version == MVPP21)
- return tx_desc->pp21.buf_dma_addr;
- else
- return tx_desc->pp22.buf_dma_addr_ptp & MVPP2_DESC_DMA_MASK;
-}
-
-static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
- struct mvpp2_tx_desc *tx_desc,
- dma_addr_t dma_addr)
-{
- dma_addr_t addr, offset;
-
- addr = dma_addr & ~MVPP2_TX_DESC_ALIGN;
- offset = dma_addr & MVPP2_TX_DESC_ALIGN;
-
- if (port->priv->hw_version == MVPP21) {
- tx_desc->pp21.buf_dma_addr = addr;
- tx_desc->pp21.packet_offset = offset;
- } else {
- u64 val = (u64)addr;
-
- tx_desc->pp22.buf_dma_addr_ptp &= ~MVPP2_DESC_DMA_MASK;
- tx_desc->pp22.buf_dma_addr_ptp |= val;
- tx_desc->pp22.packet_offset = offset;
- }
-}
-
-static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port,
- struct mvpp2_tx_desc *tx_desc)
-{
- if (port->priv->hw_version == MVPP21)
- return tx_desc->pp21.data_size;
- else
- return tx_desc->pp22.data_size;
-}
-
-static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
- struct mvpp2_tx_desc *tx_desc,
- size_t size)
-{
- if (port->priv->hw_version == MVPP21)
- tx_desc->pp21.data_size = size;
- else
- tx_desc->pp22.data_size = size;
-}
-
-static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
- struct mvpp2_tx_desc *tx_desc,
- unsigned int txq)
-{
- if (port->priv->hw_version == MVPP21)
- tx_desc->pp21.phys_txq = txq;
- else
- tx_desc->pp22.phys_txq = txq;
-}
-
-static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
- struct mvpp2_tx_desc *tx_desc,
- unsigned int command)
-{
- if (port->priv->hw_version == MVPP21)
- tx_desc->pp21.command = command;
- else
- tx_desc->pp22.command = command;
-}
-
-static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port,
- struct mvpp2_tx_desc *tx_desc)
-{
- if (port->priv->hw_version == MVPP21)
- return tx_desc->pp21.packet_offset;
- else
- return tx_desc->pp22.packet_offset;
-}
-
-static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
- struct mvpp2_rx_desc *rx_desc)
-{
- if (port->priv->hw_version == MVPP21)
- return rx_desc->pp21.buf_dma_addr;
- else
- return rx_desc->pp22.buf_dma_addr_key_hash & MVPP2_DESC_DMA_MASK;
-}
-
-static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
- struct mvpp2_rx_desc *rx_desc)
-{
- if (port->priv->hw_version == MVPP21)
- return rx_desc->pp21.buf_cookie;
- else
- return rx_desc->pp22.buf_cookie_misc & MVPP2_DESC_DMA_MASK;
-}
-
-static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
- struct mvpp2_rx_desc *rx_desc)
-{
- if (port->priv->hw_version == MVPP21)
- return rx_desc->pp21.data_size;
- else
- return rx_desc->pp22.data_size;
-}
-
-static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
- struct mvpp2_rx_desc *rx_desc)
-{
- if (port->priv->hw_version == MVPP21)
- return rx_desc->pp21.status;
- else
- return rx_desc->pp22.status;
-}
-
-static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
-{
- txq_pcpu->txq_get_index++;
- if (txq_pcpu->txq_get_index == txq_pcpu->size)
- txq_pcpu->txq_get_index = 0;
-}
-
-static void mvpp2_txq_inc_put(struct mvpp2_port *port,
- struct mvpp2_txq_pcpu *txq_pcpu,
- struct sk_buff *skb,
- struct mvpp2_tx_desc *tx_desc)
-{
- struct mvpp2_txq_pcpu_buf *tx_buf =
- txq_pcpu->buffs + txq_pcpu->txq_put_index;
- tx_buf->skb = skb;
- tx_buf->size = mvpp2_txdesc_size_get(port, tx_desc);
- tx_buf->dma = mvpp2_txdesc_dma_addr_get(port, tx_desc) +
- mvpp2_txdesc_offset_get(port, tx_desc);
- txq_pcpu->txq_put_index++;
- if (txq_pcpu->txq_put_index == txq_pcpu->size)
- txq_pcpu->txq_put_index = 0;
-}
-
-/* Get the physical egress port number */
-static inline int mvpp2_egress_port(struct mvpp2_port *port)
-{
- return MVPP2_MAX_TCONT + port->id;
-}
-
-/* Get the physical TXQ number */
-static inline int mvpp2_txq_phys(int port, int txq)
-{
- return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
-}
-
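Worked example of the numbering above (MVPP2_MAX_TCONT and MVPP2_MAX_TXQ
assumed to be 16 and 8): port 2 is egress port 16 + 2 = 18, and its
queue 3 maps to physical TXQ (16 + 2) * 8 + 3 = 147.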
-/* Parser configuration routines */
-
-/* Update parser tcam and sram hw entries */
-static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
-{
- int i;
-
- if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
- return -EINVAL;
-
- /* Clear entry invalidation bit */
- pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;
-
- /* Write tcam index - indirect access */
- mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
- for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
- mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);
-
- /* Write sram index - indirect access */
- mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
- for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
- mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);
-
- return 0;
-}
-
-/* Initialize tcam entry from hw */
-static int mvpp2_prs_init_from_hw(struct mvpp2 *priv,
- struct mvpp2_prs_entry *pe, int tid)
-{
- int i;
-
- if (tid > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
- return -EINVAL;
-
- memset(pe, 0, sizeof(*pe));
- pe->index = tid;
-
- /* Write tcam index - indirect access */
- mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
-
- pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
- MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
- if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
- return MVPP2_PRS_TCAM_ENTRY_INVALID;
-
- for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
- pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));
-
- /* Write sram index - indirect access */
- mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
- for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
- pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));
-
- return 0;
-}
-
-/* Invalidate tcam hw entry */
-static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
-{
- /* Write index - indirect access */
- mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
- mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
- MVPP2_PRS_TCAM_INV_MASK);
-}
-
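These indirect accessors are typically combined into a read-modify-write
sequence; a minimal sketch, not taken from the driver (the modification
shown is arbitrary):

	static int example_prs_update(struct mvpp2 *priv, int tid)
	{
		struct mvpp2_prs_entry pe;
		int ret;

		/* Read the entry through the indirect window */
		ret = mvpp2_prs_init_from_hw(priv, &pe, tid);
		if (ret)
			return ret;

		/* Modify the software copy (arbitrary example) */
		pe.tcam.byte[MVPP2_PRS_TCAM_AI_BYTE] ^= BIT(0);

		/* Write TCAM and SRAM back */
		return mvpp2_prs_hw_write(priv, &pe);
	}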
-/* Enable shadow table entry and set its lookup ID */
-static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
-{
- priv->prs_shadow[index].valid = true;
- priv->prs_shadow[index].lu = lu;
-}
-
-/* Update ri fields in shadow table entry */
-static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
- unsigned int ri, unsigned int ri_mask)
-{
- priv->prs_shadow[index].ri_mask = ri_mask;
- priv->prs_shadow[index].ri = ri;
-}
-
-/* Update lookup field in tcam sw entry */
-static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
-{
- int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);
-
- pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
- pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
-}
-
-/* Update mask for single port in tcam sw entry */
-static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
- unsigned int port, bool add)
-{
- int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
-
- if (add)
- pe->tcam.byte[enable_off] &= ~(1 << port);
- else
- pe->tcam.byte[enable_off] |= 1 << port;
-}
-
-/* Update port map in tcam sw entry */
-static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
- unsigned int ports)
-{
- unsigned char port_mask = MVPP2_PRS_PORT_MASK;
- int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
-
- pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
- pe->tcam.byte[enable_off] &= ~port_mask;
- pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK;
-}
-
-/* Obtain port map from tcam sw entry */
-static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
-{
- int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
-
- return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
-}
-
-/* Set byte of data and its enable bits in tcam sw entry */
-static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
- unsigned int offs, unsigned char byte,
- unsigned char enable)
-{
- pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
- pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
-}
-
-/* Get byte of data and its enable bits from tcam sw entry */
-static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
- unsigned int offs, unsigned char *byte,
- unsigned char *enable)
-{
- *byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
- *enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
-}
-
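Illustration: mvpp2_prs_tcam_data_byte_set(pe, 0, 0x01, 0x01) matches
frames whose first DA byte has the multicast bit set while leaving the
other seven bits "don't care" - only bits set in the enable byte take
part in the TCAM lookup (the cast matching further below uses exactly
this pattern).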
-/* Compare tcam data bytes with a pattern */
-static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs,
- u16 data)
-{
- int off = MVPP2_PRS_TCAM_DATA_BYTE(offs);
- u16 tcam_data;
-
- tcam_data = (pe->tcam.byte[off + 1] << 8) | pe->tcam.byte[off];
-
- return tcam_data == data;
-}
-
-/* Update ai bits in tcam sw entry */
-static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe,
- unsigned int bits, unsigned int enable)
-{
- int i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE;
-
- for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
- if (!(enable & BIT(i)))
- continue;
-
- if (bits & BIT(i))
- pe->tcam.byte[ai_idx] |= 1 << i;
- else
- pe->tcam.byte[ai_idx] &= ~(1 << i);
- }
-
- pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(ai_idx)] |= enable;
-}
-
-/* Get ai bits from tcam sw entry */
-static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
-{
- return pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE];
-}
-
-/* Set ethertype in tcam sw entry */
-static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
- unsigned short ethertype)
-{
- mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
- mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
-}
-
-/* Set vid in tcam sw entry */
-static void mvpp2_prs_match_vid(struct mvpp2_prs_entry *pe, int offset,
- unsigned short vid)
-{
- mvpp2_prs_tcam_data_byte_set(pe, offset + 0, (vid & 0xf00) >> 8, 0xf);
- mvpp2_prs_tcam_data_byte_set(pe, offset + 1, vid & 0xff, 0xff);
-}
-
-/* Set bits in sram sw entry */
-static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
- int val)
-{
- pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
-}
-
-/* Clear bits in sram sw entry */
-static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
- int val)
-{
- pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
-}
-
-/* Update ri bits in sram sw entry */
-static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
- unsigned int bits, unsigned int mask)
-{
- unsigned int i;
-
- for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
- int ri_off = MVPP2_PRS_SRAM_RI_OFFS;
-
- if (!(mask & BIT(i)))
- continue;
-
- if (bits & BIT(i))
- mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
- else
- mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);
-
- mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
- }
-}
-
-/* Obtain ri bits from sram sw entry */
-static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
-{
- return pe->sram.word[MVPP2_PRS_SRAM_RI_WORD];
-}
-
-/* Update ai bits in sram sw entry */
-static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
- unsigned int bits, unsigned int mask)
-{
- unsigned int i;
- int ai_off = MVPP2_PRS_SRAM_AI_OFFS;
-
- for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
- if (!(mask & BIT(i)))
- continue;
-
- if (bits & BIT(i))
- mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
- else
- mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);
-
- mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
- }
-}
-
-/* Read ai bits from sram sw entry */
-static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
-{
- u8 bits;
- int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
- int ai_en_off = ai_off + 1;
- int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;
-
- bits = (pe->sram.byte[ai_off] >> ai_shift) |
- (pe->sram.byte[ai_en_off] << (8 - ai_shift));
-
- return bits;
-}
-
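Illustration of the splice above: if MVPP2_PRS_SRAM_AI_OFFS % 8 were 2
(value assumed here purely for illustration), the AI field would span
two bytes and be rebuilt as

	bits = (byte[ai_off] >> 2) | (byte[ai_off + 1] << 6);

with the u8 truncation keeping only the bits that belong to the field.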
-/* In sram sw entry set lookup ID field of the tcam key to be used in the next
- * lookup iteration
- */
-static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
- unsigned int lu)
-{
- int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;
-
- mvpp2_prs_sram_bits_clear(pe, sram_next_off,
- MVPP2_PRS_SRAM_NEXT_LU_MASK);
- mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
-}
-
-/* In the sram sw entry set sign and value of the next lookup offset
- * and the offset value generated to the classifier
- */
-static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
- unsigned int op)
-{
- /* Set sign */
- if (shift < 0) {
- mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
- shift = -shift;
- } else {
- mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
- }
-
- /* Set value */
- pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] =
- (unsigned char)shift;
-
- /* Reset and set operation */
- mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
- MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
- mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);
-
- /* Set base offset as current */
- mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
-}
-
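Example of the encoding above: the IPv6 entries below use a shift of -18
to jump back to the next-header field; that is stored as the sign bit
set plus a magnitude of 18, with the op selecting an add to the current
offset.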
-/* In the sram sw entry set sign and value of the user defined offset
- * generated to the classifier
- */
-static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
- unsigned int type, int offset,
- unsigned int op)
-{
- /* Set sign */
- if (offset < 0) {
- mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
- offset = -offset;
- } else {
- mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
- }
-
- /* Set value */
- mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
- MVPP2_PRS_SRAM_UDF_MASK);
- mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
- pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
- MVPP2_PRS_SRAM_UDF_BITS)] &=
- ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
- pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
- MVPP2_PRS_SRAM_UDF_BITS)] |=
- (offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
-
- /* Set offset type */
- mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
- MVPP2_PRS_SRAM_UDF_TYPE_MASK);
- mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);
-
- /* Set offset operation */
- mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
- MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
- mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);
-
- pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
- MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
- ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
- (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
-
- pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
- MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
- (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
-
- /* Set base offset as current */
- mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
-}
-
-/* Find parser flow entry */
-static int mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
-{
- struct mvpp2_prs_entry pe;
- int tid;
-
- /* Go through all entries with MVPP2_PRS_LU_FLOWS */
- for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
- u8 bits;
-
- if (!priv->prs_shadow[tid].valid ||
- priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
- continue;
-
- mvpp2_prs_init_from_hw(priv, &pe, tid);
- bits = mvpp2_prs_sram_ai_get(&pe);
-
- /* The SRAM stores the classification lookup ID in AI bits [5:0] */
- if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
- return tid;
- }
-
- return -ENOENT;
-}
-
-/* Return first free tcam index, seeking from start to end */
-static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
- unsigned char end)
-{
- int tid;
-
- if (start > end)
- swap(start, end);
-
- if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
- end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;
-
- for (tid = start; tid <= end; tid++) {
- if (!priv->prs_shadow[tid].valid)
- return tid;
- }
-
- return -EINVAL;
-}
-
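Note that start/end are normalized first, so the scan is always
ascending even when a caller passes the range reversed (as
mvpp2_prs_vlan_add() below does).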
-/* Enable/disable dropping all mac da's */
-static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
-{
- struct mvpp2_prs_entry pe;
-
- if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
- /* Entry exists - update port only */
- mvpp2_prs_init_from_hw(priv, &pe, MVPP2_PE_DROP_ALL);
- } else {
- /* Entry doesn't exist - create a new one */
- memset(&pe, 0, sizeof(pe));
- mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
- pe.index = MVPP2_PE_DROP_ALL;
-
- /* Non-promiscuous mode for all ports - DROP unknown packets */
- mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
- MVPP2_PRS_RI_DROP_MASK);
-
- mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
- mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
-
- /* Update shadow table */
- mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
-
- /* Mask all ports */
- mvpp2_prs_tcam_port_map_set(&pe, 0);
- }
-
- /* Update port mask */
- mvpp2_prs_tcam_port_set(&pe, port, add);
-
- mvpp2_prs_hw_write(priv, &pe);
-}
-
-/* Set port to unicast or multicast promiscuous mode */
-static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port,
- enum mvpp2_prs_l2_cast l2_cast, bool add)
-{
- struct mvpp2_prs_entry pe;
- unsigned char cast_match;
- unsigned int ri;
- int tid;
-
- if (l2_cast == MVPP2_PRS_L2_UNI_CAST) {
- cast_match = MVPP2_PRS_UCAST_VAL;
- tid = MVPP2_PE_MAC_UC_PROMISCUOUS;
- ri = MVPP2_PRS_RI_L2_UCAST;
- } else {
- cast_match = MVPP2_PRS_MCAST_VAL;
- tid = MVPP2_PE_MAC_MC_PROMISCUOUS;
- ri = MVPP2_PRS_RI_L2_MCAST;
- }
-
- /* Promiscuous mode - accept unknown unicast or multicast packets */
- if (priv->prs_shadow[tid].valid) {
- mvpp2_prs_init_from_hw(priv, &pe, tid);
- } else {
- memset(&pe, 0, sizeof(pe));
- mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
- pe.index = tid;
-
- /* Continue - set next lookup */
- mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
-
- /* Set result info bits */
- mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK);
-
- /* Match UC or MC addresses */
- mvpp2_prs_tcam_data_byte_set(&pe, 0, cast_match,
- MVPP2_PRS_CAST_MASK);
-
- /* Shift to ethertype */
- mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
- MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
-
- /* Mask all ports */
- mvpp2_prs_tcam_port_map_set(&pe, 0);
-
- /* Update shadow table */
- mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
- }
-
- /* Update port mask */
- mvpp2_prs_tcam_port_set(&pe, port, add);
-
- mvpp2_prs_hw_write(priv, &pe);
-}
-
-/* Set entry for dsa packets */
-static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
- bool tagged, bool extend)
-{
- struct mvpp2_prs_entry pe;
- int tid, shift;
-
- if (extend) {
- tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
- shift = 8;
- } else {
- tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
- shift = 4;
- }
-
- if (priv->prs_shadow[tid].valid) {
- /* Entry exists - update port only */
- mvpp2_prs_init_from_hw(priv, &pe, tid);
- } else {
- /* Entry doesn't exist - create a new one */
- memset(&pe, 0, sizeof(pe));
- mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
- pe.index = tid;
-
- /* Update shadow table */
- mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);
-
- if (tagged) {
- /* Set tagged bit in DSA tag */
- mvpp2_prs_tcam_data_byte_set(&pe, 0,
- MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
- MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
-
- /* Set ai bits for next iteration */
- if (extend)
- mvpp2_prs_sram_ai_update(&pe, 1,
- MVPP2_PRS_SRAM_AI_MASK);
- else
- mvpp2_prs_sram_ai_update(&pe, 0,
- MVPP2_PRS_SRAM_AI_MASK);
-
- /* If packet is tagged, continue the lookup with VID filtering */
- mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);
- } else {
- /* Shift 4 bytes for DSA tag or 8 bytes for EDSA tag */
- mvpp2_prs_sram_shift_set(&pe, shift,
- MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
-
- /* Set result info bits to 'no vlans' */
- mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
- MVPP2_PRS_RI_VLAN_MASK);
- mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
- }
-
- /* Mask all ports */
- mvpp2_prs_tcam_port_map_set(&pe, 0);
- }
-
- /* Update port mask */
- mvpp2_prs_tcam_port_set(&pe, port, add);
-
- mvpp2_prs_hw_write(priv, &pe);
-}
-
-/* Set entry for dsa ethertype */
-static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port,
- bool add, bool tagged, bool extend)
-{
- struct mvpp2_prs_entry pe;
- int tid, shift, port_mask;
-
- if (extend) {
- tid = tagged ? MVPP2_PE_ETYPE_EDSA_TAGGED :
- MVPP2_PE_ETYPE_EDSA_UNTAGGED;
- port_mask = 0;
- shift = 8;
- } else {
- tid = tagged ? MVPP2_PE_ETYPE_DSA_TAGGED :
- MVPP2_PE_ETYPE_DSA_UNTAGGED;
- port_mask = MVPP2_PRS_PORT_MASK;
- shift = 4;
- }
-
- if (priv->prs_shadow[tid].valid) {
- /* Entry exists - update port only */
- mvpp2_prs_init_from_hw(priv, &pe, tid);
- } else {
- /* Entry doesn't exist - create a new one */
- memset(&pe, 0, sizeof(pe));
- mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
- pe.index = tid;
-
- /* Set ethertype */
- mvpp2_prs_match_etype(&pe, 0, ETH_P_EDSA);
- mvpp2_prs_match_etype(&pe, 2, 0);
-
- mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK,
- MVPP2_PRS_RI_DSA_MASK);
- /* Skip ethertype + 2 reserved bytes + tag */
- mvpp2_prs_sram_shift_set(&pe, 2 + MVPP2_ETH_TYPE_LEN + shift,
- MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
-
- /* Update shadow table */
- mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);
-
- if (tagged) {
- /* Set tagged bit in DSA tag */
- mvpp2_prs_tcam_data_byte_set(&pe,
- MVPP2_ETH_TYPE_LEN + 2 + 3,
- MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
- MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
- /* Clear all ai bits for next iteration */
- mvpp2_prs_sram_ai_update(&pe, 0,
- MVPP2_PRS_SRAM_AI_MASK);
- /* If packet is tagged, continue checking VLANs */
- mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
- } else {
- /* Set result info bits to 'no vlans' */
- mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
- MVPP2_PRS_RI_VLAN_MASK);
- mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
- }
- /* Mask/unmask all ports, depending on dsa type */
- mvpp2_prs_tcam_port_map_set(&pe, port_mask);
- }
-
- /* Update port mask */
- mvpp2_prs_tcam_port_set(&pe, port, add);
-
- mvpp2_prs_hw_write(priv, &pe);
-}
-
-/* Search for existing single/triple vlan entry */
-static int mvpp2_prs_vlan_find(struct mvpp2 *priv, unsigned short tpid, int ai)
-{
- struct mvpp2_prs_entry pe;
- int tid;
-
- /* Go through all entries with MVPP2_PRS_LU_VLAN */
- for (tid = MVPP2_PE_FIRST_FREE_TID;
- tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
- unsigned int ri_bits, ai_bits;
- bool match;
-
- if (!priv->prs_shadow[tid].valid ||
- priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
- continue;
-
- mvpp2_prs_init_from_hw(priv, &pe, tid);
- match = mvpp2_prs_tcam_data_cmp(&pe, 0, swab16(tpid));
- if (!match)
- continue;
-
- /* Get vlan type */
- ri_bits = mvpp2_prs_sram_ri_get(&pe);
- ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
-
- /* Get current ai value from tcam */
- ai_bits = mvpp2_prs_tcam_ai_get(&pe);
- /* Clear double vlan bit */
- ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT;
-
- if (ai != ai_bits)
- continue;
-
- if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
- ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
- return tid;
- }
-
- return -ENOENT;
-}
-
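Example of the swab16() above: mvpp2_prs_match_etype() stores TPID
0x8100 big-endian as bytes {0x81, 0x00}, while mvpp2_prs_tcam_data_cmp()
reassembles those bytes little-endian as 0x0081 - hence the byte swap
before comparing.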
-/* Add/update single/triple vlan entry */
-static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
- unsigned int port_map)
-{
- struct mvpp2_prs_entry pe;
- int tid_aux, tid;
- int ret = 0;
-
- memset(&pe, 0, sizeof(pe));
-
- tid = mvpp2_prs_vlan_find(priv, tpid, ai);
-
- if (tid < 0) {
- /* Create new tcam entry */
- tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID,
- MVPP2_PE_FIRST_FREE_TID);
- if (tid < 0)
- return tid;
-
- /* Get last double vlan tid */
- for (tid_aux = MVPP2_PE_LAST_FREE_TID;
- tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) {
- unsigned int ri_bits;
-
- if (!priv->prs_shadow[tid_aux].valid ||
- priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
- continue;
-
- mvpp2_prs_init_from_hw(priv, &pe, tid_aux);
- ri_bits = mvpp2_prs_sram_ri_get(&pe);
- if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) ==
- MVPP2_PRS_RI_VLAN_DOUBLE)
- break;
- }
-
- if (tid <= tid_aux)
- return -EINVAL;
-
- memset(&pe, 0, sizeof(pe));
- pe.index = tid;
- mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
-
- mvpp2_prs_match_etype(&pe, 0, tpid);
-
- /* VLAN tag detected, proceed with VID filtering */
- mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);
-
- /* Clear all ai bits for next iteration */
- mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
-
- if (ai == MVPP2_PRS_SINGLE_VLAN_AI) {
- mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_SINGLE,
- MVPP2_PRS_RI_VLAN_MASK);
- } else {
- ai |= MVPP2_PRS_DBL_VLAN_AI_BIT;
- mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_TRIPLE,
- MVPP2_PRS_RI_VLAN_MASK);
- }
- mvpp2_prs_tcam_ai_update(&pe, ai, MVPP2_PRS_SRAM_AI_MASK);
-
- mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
- } else {
- mvpp2_prs_init_from_hw(priv, &pe, tid);
- }
- /* Update ports' mask */
- mvpp2_prs_tcam_port_map_set(&pe, port_map);
-
- mvpp2_prs_hw_write(priv, &pe);
-
- return ret;
-}
-
-/* Get first free double vlan ai number */
-static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 *priv)
-{
- int i;
-
- for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++) {
- if (!priv->prs_double_vlans[i])
- return i;
- }
-
- return -EINVAL;
-}
-
-/* Search for existing double vlan entry */
-static int mvpp2_prs_double_vlan_find(struct mvpp2 *priv, unsigned short tpid1,
- unsigned short tpid2)
-{
- struct mvpp2_prs_entry pe;
- int tid;
-
- /* Go through all entries with MVPP2_PRS_LU_VLAN */
- for (tid = MVPP2_PE_FIRST_FREE_TID;
- tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
- unsigned int ri_mask;
- bool match;
-
- if (!priv->prs_shadow[tid].valid ||
- priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
- continue;
-
- mvpp2_prs_init_from_hw(priv, &pe, tid);
-
- match = mvpp2_prs_tcam_data_cmp(&pe, 0, swab16(tpid1)) &&
- mvpp2_prs_tcam_data_cmp(&pe, 4, swab16(tpid2));
-
- if (!match)
- continue;
-
- ri_mask = mvpp2_prs_sram_ri_get(&pe) & MVPP2_PRS_RI_VLAN_MASK;
- if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE)
- return tid;
- }
-
- return -ENOENT;
-}
-
-/* Add or update double vlan entry */
-static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
- unsigned short tpid2,
- unsigned int port_map)
-{
- int tid_aux, tid, ai, ret = 0;
- struct mvpp2_prs_entry pe;
-
- memset(&pe, 0, sizeof(pe));
-
- tid = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2);
-
- if (tid < 0) {
- /* Create new tcam entry */
- tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
- MVPP2_PE_LAST_FREE_TID);
- if (tid < 0)
- return tid;
-
- /* Set ai value for new double vlan entry */
- ai = mvpp2_prs_double_vlan_ai_free_get(priv);
- if (ai < 0)
- return ai;
-
- /* Get first single/triple vlan tid */
- for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
- tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) {
- unsigned int ri_bits;
-
- if (!priv->prs_shadow[tid_aux].valid ||
- priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
- continue;
-
- mvpp2_prs_init_from_hw(priv, &pe, tid_aux);
- ri_bits = mvpp2_prs_sram_ri_get(&pe);
- ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
- if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
- ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
- break;
- }
-
- if (tid >= tid_aux)
- return -ERANGE;
-
- memset(&pe, 0, sizeof(pe));
- mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
- pe.index = tid;
-
- priv->prs_double_vlans[ai] = true;
-
- mvpp2_prs_match_etype(&pe, 0, tpid1);
- mvpp2_prs_match_etype(&pe, 4, tpid2);
-
- mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
- /* Shift 4 bytes - skip outer vlan tag */
- mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_LEN,
- MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
- mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
- MVPP2_PRS_RI_VLAN_MASK);
- mvpp2_prs_sram_ai_update(&pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT,
- MVPP2_PRS_SRAM_AI_MASK);
-
- mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
- } else {
- mvpp2_prs_init_from_hw(priv, &pe, tid);
- }
-
- /* Update ports' mask */
- mvpp2_prs_tcam_port_map_set(&pe, port_map);
- mvpp2_prs_hw_write(priv, &pe);
-
- return ret;
-}
-
-/* IPv4 header parsing for fragmentation and L4 offset */
-static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
- unsigned int ri, unsigned int ri_mask)
-{
- struct mvpp2_prs_entry pe;
- int tid;
-
- if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
- (proto != IPPROTO_IGMP))
- return -EINVAL;
-
- /* Non-fragmented packet */
- tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
- MVPP2_PE_LAST_FREE_TID);
- if (tid < 0)
- return tid;
-
- memset(&pe, 0, sizeof(pe));
- mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
- pe.index = tid;
-
- /* Set next lu to IPv4 */
- mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
- mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
- /* Set L4 offset */
- mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
- sizeof(struct iphdr) - 4,
- MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
- mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
- MVPP2_PRS_IPV4_DIP_AI_BIT);
- mvpp2_prs_sram_ri_update(&pe, ri, ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
-
- mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00,
- MVPP2_PRS_TCAM_PROTO_MASK_L);
- mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00,
- MVPP2_PRS_TCAM_PROTO_MASK);
-
- mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
- mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
- /* Unmask all ports */
- mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
-
- /* Update shadow table and hw entry */
- mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
- mvpp2_prs_hw_write(priv, &pe);
-
- /* Fragmented packet */
- tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
- MVPP2_PE_LAST_FREE_TID);
- if (tid < 0)
- return tid;
-
- pe.index = tid;
- /* Clear ri before updating */
- pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
- pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
- mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
-
- mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_TRUE,
- ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
-
- mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, 0x0);
- mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, 0x0);
-
- /* Update shadow table and hw entry */
- mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
- mvpp2_prs_hw_write(priv, &pe);
-
- return 0;
-}
-
-/* IPv4 L3 multicast or broadcast */
-static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast)
-{
- struct mvpp2_prs_entry pe;
- int mask, tid;
-
- tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
- MVPP2_PE_LAST_FREE_TID);
- if (tid < 0)
- return tid;
-
- memset(&pe, 0, sizeof(pe));
- mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
- pe.index = tid;
-
- switch (l3_cast) {
- case MVPP2_PRS_L3_MULTI_CAST:
- mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC,
- MVPP2_PRS_IPV4_MC_MASK);
- mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
- MVPP2_PRS_RI_L3_ADDR_MASK);
- break;
- case MVPP2_PRS_L3_BROAD_CAST:
- mask = MVPP2_PRS_IPV4_BC_MASK;
- mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask);
- mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask);
- mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask);
- mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask);
- mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST,
- MVPP2_PRS_RI_L3_ADDR_MASK);
- break;
- default:
- return -EINVAL;
- }
-
- /* Finished: go to flowid generation */
- mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
- mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
-
- mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
- MVPP2_PRS_IPV4_DIP_AI_BIT);
- /* Unmask all ports */
- mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
-
- /* Update shadow table and hw entry */
- mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
- mvpp2_prs_hw_write(priv, &pe);
-
- return 0;
-}
-
-/* Set entries for protocols over IPv6 */
-static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto,
- unsigned int ri, unsigned int ri_mask)
-{
- struct mvpp2_prs_entry pe;
- int tid;
-
- if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
- (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP))
- return -EINVAL;
-
- tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
- MVPP2_PE_LAST_FREE_TID);
- if (tid < 0)
- return tid;
-
- memset(&pe, 0, sizeof(pe));
- mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
- pe.index = tid;
-
- /* Finished: go to flowid generation */
- mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
- mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
- mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
- mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
- sizeof(struct ipv6hdr) - 6,
- MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
-
- mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK);
- mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
- MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
- /* Unmask all ports */
- mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
-
- /* Write HW */
- mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
- mvpp2_prs_hw_write(priv, &pe);
-
- return 0;
-}
-
-/* IPv6 L3 multicast entry */
-static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast)
-{
- struct mvpp2_prs_entry pe;
- int tid;
-
- if (l3_cast != MVPP2_PRS_L3_MULTI_CAST)
- return -EINVAL;
-
- tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
- MVPP2_PE_LAST_FREE_TID);
- if (tid < 0)
- return tid;
-
- memset(&pe, 0, sizeof(pe));
- mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
- pe.index = tid;
-
- /* Finished: go to flowid generation */
- mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
- mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
- MVPP2_PRS_RI_L3_ADDR_MASK);
- mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
- MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
- /* Shift back to IPv6 NH */
- mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
-
- mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC,
- MVPP2_PRS_IPV6_MC_MASK);
- mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
- /* Unmask all ports */
- mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
-
- /* Update shadow table and hw entry */
- mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
- mvpp2_prs_hw_write(priv, &pe);
-
- return 0;
-}
-
-/* Parser per-port initialization */
-static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
- int lu_max, int offset)
-{
- u32 val;
-
- /* Set lookup ID */
- val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
- val &= ~MVPP2_PRS_PORT_LU_MASK(port);
- val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first);
- mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);
-
- /* Set maximum number of loops for packet received from port */
- val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
- val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
- val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
- mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);
-
- /* Set initial offset for packet header extraction for the first
- * searching loop
- */
- val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
- val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
- val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
- mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
-}
-
-/* Default flow entries initialization for all ports */
-static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
-{
- struct mvpp2_prs_entry pe;
- int port;
-
- for (port = 0; port < MVPP2_MAX_PORTS; port++) {
- memset(&pe, 0, sizeof(pe));
- mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
- pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;
-
- /* Mask all ports */
- mvpp2_prs_tcam_port_map_set(&pe, 0);
-
- /* Set flow ID */
- mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
- mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
-
- /* Update shadow table and hw entry */
- mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
- mvpp2_prs_hw_write(priv, &pe);
- }
-}
-
-/* Set default entry for Marvell Header field */
-static void mvpp2_prs_mh_init(struct mvpp2 *priv)
-{
- struct mvpp2_prs_entry pe;
-
- memset(&pe, 0, sizeof(pe));
-
- pe.index = MVPP2_PE_MH_DEFAULT;
- mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
- mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
- MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
- mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);
-
- /* Unmask all ports */
- mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
-
- /* Update shadow table and hw entry */
- mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
- mvpp2_prs_hw_write(priv, &pe);
-}
-
-/* Set default entries (placeholders) for promiscuous, non-promiscuous and
- * multicast MAC addresses
- */
-static void mvpp2_prs_mac_init(struct mvpp2 *priv)
-{
- struct mvpp2_prs_entry pe;
-
- memset(&pe, 0, sizeof(pe));
-
- /* Non-promiscuous mode for all ports - DROP unknown packets */
- pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
- mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
-
- mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
- MVPP2_PRS_RI_DROP_MASK);
- mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
- mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
-
- /* Unmask all ports */
- mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
-
- /* Update shadow table and hw entry */
- mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
- mvpp2_prs_hw_write(priv, &pe);
-
- /* Create dummy entries for drop all and promiscuous modes */
- mvpp2_prs_mac_drop_all_set(priv, 0, false);
- mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_UNI_CAST, false);
- mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_MULTI_CAST, false);
-}
-
-/* Set default entries for various types of dsa packets */
-static void mvpp2_prs_dsa_init(struct mvpp2 *priv)
-{
- struct mvpp2_prs_entry pe;
-
- /* Untagged EDSA entry - placeholder */
- mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
- MVPP2_PRS_EDSA);
-
- /* Tagged EDSA entry - placeholder */
- mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
-
- /* Untagged DSA entry - placeholder */
- mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
- MVPP2_PRS_DSA);
-
- /* Tagged DSA entry - placeholder */
- mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
-
- /* Untagged EDSA ethertype entry - placeholder */
- mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
- MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
-
- /* Tagged EDSA ethertype entry - placeholder */
- mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
- MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
-
- /* Untagged DSA ethertype entry */
- mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
- MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
-
- /* Tagged DSA ethertype entry */
- mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
- MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
-
- /* Set default entry, in case DSA or EDSA tag not found */
- memset(&pe, 0, sizeof(pe));
- mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
- pe.index = MVPP2_PE_DSA_DEFAULT;
- mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
-
- /* Shift 0 bytes */
- mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
- mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
-
- /* Clear all sram ai bits for next iteration */
- mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
-
- /* Unmask all ports */
- mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
-
- mvpp2_prs_hw_write(priv, &pe);
-}
-
-/* Initialize parser entries for VID filtering */
-static void mvpp2_prs_vid_init(struct mvpp2 *priv)
-{
- struct mvpp2_prs_entry pe;
-
- memset(&pe, 0, sizeof(pe));
-
- /* Set default vid entry */
- pe.index = MVPP2_PE_VID_FLTR_DEFAULT;
- mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
-
- mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_EDSA_VID_AI_BIT);
-
- /* Skip VLAN header - Set offset to 4 bytes */
- mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_LEN,
- MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
-
- /* Clear all ai bits for next iteration */
- mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
-
- mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
-
- /* Unmask all ports */
- mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
-
- /* Update shadow table and hw entry */
- mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
- mvpp2_prs_hw_write(priv, &pe);
-
- /* Set default vid entry for extended DSA */
- memset(&pe, 0, sizeof(pe));
-
- /* Set default vid entry */
- pe.index = MVPP2_PE_VID_EDSA_FLTR_DEFAULT;
- mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
-
- mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_EDSA_VID_AI_BIT,
- MVPP2_PRS_EDSA_VID_AI_BIT);
-
- /* Skip VLAN header - Set offset to 8 bytes */
- mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_EDSA_LEN,
- MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
-
- /* Clear all ai bits for next iteration */
- mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
-
- mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
-
- /* Unmask all ports */
- mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
-
- /* Update shadow table and hw entry */
- mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
- mvpp2_prs_hw_write(priv, &pe);
-}
-
-/* Match basic ethertypes */
-static int mvpp2_prs_etype_init(struct mvpp2 *priv)
-{
- struct mvpp2_prs_entry pe;
- int tid;
-
- /* Ethertype: PPPoE */
- tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
- MVPP2_PE_LAST_FREE_TID);
- if (tid < 0)
- return tid;
-
- memset(&pe, 0, sizeof(pe));
- mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
- pe.index = tid;
-
- mvpp2_prs_match_etype(&pe, 0, ETH_P_PPP_SES);
-
- mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
- MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
- mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
- mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
- MVPP2_PRS_RI_PPPOE_MASK);
-
- /* Update shadow table and hw entry */
- mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
- priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
- priv->prs_shadow[pe.index].finish = false;
- mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
- MVPP2_PRS_RI_PPPOE_MASK);
- mvpp2_prs_hw_write(priv, &pe);
-
- /* Ethertype: ARP */
- tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
- MVPP2_PE_LAST_FREE_TID);
- if (tid < 0)
- return tid;
-
- memset(&pe, 0, sizeof(pe));
- mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
- pe.index = tid;
-
- mvpp2_prs_match_etype(&pe, 0, ETH_P_ARP);
-
- /* Generate flow in the next iteration */
- mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
- mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
- mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
- MVPP2_PRS_RI_L3_PROTO_MASK);
- /* Set L3 offset */
- mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
- MVPP2_ETH_TYPE_LEN,
- MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
-
- /* Update shadow table and hw entry */
- mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
- priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
- priv->prs_shadow[pe.index].finish = true;
- mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
- MVPP2_PRS_RI_L3_PROTO_MASK);
- mvpp2_prs_hw_write(priv, &pe);
-
- /* Ethertype: LBTD */
- tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
- MVPP2_PE_LAST_FREE_TID);
- if (tid < 0)
- return tid;
-
- memset(&pe, 0, sizeof(pe));
- mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
- pe.index = tid;
-
- mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);
-
- /* Generate flow in the next iteration */
- mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
- mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
- mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
- MVPP2_PRS_RI_UDF3_RX_SPECIAL,
- MVPP2_PRS_RI_CPU_CODE_MASK |
- MVPP2_PRS_RI_UDF3_MASK);
- /* Set L3 offset */
- mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
- MVPP2_ETH_TYPE_LEN,
- MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
-
- /* Update shadow table and hw entry */
- mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
- priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
- priv->prs_shadow[pe.index].finish = true;
- mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
- MVPP2_PRS_RI_UDF3_RX_SPECIAL,
- MVPP2_PRS_RI_CPU_CODE_MASK |
- MVPP2_PRS_RI_UDF3_MASK);
- mvpp2_prs_hw_write(priv, &pe);
-
- /* Ethertype: IPv4 without options */
- tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
- MVPP2_PE_LAST_FREE_TID);
- if (tid < 0)
- return tid;
-
- memset(&pe, 0, sizeof(pe));
- mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
- pe.index = tid;
-
- mvpp2_prs_match_etype(&pe, 0, ETH_P_IP);
- mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
- MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
- MVPP2_PRS_IPV4_HEAD_MASK |
- MVPP2_PRS_IPV4_IHL_MASK);
-
- mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
- mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
- MVPP2_PRS_RI_L3_PROTO_MASK);
- /* Skip eth_type + 4 bytes of IP header */
- mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
- MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
- /* Set L3 offset */
- mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
- MVPP2_ETH_TYPE_LEN,
- MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
-
- /* Update shadow table and hw entry */
- mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
- priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
- priv->prs_shadow[pe.index].finish = false;
- mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
- MVPP2_PRS_RI_L3_PROTO_MASK);
- mvpp2_prs_hw_write(priv, &pe);
-
- /* Ethertype: IPv4 with options */
- tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
- MVPP2_PE_LAST_FREE_TID);
- if (tid < 0)
- return tid;
-
- pe.index = tid;
-
- /* Clear tcam data before updating */
- pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
- pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;
-
- mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
- MVPP2_PRS_IPV4_HEAD,
- MVPP2_PRS_IPV4_HEAD_MASK);
-
- /* Clear ri before updating */
- pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
- pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
- mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
- MVPP2_PRS_RI_L3_PROTO_MASK);
-
- /* Update shadow table and hw entry */
- mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
- priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
- priv->prs_shadow[pe.index].finish = false;
- mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
- MVPP2_PRS_RI_L3_PROTO_MASK);
- mvpp2_prs_hw_write(priv, &pe);
-
- /* Ethertype: IPv6 without options */
- tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
- MVPP2_PE_LAST_FREE_TID);
- if (tid < 0)
- return tid;
-
- memset(&pe, 0, sizeof(pe));
- mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
- pe.index = tid;
-
- mvpp2_prs_match_etype(&pe, 0, ETH_P_IPV6);
-
- /* Skip DIP of IPv6 header */
- mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
- MVPP2_MAX_L3_ADDR_SIZE,
- MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
- mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
- mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
- MVPP2_PRS_RI_L3_PROTO_MASK);
- /* Set L3 offset */
- mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
- MVPP2_ETH_TYPE_LEN,
- MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
-
- mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
- priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
- priv->prs_shadow[pe.index].finish = false;
- mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
- MVPP2_PRS_RI_L3_PROTO_MASK);
- mvpp2_prs_hw_write(priv, &pe);
-
- /* Default entry for MVPP2_PRS_LU_L2 - Unknown ethertype */
- memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
- mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
- pe.index = MVPP2_PE_ETH_TYPE_UN;
-
- /* Unmask all ports */
- mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
-
- /* Generate flow in the next iteration */
- mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
- mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
- mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
- MVPP2_PRS_RI_L3_PROTO_MASK);
- /* Set L3 offset even if it's an unknown L3 */
- mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
- MVPP2_ETH_TYPE_LEN,
- MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
-
- /* Update shadow table and hw entry */
- mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
- priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
- priv->prs_shadow[pe.index].finish = true;
- mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
- MVPP2_PRS_RI_L3_PROTO_MASK);
- mvpp2_prs_hw_write(priv, &pe);
-
- return 0;
-}
-
-/* Configure vlan entries and detect up to 2 successive VLAN tags.
- * Possible options:
- * 0x8100, 0x88A8
- * 0x8100, 0x8100
- * 0x8100
- * 0x88A8
- */
-static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
-{
- struct mvpp2_prs_entry pe;
- int err;
-
- priv->prs_double_vlans = devm_kcalloc(&pdev->dev,
- MVPP2_PRS_DBL_VLANS_MAX, sizeof(bool),
- GFP_KERNEL);
- if (!priv->prs_double_vlans)
- return -ENOMEM;
-
- /* Double VLAN: 0x8100, 0x88A8 */
- err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD,
- MVPP2_PRS_PORT_MASK);
- if (err)
- return err;
-
- /* Double VLAN: 0x8100, 0x8100 */
- err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021Q,
- MVPP2_PRS_PORT_MASK);
- if (err)
- return err;
-
- /* Single VLAN: 0x88a8 */
- err = mvpp2_prs_vlan_add(priv, ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI,
- MVPP2_PRS_PORT_MASK);
- if (err)
- return err;
-
- /* Single VLAN: 0x8100 */
- err = mvpp2_prs_vlan_add(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI,
- MVPP2_PRS_PORT_MASK);
- if (err)
- return err;
-
- /* Set default double vlan entry */
- memset(&pe, 0, sizeof(pe));
- mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
- pe.index = MVPP2_PE_VLAN_DBL;
-
- mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);
-
- /* Clear ai for next iterations */
- mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
- mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
- MVPP2_PRS_RI_VLAN_MASK);
-
- mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT,
- MVPP2_PRS_DBL_VLAN_AI_BIT);
- /* Unmask all ports */
- mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
-
- /* Update shadow table and hw entry */
- mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
- mvpp2_prs_hw_write(priv, &pe);
-
- /* Set default vlan none entry */
- memset(&pe, 0, sizeof(pe));
- mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
- pe.index = MVPP2_PE_VLAN_NONE;
-
- mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
- mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
- MVPP2_PRS_RI_VLAN_MASK);
-
- /* Unmask all ports */
- mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
-
- /* Update shadow table and hw entry */
- mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
- mvpp2_prs_hw_write(priv, &pe);
-
- return 0;
-}
-
-/* Set entries for PPPoE ethertype */
-static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
-{
- struct mvpp2_prs_entry pe;
- int tid;
-
- /* IPv4 over PPPoE with options */
- tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
- MVPP2_PE_LAST_FREE_TID);
- if (tid < 0)
- return tid;
-
- memset(&pe, 0, sizeof(pe));
- mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
- pe.index = tid;
-
- mvpp2_prs_match_etype(&pe, 0, PPP_IP);
-
- mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
- mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
- MVPP2_PRS_RI_L3_PROTO_MASK);
- /* Skip eth_type + 4 bytes of IP header */
- mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
- MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
- /* Set L3 offset */
- mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
- MVPP2_ETH_TYPE_LEN,
- MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
-
- /* Update shadow table and hw entry */
- mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
- mvpp2_prs_hw_write(priv, &pe);
-
- /* IPv4 over PPPoE without options */
- tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
- MVPP2_PE_LAST_FREE_TID);
- if (tid < 0)
- return tid;
-
- pe.index = tid;
-
- mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
- MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
- MVPP2_PRS_IPV4_HEAD_MASK |
- MVPP2_PRS_IPV4_IHL_MASK);
-
- /* Clear ri before updating */
- pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
- pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
- mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
- MVPP2_PRS_RI_L3_PROTO_MASK);
-
- /* Update shadow table and hw entry */
- mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
- mvpp2_prs_hw_write(priv, &pe);
-
- /* IPv6 over PPPoE */
- tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
- MVPP2_PE_LAST_FREE_TID);
- if (tid < 0)
- return tid;
-
- memset(&pe, 0, sizeof(pe));
- mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
- pe.index = tid;
-
- mvpp2_prs_match_etype(&pe, 0, PPP_IPV6);
-
- mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
- mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
- MVPP2_PRS_RI_L3_PROTO_MASK);
- /* Skip eth_type + 4 bytes of IPv6 header */
- mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
- MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
- /* Set L3 offset */
- mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
- MVPP2_ETH_TYPE_LEN,
- MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
-
- /* Update shadow table and hw entry */
- mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
- mvpp2_prs_hw_write(priv, &pe);
-
- /* Non-IP over PPPoE */
- tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
- MVPP2_PE_LAST_FREE_TID);
- if (tid < 0)
- return tid;
-
- memset(&pe, 0, sizeof(pe));
- mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
- pe.index = tid;
-
- mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
- MVPP2_PRS_RI_L3_PROTO_MASK);
-
- /* Finished: go to flowid generation */
- mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
- mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
- /* Set L3 offset even if it's unknown L3 */
- mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
- MVPP2_ETH_TYPE_LEN,
- MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
-
- /* Update shadow table and hw entry */
- mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
- mvpp2_prs_hw_write(priv, &pe);
-
- return 0;
-}
-
-/* Initialize entries for IPv4 */
-static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
-{
- struct mvpp2_prs_entry pe;
- int err;
-
- /* Set entries for TCP, UDP and IGMP over IPv4 */
- err = mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
- MVPP2_PRS_RI_L4_PROTO_MASK);
- if (err)
- return err;
-
- err = mvpp2_prs_ip4_proto(priv, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
- MVPP2_PRS_RI_L4_PROTO_MASK);
- if (err)
- return err;
-
- err = mvpp2_prs_ip4_proto(priv, IPPROTO_IGMP,
- MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
- MVPP2_PRS_RI_UDF3_RX_SPECIAL,
- MVPP2_PRS_RI_CPU_CODE_MASK |
- MVPP2_PRS_RI_UDF3_MASK);
- if (err)
- return err;
-
- /* IPv4 Broadcast */
- err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_BROAD_CAST);
- if (err)
- return err;
-
- /* IPv4 Multicast */
- err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
- if (err)
- return err;
-
- /* Default IPv4 entry for unknown protocols */
- memset(&pe, 0, sizeof(pe));
- mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
- pe.index = MVPP2_PE_IP4_PROTO_UN;
-
- /* Set next lu to IPv4 */
- mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
- mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
- /* Set L4 offset */
- mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
- sizeof(struct iphdr) - 4,
- MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
- mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
- MVPP2_PRS_IPV4_DIP_AI_BIT);
- mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
- MVPP2_PRS_RI_L4_PROTO_MASK);
-
- mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
- /* Unmask all ports */
- mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
-
- /* Update shadow table and hw entry */
- mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
- mvpp2_prs_hw_write(priv, &pe);
-
- /* Default IPv4 entry for unicast address */
- memset(&pe, 0, sizeof(pe));
- mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
- pe.index = MVPP2_PE_IP4_ADDR_UN;
-
- /* Finished: go to flowid generation */
- mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
- mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
- mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
- MVPP2_PRS_RI_L3_ADDR_MASK);
-
- mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
- MVPP2_PRS_IPV4_DIP_AI_BIT);
- /* Unmask all ports */
- mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
-
- /* Update shadow table and hw entry */
- mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
- mvpp2_prs_hw_write(priv, &pe);
-
- return 0;
-}
-
-/* Initialize entries for IPv6 */
-static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
-{
- struct mvpp2_prs_entry pe;
- int tid, err;
-
- /* Set entries for TCP, UDP and ICMP over IPv6 */
- err = mvpp2_prs_ip6_proto(priv, IPPROTO_TCP,
- MVPP2_PRS_RI_L4_TCP,
- MVPP2_PRS_RI_L4_PROTO_MASK);
- if (err)
- return err;
-
- err = mvpp2_prs_ip6_proto(priv, IPPROTO_UDP,
- MVPP2_PRS_RI_L4_UDP,
- MVPP2_PRS_RI_L4_PROTO_MASK);
- if (err)
- return err;
-
- err = mvpp2_prs_ip6_proto(priv, IPPROTO_ICMPV6,
- MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
- MVPP2_PRS_RI_UDF3_RX_SPECIAL,
- MVPP2_PRS_RI_CPU_CODE_MASK |
- MVPP2_PRS_RI_UDF3_MASK);
- if (err)
- return err;
-
- /* IPv4 is the last header; this case is similar to 6-TCP or 17-UDP */
- /* Result Info: UDF7=1, DS lite */
- err = mvpp2_prs_ip6_proto(priv, IPPROTO_IPIP,
- MVPP2_PRS_RI_UDF7_IP6_LITE,
- MVPP2_PRS_RI_UDF7_MASK);
- if (err)
- return err;
-
- /* IPv6 multicast */
- err = mvpp2_prs_ip6_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
- if (err)
- return err;
-
- /* Entry for checking hop limit */
- tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
- MVPP2_PE_LAST_FREE_TID);
- if (tid < 0)
- return tid;
-
- memset(&pe, 0, sizeof(pe));
- mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
- pe.index = tid;
-
- /* Finished: go to flowid generation */
- mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
- mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
- mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN |
- MVPP2_PRS_RI_DROP_MASK,
- MVPP2_PRS_RI_L3_PROTO_MASK |
- MVPP2_PRS_RI_DROP_MASK);
-
- mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK);
- mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
- MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
-
- /* Update shadow table and hw entry */
- mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
- mvpp2_prs_hw_write(priv, &pe);
-
- /* Default IPv6 entry for unknown protocols */
- memset(&pe, 0, sizeof(pe));
- mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
- pe.index = MVPP2_PE_IP6_PROTO_UN;
-
- /* Finished: go to flowid generation */
- mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
- mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
- mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
- MVPP2_PRS_RI_L4_PROTO_MASK);
- /* Set L4 offset relatively to our current place */
- mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
- sizeof(struct ipv6hdr) - 4,
- MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
-
- mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
- MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
- /* Unmask all ports */
- mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
-
- /* Update shadow table and hw entry */
- mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
- mvpp2_prs_hw_write(priv, &pe);
-
- /* Default IPv6 entry for unknown ext protocols */
- memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
- mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
- pe.index = MVPP2_PE_IP6_EXT_PROTO_UN;
-
- /* Finished: go to flowid generation */
- mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
- mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
- mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
- MVPP2_PRS_RI_L4_PROTO_MASK);
-
- mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT,
- MVPP2_PRS_IPV6_EXT_AI_BIT);
- /* Unmask all ports */
- mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
-
- /* Update shadow table and hw entry */
- mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
- mvpp2_prs_hw_write(priv, &pe);
-
- /* Default IPv6 entry for unicast address */
- memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
- mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
- pe.index = MVPP2_PE_IP6_ADDR_UN;
-
- /* Finished: go to IPv6 again */
- mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
- mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
- MVPP2_PRS_RI_L3_ADDR_MASK);
- mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
- MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
- /* Shift back to IPV6 NH */
- mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
-
- mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
- /* Unmask all ports */
- mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
-
- /* Update shadow table and hw entry */
- mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
- mvpp2_prs_hw_write(priv, &pe);
-
- return 0;
-}
-
-/* Find tcam entry with matched pair <vid,port> */
-static int mvpp2_prs_vid_range_find(struct mvpp2 *priv, int pmap, u16 vid,
- u16 mask)
-{
- unsigned char byte[2], enable[2];
- struct mvpp2_prs_entry pe;
- u16 rvid, rmask;
- int tid;
-
-	/* Go through all the entries with MVPP2_PRS_LU_VID */
- for (tid = MVPP2_PE_VID_FILT_RANGE_START;
- tid <= MVPP2_PE_VID_FILT_RANGE_END; tid++) {
- if (!priv->prs_shadow[tid].valid ||
- priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VID)
- continue;
-
- mvpp2_prs_init_from_hw(priv, &pe, tid);
-
- mvpp2_prs_tcam_data_byte_get(&pe, 2, &byte[0], &enable[0]);
- mvpp2_prs_tcam_data_byte_get(&pe, 3, &byte[1], &enable[1]);
-
- rvid = ((byte[0] & 0xf) << 8) + byte[1];
- rmask = ((enable[0] & 0xf) << 8) + enable[1];
-
- if (rvid != vid || rmask != mask)
- continue;
-
- return tid;
- }
-
- return -ENOENT;
-}
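-
-/* Illustrative sketch, not part of the driver: the 12-bit VID spans two TCAM
- * bytes, with its high nibble in the first byte, which is what the unpacking
- * above reconstructs. A standalone plain-C model (names are hypothetical):
- */
-#include <stdint.h>
-
-static uint16_t example_vid_unpack(uint8_t hi, uint8_t lo)
-{
-	/* keep the low nibble of the high byte, then append the low byte */
-	return (uint16_t)(((hi & 0xf) << 8) | lo);
-}
-/* e.g. hi = 0x04, lo = 0xd2 yields VID 0x4d2, i.e. 1234 */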
-
-/* Write parser entry for VID filtering */
-static int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid)
-{
- unsigned int vid_start = MVPP2_PE_VID_FILT_RANGE_START +
- port->id * MVPP2_PRS_VLAN_FILT_MAX;
- unsigned int mask = 0xfff, reg_val, shift;
- struct mvpp2 *priv = port->priv;
- struct mvpp2_prs_entry pe;
- int tid;
-
- memset(&pe, 0, sizeof(pe));
-
-	/* Scan TCAM and see if an entry with this <vid,port> already exists */
- tid = mvpp2_prs_vid_range_find(priv, (1 << port->id), vid, mask);
-
- reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id));
- if (reg_val & MVPP2_DSA_EXTENDED)
- shift = MVPP2_VLAN_TAG_EDSA_LEN;
- else
- shift = MVPP2_VLAN_TAG_LEN;
-
- /* No such entry */
- if (tid < 0) {
-
- /* Go through all entries from first to last in vlan range */
- tid = mvpp2_prs_tcam_first_free(priv, vid_start,
- vid_start +
- MVPP2_PRS_VLAN_FILT_MAX_ENTRY);
-
- /* There isn't room for a new VID filter */
- if (tid < 0)
- return tid;
-
- mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
- pe.index = tid;
-
- /* Mask all ports */
- mvpp2_prs_tcam_port_map_set(&pe, 0);
- } else {
- mvpp2_prs_init_from_hw(priv, &pe, tid);
- }
-
- /* Enable the current port */
- mvpp2_prs_tcam_port_set(&pe, port->id, true);
-
- /* Continue - set next lookup */
- mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
-
- /* Skip VLAN header - Set offset to 4 or 8 bytes */
- mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
-
- /* Set match on VID */
- mvpp2_prs_match_vid(&pe, MVPP2_PRS_VID_TCAM_BYTE, vid);
-
- /* Clear all ai bits for next iteration */
- mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
-
- /* Update shadow table */
- mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
- mvpp2_prs_hw_write(priv, &pe);
-
- return 0;
-}
-
-/* Remove parser entry for VID filtering */
-static void mvpp2_prs_vid_entry_remove(struct mvpp2_port *port, u16 vid)
-{
- struct mvpp2 *priv = port->priv;
- int tid;
-
-	/* Scan TCAM and see if an entry with this <vid,port> already exists */
- tid = mvpp2_prs_vid_range_find(priv, (1 << port->id), vid, 0xfff);
-
- /* No such entry */
- if (tid < 0)
- return;
-
- mvpp2_prs_hw_inv(priv, tid);
- priv->prs_shadow[tid].valid = false;
-}
-
-/* Remove all existing VID filters on this port */
-static void mvpp2_prs_vid_remove_all(struct mvpp2_port *port)
-{
- struct mvpp2 *priv = port->priv;
- int tid;
-
- for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id);
- tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) {
-		if (priv->prs_shadow[tid].valid) {
-			/* tid is a TCAM index, not a VID: invalidate directly */
-			mvpp2_prs_hw_inv(priv, tid);
-			priv->prs_shadow[tid].valid = false;
-		}
- }
-}
-
-/* Remove VID filtering entry for this port */
-static void mvpp2_prs_vid_disable_filtering(struct mvpp2_port *port)
-{
- unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id);
- struct mvpp2 *priv = port->priv;
-
- /* Invalidate the guard entry */
- mvpp2_prs_hw_inv(priv, tid);
-
- priv->prs_shadow[tid].valid = false;
-}
-
-/* Add guard entry that drops packets when no VID is matched on this port */
-static void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port)
-{
- unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id);
- struct mvpp2 *priv = port->priv;
- unsigned int reg_val, shift;
- struct mvpp2_prs_entry pe;
-
- if (priv->prs_shadow[tid].valid)
- return;
-
- memset(&pe, 0, sizeof(pe));
-
- pe.index = tid;
-
- reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id));
- if (reg_val & MVPP2_DSA_EXTENDED)
- shift = MVPP2_VLAN_TAG_EDSA_LEN;
- else
- shift = MVPP2_VLAN_TAG_LEN;
-
- mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
-
- /* Mask all ports */
- mvpp2_prs_tcam_port_map_set(&pe, 0);
-
- /* Update port mask */
- mvpp2_prs_tcam_port_set(&pe, port->id, true);
-
- /* Continue - set next lookup */
- mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
-
- /* Skip VLAN header - Set offset to 4 or 8 bytes */
- mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
-
- /* Drop VLAN packets that don't belong to any VIDs on this port */
- mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
- MVPP2_PRS_RI_DROP_MASK);
-
- /* Clear all ai bits for next iteration */
- mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
-
- /* Update shadow table */
- mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
- mvpp2_prs_hw_write(priv, &pe);
-}
-
-/* Parser default initialization */
-static int mvpp2_prs_default_init(struct platform_device *pdev,
- struct mvpp2 *priv)
-{
- int err, index, i;
-
- /* Enable tcam table */
- mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);
-
- /* Clear all tcam and sram entries */
- for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
- mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
- for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
- mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);
-
- mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
- for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
- mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
- }
-
- /* Invalidate all tcam entries */
- for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
- mvpp2_prs_hw_inv(priv, index);
-
- priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE,
- sizeof(*priv->prs_shadow),
- GFP_KERNEL);
- if (!priv->prs_shadow)
- return -ENOMEM;
-
- /* Always start from lookup = 0 */
- for (index = 0; index < MVPP2_MAX_PORTS; index++)
- mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
- MVPP2_PRS_PORT_LU_MAX, 0);
-
- mvpp2_prs_def_flow_init(priv);
-
- mvpp2_prs_mh_init(priv);
-
- mvpp2_prs_mac_init(priv);
-
- mvpp2_prs_dsa_init(priv);
-
- mvpp2_prs_vid_init(priv);
-
- err = mvpp2_prs_etype_init(priv);
- if (err)
- return err;
-
- err = mvpp2_prs_vlan_init(pdev, priv);
- if (err)
- return err;
-
- err = mvpp2_prs_pppoe_init(priv);
- if (err)
- return err;
-
- err = mvpp2_prs_ip6_init(priv);
- if (err)
- return err;
-
- err = mvpp2_prs_ip4_init(priv);
- if (err)
- return err;
-
- return 0;
-}
-
-/* Compare MAC DA with tcam entry data */
-static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
- const u8 *da, unsigned char *mask)
-{
- unsigned char tcam_byte, tcam_mask;
- int index;
-
- for (index = 0; index < ETH_ALEN; index++) {
- mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
- if (tcam_mask != mask[index])
- return false;
-
- if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
- return false;
- }
-
- return true;
-}
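-
-/* Illustrative sketch, not part of the driver: the loop above is a masked
- * byte-wise equality test - two addresses match when they agree on every bit
- * the mask keeps. A standalone model (names are hypothetical):
- */
-static int example_masked_mac_equals(const unsigned char *a,
-				     const unsigned char *b,
-				     const unsigned char *mask)
-{
-	int i;
-
-	for (i = 0; i < 6; i++)
-		if ((a[i] & mask[i]) != (b[i] & mask[i]))
-			return 0;
-
-	return 1; /* match */
-}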
-
-/* Find tcam entry with matched pair <MAC DA, port> */
-static int
-mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
- unsigned char *mask, int udf_type)
-{
- struct mvpp2_prs_entry pe;
- int tid;
-
-	/* Go through all the entries with MVPP2_PRS_LU_MAC */
- for (tid = MVPP2_PE_MAC_RANGE_START;
- tid <= MVPP2_PE_MAC_RANGE_END; tid++) {
- unsigned int entry_pmap;
-
- if (!priv->prs_shadow[tid].valid ||
- (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
- (priv->prs_shadow[tid].udf != udf_type))
- continue;
-
- mvpp2_prs_init_from_hw(priv, &pe, tid);
- entry_pmap = mvpp2_prs_tcam_port_map_get(&pe);
-
- if (mvpp2_prs_mac_range_equals(&pe, da, mask) &&
- entry_pmap == pmap)
- return tid;
- }
-
- return -ENOENT;
-}
-
-/* Update parser's mac da entry */
-static int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da,
- bool add)
-{
- unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
- struct mvpp2 *priv = port->priv;
- unsigned int pmap, len, ri;
- struct mvpp2_prs_entry pe;
- int tid;
-
- memset(&pe, 0, sizeof(pe));
-
-	/* Scan TCAM and see if an entry with this <MAC DA, port> already exists */
- tid = mvpp2_prs_mac_da_range_find(priv, BIT(port->id), da, mask,
- MVPP2_PRS_UDF_MAC_DEF);
-
- /* No such entry */
- if (tid < 0) {
- if (!add)
- return 0;
-
- /* Create new TCAM entry */
-		/* Go through all the entries from first to last */
- tid = mvpp2_prs_tcam_first_free(priv,
- MVPP2_PE_MAC_RANGE_START,
- MVPP2_PE_MAC_RANGE_END);
- if (tid < 0)
- return tid;
-
- pe.index = tid;
-
- /* Mask all ports */
- mvpp2_prs_tcam_port_map_set(&pe, 0);
- } else {
- mvpp2_prs_init_from_hw(priv, &pe, tid);
- }
-
- mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
-
- /* Update port mask */
- mvpp2_prs_tcam_port_set(&pe, port->id, add);
-
- /* Invalidate the entry if no ports are left enabled */
- pmap = mvpp2_prs_tcam_port_map_get(&pe);
- if (pmap == 0) {
- if (add)
- return -EINVAL;
-
- mvpp2_prs_hw_inv(priv, pe.index);
- priv->prs_shadow[pe.index].valid = false;
- return 0;
- }
-
- /* Continue - set next lookup */
- mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
-
- /* Set match on DA */
- len = ETH_ALEN;
- while (len--)
- mvpp2_prs_tcam_data_byte_set(&pe, len, da[len], 0xff);
-
- /* Set result info bits */
- if (is_broadcast_ether_addr(da)) {
- ri = MVPP2_PRS_RI_L2_BCAST;
- } else if (is_multicast_ether_addr(da)) {
- ri = MVPP2_PRS_RI_L2_MCAST;
- } else {
- ri = MVPP2_PRS_RI_L2_UCAST;
-
- if (ether_addr_equal(da, port->dev->dev_addr))
- ri |= MVPP2_PRS_RI_MAC_ME_MASK;
- }
-
- mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
- MVPP2_PRS_RI_MAC_ME_MASK);
- mvpp2_prs_shadow_ri_set(priv, pe.index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
- MVPP2_PRS_RI_MAC_ME_MASK);
-
- /* Shift to ethertype */
- mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
- MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
-
- /* Update shadow table and hw entry */
- priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_MAC_DEF;
- mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
- mvpp2_prs_hw_write(priv, &pe);
-
- return 0;
-}
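-
-/* Illustrative sketch, not part of the driver: the result-info choice above
- * follows the standard Ethernet address classes - broadcast is all-ones,
- * multicast has the I/G bit (bit 0 of the first octet) set, everything else
- * is unicast. Standalone models (names are hypothetical):
- */
-static int example_is_broadcast(const unsigned char *da)
-{
-	/* all-ones address: the AND of all six octets is still 0xff */
-	return (da[0] & da[1] & da[2] & da[3] & da[4] & da[5]) == 0xff;
-}
-
-static int example_is_multicast(const unsigned char *da)
-{
-	return da[0] & 0x01; /* broadcast is a special case of multicast */
-}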
-
-static int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da)
-{
- struct mvpp2_port *port = netdev_priv(dev);
- int err;
-
- /* Remove old parser entry */
- err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, false);
- if (err)
- return err;
-
- /* Add new parser entry */
- err = mvpp2_prs_mac_da_accept(port, da, true);
- if (err)
- return err;
-
- /* Set addr in the device */
- ether_addr_copy(dev->dev_addr, da);
-
- return 0;
-}
-
-static void mvpp2_prs_mac_del_all(struct mvpp2_port *port)
-{
- struct mvpp2 *priv = port->priv;
- struct mvpp2_prs_entry pe;
- unsigned long pmap;
- int index, tid;
-
- for (tid = MVPP2_PE_MAC_RANGE_START;
- tid <= MVPP2_PE_MAC_RANGE_END; tid++) {
- unsigned char da[ETH_ALEN], da_mask[ETH_ALEN];
-
- if (!priv->prs_shadow[tid].valid ||
- (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
- (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF))
- continue;
-
- mvpp2_prs_init_from_hw(priv, &pe, tid);
-
- pmap = mvpp2_prs_tcam_port_map_get(&pe);
-
- /* We only want entries active on this port */
- if (!test_bit(port->id, &pmap))
- continue;
-
- /* Read mac addr from entry */
- for (index = 0; index < ETH_ALEN; index++)
- mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index],
- &da_mask[index]);
-
-		/* Special cases: don't remove the broadcast entry or the
-		 * port's own address
-		 */
- if (is_broadcast_ether_addr(da) ||
- ether_addr_equal(da, port->dev->dev_addr))
- continue;
-
- /* Remove entry from TCAM */
- mvpp2_prs_mac_da_accept(port, da, false);
- }
-}
-
-static int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
-{
- switch (type) {
- case MVPP2_TAG_TYPE_EDSA:
- /* Add port to EDSA entries */
- mvpp2_prs_dsa_tag_set(priv, port, true,
- MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
- mvpp2_prs_dsa_tag_set(priv, port, true,
- MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
- /* Remove port from DSA entries */
- mvpp2_prs_dsa_tag_set(priv, port, false,
- MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
- mvpp2_prs_dsa_tag_set(priv, port, false,
- MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
- break;
-
- case MVPP2_TAG_TYPE_DSA:
- /* Add port to DSA entries */
- mvpp2_prs_dsa_tag_set(priv, port, true,
- MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
- mvpp2_prs_dsa_tag_set(priv, port, true,
- MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
- /* Remove port from EDSA entries */
- mvpp2_prs_dsa_tag_set(priv, port, false,
- MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
- mvpp2_prs_dsa_tag_set(priv, port, false,
- MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
- break;
-
- case MVPP2_TAG_TYPE_MH:
- case MVPP2_TAG_TYPE_NONE:
-		/* Remove port from EDSA and DSA entries */
- mvpp2_prs_dsa_tag_set(priv, port, false,
- MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
- mvpp2_prs_dsa_tag_set(priv, port, false,
- MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
- mvpp2_prs_dsa_tag_set(priv, port, false,
- MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
- mvpp2_prs_dsa_tag_set(priv, port, false,
- MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
- break;
-
- default:
- if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA))
- return -EINVAL;
- }
-
- return 0;
-}
-
-/* Set prs flow for the port */
-static int mvpp2_prs_def_flow(struct mvpp2_port *port)
-{
- struct mvpp2_prs_entry pe;
- int tid;
-
- memset(&pe, 0, sizeof(pe));
-
- tid = mvpp2_prs_flow_find(port->priv, port->id);
-
-	/* No such entry exists */
- if (tid < 0) {
-		/* Go through all the entries from last to first */
- tid = mvpp2_prs_tcam_first_free(port->priv,
- MVPP2_PE_LAST_FREE_TID,
- MVPP2_PE_FIRST_FREE_TID);
- if (tid < 0)
- return tid;
-
- pe.index = tid;
-
-		/* Set flow ID */
- mvpp2_prs_sram_ai_update(&pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
- mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
-
- /* Update shadow table */
- mvpp2_prs_shadow_set(port->priv, pe.index, MVPP2_PRS_LU_FLOWS);
- } else {
- mvpp2_prs_init_from_hw(port->priv, &pe, tid);
- }
-
- mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
- mvpp2_prs_tcam_port_map_set(&pe, (1 << port->id));
- mvpp2_prs_hw_write(port->priv, &pe);
-
- return 0;
-}
-
-/* Classifier configuration routines */
-
-/* Update classification flow table registers */
-static void mvpp2_cls_flow_write(struct mvpp2 *priv,
- struct mvpp2_cls_flow_entry *fe)
-{
- mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
- mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
- mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
- mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
-}
-
-/* Update classification lookup table register */
-static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
- struct mvpp2_cls_lookup_entry *le)
-{
- u32 val;
-
- val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
- mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
- mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
-}
-
-/* Classifier default initialization */
-static void mvpp2_cls_init(struct mvpp2 *priv)
-{
- struct mvpp2_cls_lookup_entry le;
- struct mvpp2_cls_flow_entry fe;
- int index;
-
- /* Enable classifier */
- mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);
-
- /* Clear classifier flow table */
- memset(&fe.data, 0, sizeof(fe.data));
- for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
- fe.index = index;
- mvpp2_cls_flow_write(priv, &fe);
- }
-
- /* Clear classifier lookup table */
- le.data = 0;
- for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
- le.lkpid = index;
- le.way = 0;
- mvpp2_cls_lookup_write(priv, &le);
-
- le.way = 1;
- mvpp2_cls_lookup_write(priv, &le);
- }
-}
-
-static void mvpp2_cls_port_config(struct mvpp2_port *port)
-{
- struct mvpp2_cls_lookup_entry le;
- u32 val;
-
- /* Set way for the port */
- val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
- val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
- mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);
-
- /* Pick the entry to be accessed in lookup ID decoding table
- * according to the way and lkpid.
- */
- le.lkpid = port->id;
- le.way = 0;
- le.data = 0;
-
- /* Set initial CPU queue for receiving packets */
- le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
- le.data |= port->first_rxq;
-
- /* Disable classification engines */
- le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;
-
- /* Update lookup ID table entry */
- mvpp2_cls_lookup_write(port->priv, &le);
-}
-
-/* Set CPU queue number for oversize packets */
-static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
-{
- u32 val;
-
- mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
- port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);
-
- mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
- (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));
-
- val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
- val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
- mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
-}
-
-static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool)
-{
- if (likely(pool->frag_size <= PAGE_SIZE))
- return netdev_alloc_frag(pool->frag_size);
- else
- return kmalloc(pool->frag_size, GFP_ATOMIC);
-}
-
-static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool, void *data)
-{
- if (likely(pool->frag_size <= PAGE_SIZE))
- skb_free_frag(data);
- else
- kfree(data);
-}
-
-/* Buffer Manager configuration routines */
-
-/* Create pool */
-static int mvpp2_bm_pool_create(struct platform_device *pdev,
- struct mvpp2 *priv,
- struct mvpp2_bm_pool *bm_pool, int size)
-{
- u32 val;
-
- /* Number of buffer pointers must be a multiple of 16, as per
- * hardware constraints
- */
- if (!IS_ALIGNED(size, 16))
- return -EINVAL;
-
- /* PPv2.1 needs 8 bytes per buffer pointer, PPv2.2 needs 16
- * bytes per buffer pointer
- */
- if (priv->hw_version == MVPP21)
- bm_pool->size_bytes = 2 * sizeof(u32) * size;
- else
- bm_pool->size_bytes = 2 * sizeof(u64) * size;
-
- bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, bm_pool->size_bytes,
- &bm_pool->dma_addr,
- GFP_KERNEL);
- if (!bm_pool->virt_addr)
- return -ENOMEM;
-
- if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
- MVPP2_BM_POOL_PTR_ALIGN)) {
- dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
- bm_pool->virt_addr, bm_pool->dma_addr);
- dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
- bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
- return -ENOMEM;
- }
-
- mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
- lower_32_bits(bm_pool->dma_addr));
- mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);
-
- val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
- val |= MVPP2_BM_START_MASK;
- mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
-
- bm_pool->size = size;
- bm_pool->pkt_size = 0;
- bm_pool->buf_num = 0;
-
- return 0;
-}
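-
-/* Illustrative arithmetic, not part of the driver: for a pool of 2048 buffer
- * pointers, the DMA area sized above works out to
- *   PPv2.1: 2 * sizeof(u32) * 2048 = 16 KiB  (8 bytes per pointer)
- *   PPv2.2: 2 * sizeof(u64) * 2048 = 32 KiB  (16 bytes per pointer)
- * A standalone model (names are hypothetical):
- */
-#include <stddef.h>
-#include <stdint.h>
-
-static size_t example_bm_pool_bytes(int is_ppv21, size_t nptrs)
-{
-	return (is_ppv21 ? 2 * sizeof(uint32_t) : 2 * sizeof(uint64_t)) * nptrs;
-}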
-
-/* Set pool buffer size */
-static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
- struct mvpp2_bm_pool *bm_pool,
- int buf_size)
-{
- u32 val;
-
- bm_pool->buf_size = buf_size;
-
- val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
- mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
-}
-
-static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,
- struct mvpp2_bm_pool *bm_pool,
- dma_addr_t *dma_addr,
- phys_addr_t *phys_addr)
-{
- int cpu = get_cpu();
-
- *dma_addr = mvpp2_percpu_read(priv, cpu,
- MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
- *phys_addr = mvpp2_percpu_read(priv, cpu, MVPP2_BM_VIRT_ALLOC_REG);
-
- if (priv->hw_version == MVPP22) {
- u32 val;
- u32 dma_addr_highbits, phys_addr_highbits;
-
- val = mvpp2_percpu_read(priv, cpu, MVPP22_BM_ADDR_HIGH_ALLOC);
- dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK);
- phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >>
- MVPP22_BM_ADDR_HIGH_VIRT_SHIFT;
-
- if (sizeof(dma_addr_t) == 8)
- *dma_addr |= (u64)dma_addr_highbits << 32;
-
- if (sizeof(phys_addr_t) == 8)
- *phys_addr |= (u64)phys_addr_highbits << 32;
- }
-
- put_cpu();
-}
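-
-/* Illustrative sketch, not part of the driver: on PPv2.2 the low 32 bits of
- * each address come from one register and the upper bits from a shared
- * "high" register, recombined exactly as above. A standalone model (names
- * are hypothetical):
- */
-#include <stdint.h>
-
-static uint64_t example_addr_combine(uint32_t lo, uint32_t highbits)
-{
-	return (uint64_t)lo | ((uint64_t)highbits << 32);
-}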
-
-/* Free all buffers from the pool */
-static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
- struct mvpp2_bm_pool *bm_pool, int buf_num)
-{
- int i;
-
- if (buf_num > bm_pool->buf_num) {
- WARN(1, "Pool does not have so many bufs pool(%d) bufs(%d)\n",
- bm_pool->id, buf_num);
- buf_num = bm_pool->buf_num;
- }
-
- for (i = 0; i < buf_num; i++) {
- dma_addr_t buf_dma_addr;
- phys_addr_t buf_phys_addr;
- void *data;
-
- mvpp2_bm_bufs_get_addrs(dev, priv, bm_pool,
- &buf_dma_addr, &buf_phys_addr);
-
- dma_unmap_single(dev, buf_dma_addr,
- bm_pool->buf_size, DMA_FROM_DEVICE);
-
- data = (void *)phys_to_virt(buf_phys_addr);
- if (!data)
- break;
-
- mvpp2_frag_free(bm_pool, data);
- }
-
- /* Update BM driver with number of buffers removed from pool */
- bm_pool->buf_num -= i;
-}
-
-/* Check number of buffers in BM pool */
-static int mvpp2_check_hw_buf_num(struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool)
-{
- int buf_num = 0;
-
- buf_num += mvpp2_read(priv, MVPP2_BM_POOL_PTRS_NUM_REG(bm_pool->id)) &
- MVPP22_BM_POOL_PTRS_NUM_MASK;
- buf_num += mvpp2_read(priv, MVPP2_BM_BPPI_PTRS_NUM_REG(bm_pool->id)) &
- MVPP2_BM_BPPI_PTR_NUM_MASK;
-
- /* HW has one buffer ready which is not reflected in the counters */
- if (buf_num)
- buf_num += 1;
-
- return buf_num;
-}
-
-/* Cleanup pool */
-static int mvpp2_bm_pool_destroy(struct platform_device *pdev,
- struct mvpp2 *priv,
- struct mvpp2_bm_pool *bm_pool)
-{
- int buf_num;
- u32 val;
-
- buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
- mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool, buf_num);
-
- /* Check buffer counters after free */
- buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
- if (buf_num) {
- WARN(1, "cannot free all buffers in pool %d, buf_num left %d\n",
- bm_pool->id, bm_pool->buf_num);
- return 0;
- }
-
- val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
- val |= MVPP2_BM_STOP_MASK;
- mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
-
- dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
- bm_pool->virt_addr,
- bm_pool->dma_addr);
- return 0;
-}
-
-static int mvpp2_bm_pools_init(struct platform_device *pdev,
- struct mvpp2 *priv)
-{
- int i, err, size;
- struct mvpp2_bm_pool *bm_pool;
-
- /* Create all pools with maximum size */
- size = MVPP2_BM_POOL_SIZE_MAX;
- for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
- bm_pool = &priv->bm_pools[i];
- bm_pool->id = i;
- err = mvpp2_bm_pool_create(pdev, priv, bm_pool, size);
- if (err)
- goto err_unroll_pools;
- mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
- }
- return 0;
-
-err_unroll_pools:
- dev_err(&pdev->dev, "failed to create BM pool %d, size %d\n", i, size);
- for (i = i - 1; i >= 0; i--)
- mvpp2_bm_pool_destroy(pdev, priv, &priv->bm_pools[i]);
- return err;
-}
-
-static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv)
-{
- int i, err;
-
- for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
- /* Mask BM all interrupts */
- mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
- /* Clear BM cause register */
- mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
- }
-
- /* Allocate and initialize BM pools */
- priv->bm_pools = devm_kcalloc(&pdev->dev, MVPP2_BM_POOLS_NUM,
- sizeof(*priv->bm_pools), GFP_KERNEL);
- if (!priv->bm_pools)
- return -ENOMEM;
-
- err = mvpp2_bm_pools_init(pdev, priv);
- if (err < 0)
- return err;
- return 0;
-}
-
-static void mvpp2_setup_bm_pool(void)
-{
- /* Short pool */
- mvpp2_pools[MVPP2_BM_SHORT].buf_num = MVPP2_BM_SHORT_BUF_NUM;
- mvpp2_pools[MVPP2_BM_SHORT].pkt_size = MVPP2_BM_SHORT_PKT_SIZE;
-
- /* Long pool */
- mvpp2_pools[MVPP2_BM_LONG].buf_num = MVPP2_BM_LONG_BUF_NUM;
- mvpp2_pools[MVPP2_BM_LONG].pkt_size = MVPP2_BM_LONG_PKT_SIZE;
-
- /* Jumbo pool */
- mvpp2_pools[MVPP2_BM_JUMBO].buf_num = MVPP2_BM_JUMBO_BUF_NUM;
- mvpp2_pools[MVPP2_BM_JUMBO].pkt_size = MVPP2_BM_JUMBO_PKT_SIZE;
-}
-
-/* Attach long pool to rxq */
-static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
- int lrxq, int long_pool)
-{
- u32 val, mask;
- int prxq;
-
- /* Get queue physical ID */
- prxq = port->rxqs[lrxq]->id;
-
- if (port->priv->hw_version == MVPP21)
- mask = MVPP21_RXQ_POOL_LONG_MASK;
- else
- mask = MVPP22_RXQ_POOL_LONG_MASK;
-
- val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
- val &= ~mask;
- val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask;
- mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
-}
-
-/* Attach short pool to rxq */
-static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
- int lrxq, int short_pool)
-{
- u32 val, mask;
- int prxq;
-
- /* Get queue physical ID */
- prxq = port->rxqs[lrxq]->id;
-
- if (port->priv->hw_version == MVPP21)
- mask = MVPP21_RXQ_POOL_SHORT_MASK;
- else
- mask = MVPP22_RXQ_POOL_SHORT_MASK;
-
- val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
- val &= ~mask;
- val |= (short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) & mask;
- mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
-}
-
-static void *mvpp2_buf_alloc(struct mvpp2_port *port,
- struct mvpp2_bm_pool *bm_pool,
- dma_addr_t *buf_dma_addr,
- phys_addr_t *buf_phys_addr,
- gfp_t gfp_mask)
-{
- dma_addr_t dma_addr;
- void *data;
-
- data = mvpp2_frag_alloc(bm_pool);
- if (!data)
- return NULL;
-
- dma_addr = dma_map_single(port->dev->dev.parent, data,
- MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
- mvpp2_frag_free(bm_pool, data);
- return NULL;
- }
- *buf_dma_addr = dma_addr;
- *buf_phys_addr = virt_to_phys(data);
-
- return data;
-}
-
-/* Release buffer to BM */
-static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
- dma_addr_t buf_dma_addr,
- phys_addr_t buf_phys_addr)
-{
- int cpu = get_cpu();
-
- if (port->priv->hw_version == MVPP22) {
- u32 val = 0;
-
- if (sizeof(dma_addr_t) == 8)
- val |= upper_32_bits(buf_dma_addr) &
- MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;
-
- if (sizeof(phys_addr_t) == 8)
- val |= (upper_32_bits(buf_phys_addr)
- << MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
- MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;
-
- mvpp2_percpu_write_relaxed(port->priv, cpu,
- MVPP22_BM_ADDR_HIGH_RLS_REG, val);
- }
-
-	/* MVPP2_BM_VIRT_RLS_REG is not interpreted by the HW, and is simply
-	 * returned in the "cookie" field of the RX descriptor. Instead of
-	 * storing the virtual address, we store the physical address
-	 */
- mvpp2_percpu_write_relaxed(port->priv, cpu,
- MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
- mvpp2_percpu_write_relaxed(port->priv, cpu,
- MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
-
- put_cpu();
-}
-
-/* Allocate buffers for the pool */
-static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
- struct mvpp2_bm_pool *bm_pool, int buf_num)
-{
- int i, buf_size, total_size;
- dma_addr_t dma_addr;
- phys_addr_t phys_addr;
- void *buf;
-
- buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
- total_size = MVPP2_RX_TOTAL_SIZE(buf_size);
-
- if (buf_num < 0 ||
- (buf_num + bm_pool->buf_num > bm_pool->size)) {
- netdev_err(port->dev,
- "cannot allocate %d buffers for pool %d\n",
- buf_num, bm_pool->id);
- return 0;
- }
-
- for (i = 0; i < buf_num; i++) {
- buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr,
- &phys_addr, GFP_KERNEL);
- if (!buf)
- break;
-
- mvpp2_bm_pool_put(port, bm_pool->id, dma_addr,
- phys_addr);
- }
-
- /* Update BM driver with number of buffers added to pool */
- bm_pool->buf_num += i;
-
- netdev_dbg(port->dev,
- "pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
- bm_pool->id, bm_pool->pkt_size, buf_size, total_size);
-
- netdev_dbg(port->dev,
- "pool %d: %d of %d buffers added\n",
- bm_pool->id, i, buf_num);
- return i;
-}
-
-/* Notify the driver that the BM pool is being used as a specific type and
- * return the pool pointer on success
- */
-static struct mvpp2_bm_pool *
-mvpp2_bm_pool_use(struct mvpp2_port *port, unsigned pool, int pkt_size)
-{
- struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
- int num;
-
- if (pool >= MVPP2_BM_POOLS_NUM) {
- netdev_err(port->dev, "Invalid pool %d\n", pool);
- return NULL;
- }
-
-	/* Allocate buffers in case the BM pool is used as a long pool, but the
-	 * packet size doesn't match the MTU or the BM pool hasn't been used yet
-	 */
- if (new_pool->pkt_size == 0) {
- int pkts_num;
-
-		/* Use the default buffer number, or first free all the
-		 * buffers if the pool is not empty
-		 */
- pkts_num = new_pool->buf_num;
- if (pkts_num == 0)
- pkts_num = mvpp2_pools[pool].buf_num;
- else
- mvpp2_bm_bufs_free(port->dev->dev.parent,
- port->priv, new_pool, pkts_num);
-
- new_pool->pkt_size = pkt_size;
- new_pool->frag_size =
- SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
- MVPP2_SKB_SHINFO_SIZE;
-
- /* Allocate buffers for this pool */
- num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
- if (num != pkts_num) {
- WARN(1, "pool %d: %d of %d allocated\n",
- new_pool->id, num, pkts_num);
- return NULL;
- }
- }
-
- mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
- MVPP2_RX_BUF_SIZE(new_pool->pkt_size));
-
- return new_pool;
-}
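-
-/* Illustrative arithmetic, not part of the driver: frag_size above is the RX
- * buffer size rounded up by SKB_DATA_ALIGN (cache-line alignment) plus room
- * for the shared skb info. A standalone model assuming a 64-byte cache line
- * (names and constants are hypothetical):
- */
-#include <stddef.h>
-
-static size_t example_frag_size(size_t rx_buf_size, size_t shinfo_size)
-{
-	/* round up to the next multiple of 64, then append the shinfo area */
-	return ((rx_buf_size + 63) & ~(size_t)63) + shinfo_size;
-}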
-
-/* Initialize pools for swf */
-static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
-{
- int rxq;
- enum mvpp2_bm_pool_log_num long_log_pool, short_log_pool;
-
- /* If port pkt_size is higher than 1518B:
- * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
- * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
- */
- if (port->pkt_size > MVPP2_BM_LONG_PKT_SIZE) {
- long_log_pool = MVPP2_BM_JUMBO;
- short_log_pool = MVPP2_BM_LONG;
- } else {
- long_log_pool = MVPP2_BM_LONG;
- short_log_pool = MVPP2_BM_SHORT;
- }
-
- if (!port->pool_long) {
- port->pool_long =
- mvpp2_bm_pool_use(port, long_log_pool,
- mvpp2_pools[long_log_pool].pkt_size);
- if (!port->pool_long)
- return -ENOMEM;
-
- port->pool_long->port_map |= BIT(port->id);
-
- for (rxq = 0; rxq < port->nrxqs; rxq++)
- mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
- }
-
- if (!port->pool_short) {
- port->pool_short =
- mvpp2_bm_pool_use(port, short_log_pool,
- mvpp2_pools[short_log_pool].pkt_size);
- if (!port->pool_short)
- return -ENOMEM;
-
- port->pool_short->port_map |= BIT(port->id);
-
- for (rxq = 0; rxq < port->nrxqs; rxq++)
- mvpp2_rxq_short_pool_set(port, rxq,
- port->pool_short->id);
- }
-
- return 0;
-}
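-
-/* Illustrative sketch, not part of the driver: the mapping above in compact
- * form - a single threshold (1518B, i.e. MVPP2_BM_LONG_PKT_SIZE) decides
- * both pool assignments. A standalone model (names are hypothetical):
- */
-enum example_pool { EXAMPLE_SHORT, EXAMPLE_LONG, EXAMPLE_JUMBO };
-
-static void example_pick_pools(int pkt_size, enum example_pool *long_pool,
-			       enum example_pool *short_pool)
-{
-	if (pkt_size > 1518) {
-		*long_pool = EXAMPLE_JUMBO;
-		*short_pool = EXAMPLE_LONG;
-	} else {
-		*long_pool = EXAMPLE_LONG;
-		*short_pool = EXAMPLE_SHORT;
-	}
-}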
-
-static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
-{
- struct mvpp2_port *port = netdev_priv(dev);
- enum mvpp2_bm_pool_log_num new_long_pool;
- int pkt_size = MVPP2_RX_PKT_SIZE(mtu);
-
- /* If port MTU is higher than 1518B:
- * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
- * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
- */
- if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
- new_long_pool = MVPP2_BM_JUMBO;
- else
- new_long_pool = MVPP2_BM_LONG;
-
- if (new_long_pool != port->pool_long->id) {
- /* Remove port from old short & long pool */
- port->pool_long = mvpp2_bm_pool_use(port, port->pool_long->id,
- port->pool_long->pkt_size);
- port->pool_long->port_map &= ~BIT(port->id);
- port->pool_long = NULL;
-
- port->pool_short = mvpp2_bm_pool_use(port, port->pool_short->id,
- port->pool_short->pkt_size);
- port->pool_short->port_map &= ~BIT(port->id);
- port->pool_short = NULL;
-
- port->pkt_size = pkt_size;
-
- /* Add port to new short & long pool */
- mvpp2_swf_bm_pool_init(port);
-
- /* Update L4 checksum when jumbo enable/disable on port */
- if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) {
- dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
- dev->hw_features &= ~(NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM);
- } else {
- dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
- dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
- }
- }
-
- dev->mtu = mtu;
- dev->wanted_features = dev->features;
-
- netdev_update_features(dev);
- return 0;
-}
-
-static inline void mvpp2_interrupts_enable(struct mvpp2_port *port)
-{
- int i, sw_thread_mask = 0;
-
- for (i = 0; i < port->nqvecs; i++)
- sw_thread_mask |= port->qvecs[i].sw_thread_mask;
-
- mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
- MVPP2_ISR_ENABLE_INTERRUPT(sw_thread_mask));
-}
-
-static inline void mvpp2_interrupts_disable(struct mvpp2_port *port)
-{
- int i, sw_thread_mask = 0;
-
- for (i = 0; i < port->nqvecs; i++)
- sw_thread_mask |= port->qvecs[i].sw_thread_mask;
-
- mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
- MVPP2_ISR_DISABLE_INTERRUPT(sw_thread_mask));
-}
-
-static inline void mvpp2_qvec_interrupt_enable(struct mvpp2_queue_vector *qvec)
-{
- struct mvpp2_port *port = qvec->port;
-
- mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
- MVPP2_ISR_ENABLE_INTERRUPT(qvec->sw_thread_mask));
-}
-
-static inline void mvpp2_qvec_interrupt_disable(struct mvpp2_queue_vector *qvec)
-{
- struct mvpp2_port *port = qvec->port;
-
- mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
- MVPP2_ISR_DISABLE_INTERRUPT(qvec->sw_thread_mask));
-}
-
-/* Mask the current CPU's Rx/Tx interrupts.
- * Called by on_each_cpu(), guaranteed to run with migration disabled,
- * so using smp_processor_id() is OK.
- */
-static void mvpp2_interrupts_mask(void *arg)
-{
- struct mvpp2_port *port = arg;
-
- mvpp2_percpu_write(port->priv, smp_processor_id(),
- MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
-}
-
-/* Unmask the current CPU's Rx/Tx interrupts.
- * Called by on_each_cpu(), guaranteed to run with migration disabled,
- * so using smp_processor_id() is OK.
- */
-static void mvpp2_interrupts_unmask(void *arg)
-{
- struct mvpp2_port *port = arg;
- u32 val;
-
- val = MVPP2_CAUSE_MISC_SUM_MASK |
- MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
- if (port->has_tx_irqs)
- val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
-
- mvpp2_percpu_write(port->priv, smp_processor_id(),
- MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
-}
-
-static void
-mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask)
-{
- u32 val;
- int i;
-
- if (port->priv->hw_version != MVPP22)
- return;
-
- if (mask)
- val = 0;
- else
- val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
-
- for (i = 0; i < port->nqvecs; i++) {
- struct mvpp2_queue_vector *v = port->qvecs + i;
-
- if (v->type != MVPP2_QUEUE_VECTOR_SHARED)
- continue;
-
- mvpp2_percpu_write(port->priv, v->sw_thread_id,
- MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
- }
-}
-
-/* Port configuration routines */
-
-static void mvpp22_gop_init_rgmii(struct mvpp2_port *port)
-{
- struct mvpp2 *priv = port->priv;
- u32 val;
-
- regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
- val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT;
- regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);
-
- regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
- if (port->gop_id == 2)
- val |= GENCONF_CTRL0_PORT0_RGMII | GENCONF_CTRL0_PORT1_RGMII;
- else if (port->gop_id == 3)
- val |= GENCONF_CTRL0_PORT1_RGMII_MII;
- regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
-}
-
-static void mvpp22_gop_init_sgmii(struct mvpp2_port *port)
-{
- struct mvpp2 *priv = port->priv;
- u32 val;
-
- regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
- val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT |
- GENCONF_PORT_CTRL0_RX_DATA_SAMPLE;
- regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);
-
- if (port->gop_id > 1) {
- regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
- if (port->gop_id == 2)
- val &= ~GENCONF_CTRL0_PORT0_RGMII;
- else if (port->gop_id == 3)
- val &= ~GENCONF_CTRL0_PORT1_RGMII_MII;
- regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
- }
-}
-
-static void mvpp22_gop_init_10gkr(struct mvpp2_port *port)
-{
- struct mvpp2 *priv = port->priv;
- void __iomem *mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
- void __iomem *xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
- u32 val;
-
- /* XPCS */
- val = readl(xpcs + MVPP22_XPCS_CFG0);
- val &= ~(MVPP22_XPCS_CFG0_PCS_MODE(0x3) |
- MVPP22_XPCS_CFG0_ACTIVE_LANE(0x3));
- val |= MVPP22_XPCS_CFG0_ACTIVE_LANE(2);
- writel(val, xpcs + MVPP22_XPCS_CFG0);
-
- /* MPCS */
- val = readl(mpcs + MVPP22_MPCS_CTRL);
- val &= ~MVPP22_MPCS_CTRL_FWD_ERR_CONN;
- writel(val, mpcs + MVPP22_MPCS_CTRL);
-
- val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
- val &= ~(MVPP22_MPCS_CLK_RESET_DIV_RATIO(0x7) | MAC_CLK_RESET_MAC |
- MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX);
- val |= MVPP22_MPCS_CLK_RESET_DIV_RATIO(1);
- writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
-
- val &= ~MVPP22_MPCS_CLK_RESET_DIV_SET;
- val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX;
- writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
-}
-
-static int mvpp22_gop_init(struct mvpp2_port *port)
-{
- struct mvpp2 *priv = port->priv;
- u32 val;
-
- if (!priv->sysctrl_base)
- return 0;
-
- switch (port->phy_interface) {
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- if (port->gop_id == 0)
- goto invalid_conf;
- mvpp22_gop_init_rgmii(port);
- break;
- case PHY_INTERFACE_MODE_SGMII:
- mvpp22_gop_init_sgmii(port);
- break;
- case PHY_INTERFACE_MODE_10GKR:
- if (port->gop_id != 0)
- goto invalid_conf;
- mvpp22_gop_init_10gkr(port);
- break;
- default:
- goto unsupported_conf;
- }
-
- regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL1, &val);
- val |= GENCONF_PORT_CTRL1_RESET(port->gop_id) |
- GENCONF_PORT_CTRL1_EN(port->gop_id);
- regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL1, val);
-
- regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
- val |= GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR;
- regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);
-
- regmap_read(priv->sysctrl_base, GENCONF_SOFT_RESET1, &val);
- val |= GENCONF_SOFT_RESET1_GOP;
- regmap_write(priv->sysctrl_base, GENCONF_SOFT_RESET1, val);
-
-unsupported_conf:
- return 0;
-
-invalid_conf:
- netdev_err(port->dev, "Invalid port configuration\n");
- return -EINVAL;
-}
-
-static void mvpp22_gop_unmask_irq(struct mvpp2_port *port)
-{
- u32 val;
-
- if (phy_interface_mode_is_rgmii(port->phy_interface) ||
- port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
- /* Enable the GMAC link status irq for this port */
- val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
- val |= MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
- writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
- }
-
- if (port->gop_id == 0) {
- /* Enable the XLG/GIG irqs for this port */
- val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
- if (port->phy_interface == PHY_INTERFACE_MODE_10GKR)
- val |= MVPP22_XLG_EXT_INT_MASK_XLG;
- else
- val |= MVPP22_XLG_EXT_INT_MASK_GIG;
- writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
- }
-}
-
-static void mvpp22_gop_mask_irq(struct mvpp2_port *port)
-{
- u32 val;
-
- if (port->gop_id == 0) {
- val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
- val &= ~(MVPP22_XLG_EXT_INT_MASK_XLG |
- MVPP22_XLG_EXT_INT_MASK_GIG);
- writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
- }
-
- if (phy_interface_mode_is_rgmii(port->phy_interface) ||
- port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
- val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
- val &= ~MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
- writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
- }
-}
-
-static void mvpp22_gop_setup_irq(struct mvpp2_port *port)
-{
- u32 val;
-
- if (phy_interface_mode_is_rgmii(port->phy_interface) ||
- port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
- val = readl(port->base + MVPP22_GMAC_INT_MASK);
- val |= MVPP22_GMAC_INT_MASK_LINK_STAT;
- writel(val, port->base + MVPP22_GMAC_INT_MASK);
- }
-
- if (port->gop_id == 0) {
- val = readl(port->base + MVPP22_XLG_INT_MASK);
- val |= MVPP22_XLG_INT_MASK_LINK;
- writel(val, port->base + MVPP22_XLG_INT_MASK);
- }
-
- mvpp22_gop_unmask_irq(port);
-}
-
-static int mvpp22_comphy_init(struct mvpp2_port *port)
-{
- enum phy_mode mode;
- int ret;
-
- if (!port->comphy)
- return 0;
-
- switch (port->phy_interface) {
- case PHY_INTERFACE_MODE_SGMII:
- mode = PHY_MODE_SGMII;
- break;
- case PHY_INTERFACE_MODE_10GKR:
- mode = PHY_MODE_10GKR;
- break;
- default:
- return -EINVAL;
- }
-
- ret = phy_set_mode(port->comphy, mode);
- if (ret)
- return ret;
-
- return phy_power_on(port->comphy);
-}
-
-static void mvpp2_port_mii_gmac_configure_mode(struct mvpp2_port *port)
-{
- u32 val;
-
- if (port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
- val = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
- val |= MVPP22_CTRL4_SYNC_BYPASS_DIS | MVPP22_CTRL4_DP_CLK_SEL |
- MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
- val &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
- writel(val, port->base + MVPP22_GMAC_CTRL_4_REG);
- } else if (phy_interface_mode_is_rgmii(port->phy_interface)) {
- val = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
- val |= MVPP22_CTRL4_EXT_PIN_GMII_SEL |
- MVPP22_CTRL4_SYNC_BYPASS_DIS |
- MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
- val &= ~MVPP22_CTRL4_DP_CLK_SEL;
- writel(val, port->base + MVPP22_GMAC_CTRL_4_REG);
- }
-
- /* The port is connected to a copper PHY */
- val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
- val &= ~MVPP2_GMAC_PORT_TYPE_MASK;
- writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
-
- val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
- val |= MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS |
- MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG |
- MVPP2_GMAC_AN_DUPLEX_EN;
- if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
- val |= MVPP2_GMAC_IN_BAND_AUTONEG;
- writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
-}
-
-static void mvpp2_port_mii_gmac_configure(struct mvpp2_port *port)
-{
- u32 val;
-
- /* Force link down */
- val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
- val &= ~MVPP2_GMAC_FORCE_LINK_PASS;
- val |= MVPP2_GMAC_FORCE_LINK_DOWN;
- writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
-
- /* Set the GMAC in a reset state */
- val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
- val |= MVPP2_GMAC_PORT_RESET_MASK;
- writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
-
- /* Configure the PCS and in-band AN */
- val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
- if (port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
- val |= MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK;
- } else if (phy_interface_mode_is_rgmii(port->phy_interface)) {
- val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
- }
- writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
-
- mvpp2_port_mii_gmac_configure_mode(port);
-
- /* Unset the GMAC reset state */
- val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
- val &= ~MVPP2_GMAC_PORT_RESET_MASK;
- writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
-
- /* Stop forcing link down */
- val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
- val &= ~MVPP2_GMAC_FORCE_LINK_DOWN;
- writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
-}
-
-static void mvpp2_port_mii_xlg_configure(struct mvpp2_port *port)
-{
- u32 val;
-
- if (port->gop_id != 0)
- return;
-
- val = readl(port->base + MVPP22_XLG_CTRL0_REG);
- val |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN;
- writel(val, port->base + MVPP22_XLG_CTRL0_REG);
-
- val = readl(port->base + MVPP22_XLG_CTRL4_REG);
- val &= ~MVPP22_XLG_CTRL4_MACMODSELECT_GMAC;
- val |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC;
- writel(val, port->base + MVPP22_XLG_CTRL4_REG);
-}
-
-static void mvpp22_port_mii_set(struct mvpp2_port *port)
-{
- u32 val;
-
- /* Only GOP port 0 has an XLG MAC */
- if (port->gop_id == 0) {
- val = readl(port->base + MVPP22_XLG_CTRL3_REG);
- val &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
-
- if (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
- port->phy_interface == PHY_INTERFACE_MODE_10GKR)
- val |= MVPP22_XLG_CTRL3_MACMODESELECT_10G;
- else
- val |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC;
-
- writel(val, port->base + MVPP22_XLG_CTRL3_REG);
- }
-}
-
-static void mvpp2_port_mii_set(struct mvpp2_port *port)
-{
- if (port->priv->hw_version == MVPP22)
- mvpp22_port_mii_set(port);
-
- if (phy_interface_mode_is_rgmii(port->phy_interface) ||
- port->phy_interface == PHY_INTERFACE_MODE_SGMII)
- mvpp2_port_mii_gmac_configure(port);
- else if (port->phy_interface == PHY_INTERFACE_MODE_10GKR)
- mvpp2_port_mii_xlg_configure(port);
-}
-
-static void mvpp2_port_fc_adv_enable(struct mvpp2_port *port)
-{
- u32 val;
-
- val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
- val |= MVPP2_GMAC_FC_ADV_EN;
- writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
-}
-
-static void mvpp2_port_enable(struct mvpp2_port *port)
-{
- u32 val;
-
- /* Only GOP port 0 has an XLG MAC */
- if (port->gop_id == 0 &&
- (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
- port->phy_interface == PHY_INTERFACE_MODE_10GKR)) {
- val = readl(port->base + MVPP22_XLG_CTRL0_REG);
- val |= MVPP22_XLG_CTRL0_PORT_EN |
- MVPP22_XLG_CTRL0_MAC_RESET_DIS;
- val &= ~MVPP22_XLG_CTRL0_MIB_CNT_DIS;
- writel(val, port->base + MVPP22_XLG_CTRL0_REG);
- } else {
- val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
- val |= MVPP2_GMAC_PORT_EN_MASK;
- val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
- writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
- }
-}
-
-static void mvpp2_port_disable(struct mvpp2_port *port)
-{
- u32 val;
-
- /* Only GOP port 0 has an XLG MAC */
- if (port->gop_id == 0 &&
- (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
- port->phy_interface == PHY_INTERFACE_MODE_10GKR)) {
- val = readl(port->base + MVPP22_XLG_CTRL0_REG);
- val &= ~(MVPP22_XLG_CTRL0_PORT_EN |
- MVPP22_XLG_CTRL0_MAC_RESET_DIS);
- writel(val, port->base + MVPP22_XLG_CTRL0_REG);
- } else {
- val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
- val &= ~(MVPP2_GMAC_PORT_EN_MASK);
- writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
- }
-}
-
-/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
-static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
-{
- u32 val;
-
- val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
- ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
- writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
-}
-
-/* Configure loopback port */
-static void mvpp2_port_loopback_set(struct mvpp2_port *port)
-{
- u32 val;
-
- val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
-
- if (port->speed == 1000)
- val |= MVPP2_GMAC_GMII_LB_EN_MASK;
- else
- val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;
-
- if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
- val |= MVPP2_GMAC_PCS_LB_EN_MASK;
- else
- val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;
-
- writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
-}
-
-struct mvpp2_ethtool_counter {
- unsigned int offset;
- const char string[ETH_GSTRING_LEN];
- bool reg_is_64b;
-};
-
-static u64 mvpp2_read_count(struct mvpp2_port *port,
- const struct mvpp2_ethtool_counter *counter)
-{
- u64 val;
-
- val = readl(port->stats_base + counter->offset);
- if (counter->reg_is_64b)
- val += (u64)readl(port->stats_base + counter->offset + 4) << 32;
-
- return val;
-}
-
-/* Because software and hardware statistics are, by design, incremented at
- * different moments in the chain of packet processing, it is very likely
- * that incoming packets could have been dropped after being counted by
- * hardware but before reaching software statistics (most probably multicast
- * packets), and in the opposite way, during transmission, FCS bytes are
- * added in between as well as TSO skbs being split with header bytes added.
- * Hence, statistics gathered from userspace with ifconfig (software) and
- * ethtool (hardware) cannot be compared.
- */
-static const struct mvpp2_ethtool_counter mvpp2_ethtool_regs[] = {
- { MVPP2_MIB_GOOD_OCTETS_RCVD, "good_octets_received", true },
- { MVPP2_MIB_BAD_OCTETS_RCVD, "bad_octets_received" },
- { MVPP2_MIB_CRC_ERRORS_SENT, "crc_errors_sent" },
- { MVPP2_MIB_UNICAST_FRAMES_RCVD, "unicast_frames_received" },
- { MVPP2_MIB_BROADCAST_FRAMES_RCVD, "broadcast_frames_received" },
- { MVPP2_MIB_MULTICAST_FRAMES_RCVD, "multicast_frames_received" },
- { MVPP2_MIB_FRAMES_64_OCTETS, "frames_64_octets" },
- { MVPP2_MIB_FRAMES_65_TO_127_OCTETS, "frames_65_to_127_octet" },
- { MVPP2_MIB_FRAMES_128_TO_255_OCTETS, "frames_128_to_255_octet" },
- { MVPP2_MIB_FRAMES_256_TO_511_OCTETS, "frames_256_to_511_octet" },
- { MVPP2_MIB_FRAMES_512_TO_1023_OCTETS, "frames_512_to_1023_octet" },
- { MVPP2_MIB_FRAMES_1024_TO_MAX_OCTETS, "frames_1024_to_max_octet" },
- { MVPP2_MIB_GOOD_OCTETS_SENT, "good_octets_sent", true },
- { MVPP2_MIB_UNICAST_FRAMES_SENT, "unicast_frames_sent" },
- { MVPP2_MIB_MULTICAST_FRAMES_SENT, "multicast_frames_sent" },
- { MVPP2_MIB_BROADCAST_FRAMES_SENT, "broadcast_frames_sent" },
- { MVPP2_MIB_FC_SENT, "fc_sent" },
- { MVPP2_MIB_FC_RCVD, "fc_received" },
- { MVPP2_MIB_RX_FIFO_OVERRUN, "rx_fifo_overrun" },
- { MVPP2_MIB_UNDERSIZE_RCVD, "undersize_received" },
- { MVPP2_MIB_FRAGMENTS_RCVD, "fragments_received" },
- { MVPP2_MIB_OVERSIZE_RCVD, "oversize_received" },
- { MVPP2_MIB_JABBER_RCVD, "jabber_received" },
- { MVPP2_MIB_MAC_RCV_ERROR, "mac_receive_error" },
- { MVPP2_MIB_BAD_CRC_EVENT, "bad_crc_event" },
- { MVPP2_MIB_COLLISION, "collision" },
- { MVPP2_MIB_LATE_COLLISION, "late_collision" },
-};
-
-static void mvpp2_ethtool_get_strings(struct net_device *netdev, u32 sset,
- u8 *data)
-{
- if (sset == ETH_SS_STATS) {
- int i;
-
- for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++)
- memcpy(data + i * ETH_GSTRING_LEN,
- &mvpp2_ethtool_regs[i].string, ETH_GSTRING_LEN);
- }
-}
-
-static void mvpp2_gather_hw_statistics(struct work_struct *work)
-{
- struct delayed_work *del_work = to_delayed_work(work);
- struct mvpp2_port *port = container_of(del_work, struct mvpp2_port,
- stats_work);
- u64 *pstats;
- int i;
-
- mutex_lock(&port->gather_stats_lock);
-
- pstats = port->ethtool_stats;
- for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++)
- *pstats++ += mvpp2_read_count(port, &mvpp2_ethtool_regs[i]);
-
-	/* No need to read the counters again right after this function if it
-	 * was called asynchronously by the user (i.e. via ethtool).
-	 */
- cancel_delayed_work(&port->stats_work);
- queue_delayed_work(port->priv->stats_queue, &port->stats_work,
- MVPP2_MIB_COUNTERS_STATS_DELAY);
-
- mutex_unlock(&port->gather_stats_lock);
-}
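-
-/* Illustrative sketch, not part of the driver: the MIB counters clear on
- * read (mvpp2_port_reset() below reads them precisely to zero them), so each
- * pass adds the freshly read deltas into a software accumulator instead of
- * overwriting it. A standalone model (names are hypothetical):
- */
-static void example_accumulate_stats(unsigned long long *acc,
-				     const unsigned long long *delta,
-				     int nstats)
-{
-	int i;
-
-	for (i = 0; i < nstats; i++)
-		acc[i] += delta[i];
-}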
-
-static void mvpp2_ethtool_get_stats(struct net_device *dev,
- struct ethtool_stats *stats, u64 *data)
-{
- struct mvpp2_port *port = netdev_priv(dev);
-
-	/* Update statistics for the given port, then take the lock to avoid
-	 * concurrent accesses to the ethtool_stats structure during its copy.
-	 */
- mvpp2_gather_hw_statistics(&port->stats_work.work);
-
- mutex_lock(&port->gather_stats_lock);
- memcpy(data, port->ethtool_stats,
- sizeof(u64) * ARRAY_SIZE(mvpp2_ethtool_regs));
- mutex_unlock(&port->gather_stats_lock);
-}
-
-static int mvpp2_ethtool_get_sset_count(struct net_device *dev, int sset)
-{
- if (sset == ETH_SS_STATS)
- return ARRAY_SIZE(mvpp2_ethtool_regs);
-
- return -EOPNOTSUPP;
-}
-
-static void mvpp2_port_reset(struct mvpp2_port *port)
-{
- u32 val;
- unsigned int i;
-
- /* Read the GOP statistics to reset the hardware counters */
- for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++)
- mvpp2_read_count(port, &mvpp2_ethtool_regs[i]);
-
- val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
- ~MVPP2_GMAC_PORT_RESET_MASK;
- writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
-
- while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
- MVPP2_GMAC_PORT_RESET_MASK)
- continue;
-}
-
-/* Change maximum receive size of the port */
-static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
-{
- u32 val;
-
- val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
- val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
- val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
- MVPP2_GMAC_MAX_RX_SIZE_OFFS);
- writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
-}
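-
-/* Illustrative arithmetic, not part of the driver: the division by two above
- * suggests the field holds the limit in 2-byte units after subtracting the
- * 2-byte Marvell header, e.g. a 1518B pkt_size programs (1518 - 2) / 2 = 758.
- * A standalone model (names are hypothetical):
- */
-static unsigned int example_max_rx_size_field(unsigned int pkt_size)
-{
-	return (pkt_size - 2) / 2; /* 2 = Marvell header size */
-}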
-
-/* Change maximum receive size of the port */
-static inline void mvpp2_xlg_max_rx_size_set(struct mvpp2_port *port)
-{
- u32 val;
-
- val = readl(port->base + MVPP22_XLG_CTRL1_REG);
- val &= ~MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK;
- val |= ((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
- MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS;
- writel(val, port->base + MVPP22_XLG_CTRL1_REG);
-}
-
-/* Set defaults to the MVPP2 port */
-static void mvpp2_defaults_set(struct mvpp2_port *port)
-{
- int tx_port_num, val, queue, ptxq, lrxq;
-
- if (port->priv->hw_version == MVPP21) {
- /* Configure port to loopback if needed */
- if (port->flags & MVPP2_F_LOOPBACK)
- mvpp2_port_loopback_set(port);
-
- /* Update TX FIFO MIN Threshold */
- val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
- val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
- /* Min. TX threshold must be less than minimal packet length */
- val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
- writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
- }
-
- /* Disable Legacy WRR, Disable EJP, Release from reset */
- tx_port_num = mvpp2_egress_port(port);
- mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
- tx_port_num);
- mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);
-
- /* Close bandwidth for all queues */
- for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
- ptxq = mvpp2_txq_phys(port->id, queue);
- mvpp2_write(port->priv,
- MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0);
- }
-
- /* Set refill period to 1 usec, refill tokens
- * and bucket size to maximum
- */
- mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG,
- port->priv->tclk / USEC_PER_SEC);
- val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
- val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
- val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
- val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
- mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
- val = MVPP2_TXP_TOKEN_SIZE_MAX;
- mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
-
- /* Set MaximumLowLatencyPacketSize value to 256 */
- mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
- MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
- MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));
-
- /* Enable Rx cache snoop */
- for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
- queue = port->rxqs[lrxq]->id;
- val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
- val |= MVPP2_SNOOP_PKT_SIZE_MASK |
- MVPP2_SNOOP_BUF_HDR_MASK;
- mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
- }
-
-	/* By default, mask all interrupts for all present CPUs */
- mvpp2_interrupts_disable(port);
-}
-
-/* Enable/disable receiving packets */
-static void mvpp2_ingress_enable(struct mvpp2_port *port)
-{
- u32 val;
- int lrxq, queue;
-
- for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
- queue = port->rxqs[lrxq]->id;
- val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
- val &= ~MVPP2_RXQ_DISABLE_MASK;
- mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
- }
-}
-
-static void mvpp2_ingress_disable(struct mvpp2_port *port)
-{
- u32 val;
- int lrxq, queue;
-
- for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
- queue = port->rxqs[lrxq]->id;
- val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
- val |= MVPP2_RXQ_DISABLE_MASK;
- mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
- }
-}
-
-/* Enable transmit via physical egress queue
- * - HW starts taking descriptors from DRAM
- */
-static void mvpp2_egress_enable(struct mvpp2_port *port)
-{
- u32 qmap;
- int queue;
- int tx_port_num = mvpp2_egress_port(port);
-
- /* Enable all initialized TXs. */
- qmap = 0;
- for (queue = 0; queue < port->ntxqs; queue++) {
- struct mvpp2_tx_queue *txq = port->txqs[queue];
-
- if (txq->descs)
- qmap |= (1 << queue);
- }
-
- mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
- mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
-}
-
-/* Disable transmit via physical egress queue
- * - HW doesn't take descriptors from DRAM
- */
-static void mvpp2_egress_disable(struct mvpp2_port *port)
-{
- u32 reg_data;
- int delay;
- int tx_port_num = mvpp2_egress_port(port);
-
- /* Issue stop command for active channels only */
- mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
- reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
- MVPP2_TXP_SCHED_ENQ_MASK;
- if (reg_data != 0)
- mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
- (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));
-
- /* Wait for all Tx activity to terminate. */
- delay = 0;
- do {
- if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
- netdev_warn(port->dev,
- "Tx stop timed out, status=0x%08x\n",
- reg_data);
- break;
- }
- mdelay(1);
- delay++;
-
- /* Check the port TX Command register to verify that all
- * Tx queues have stopped
- */
- reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
- } while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
-}
-
-/* Rx descriptors helper methods */
-
-/* Get number of Rx descriptors occupied by received packets */
-static inline int
-mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
-{
- u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));
-
- return val & MVPP2_RXQ_OCCUPIED_MASK;
-}
-
-/* Update Rx queue status with the number of occupied and available
- * Rx descriptor slots.
- */
-static inline void
-mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
- int used_count, int free_count)
-{
- /* Decrement the number of used descriptors and increment the
- * number of free descriptors.
- */
- u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);
-
- mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
-}
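
To make the register packing above concrete, here is a standalone sketch in plain user-space C. It assumes the free-descriptor count occupies the upper half of the status-update register at bit 16, matching the shift used above (the real MVPP2_RXQ_NUM_NEW_OFFSET value lives in the driver header):

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed field position for the "new/free descriptors" count */
    #define RXQ_NUM_NEW_OFFSET	16

    int main(void)
    {
    	int used = 32, free_cnt = 32;	/* one NAPI pass: 32 done, 32 refilled */
    	uint32_t val = (uint32_t)used | ((uint32_t)free_cnt << RXQ_NUM_NEW_OFFSET);

    	printf("status update = 0x%08x\n", (unsigned)val);	/* 0x00200020 */
    	return 0;
    }
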
-
-/* Get pointer to next RX descriptor to be processed by SW */
-static inline struct mvpp2_rx_desc *
-mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
-{
- int rx_desc = rxq->next_desc_to_proc;
-
- rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
- prefetch(rxq->descs + rxq->next_desc_to_proc);
- return rxq->descs + rx_desc;
-}
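
The wrap-around helper MVPP2_QUEUE_NEXT_DESC is not shown in this hunk; a minimal stand-in consistent with its use here (advance by one, wrap to zero at last_desc) would be:

    #include <stdio.h>

    /* Sketch only: assumes last_desc == ring size - 1 */
    #define QUEUE_NEXT_DESC(last_desc, index) \
    	(((index) < (last_desc)) ? ((index) + 1) : 0)

    int main(void)
    {
    	int last_desc = 255;	/* a 256-entry ring */

    	printf("254 -> %d\n", QUEUE_NEXT_DESC(last_desc, 254));	/* 255 */
    	printf("255 -> %d\n", QUEUE_NEXT_DESC(last_desc, 255));	/* 0 */
    	return 0;
    }
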
-
-/* Set rx queue offset */
-static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
- int prxq, int offset)
-{
- u32 val;
-
- /* Convert offset from bytes to units of 32 bytes */
- offset = offset >> 5;
-
- val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
- val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;
-
- /* Offset is in units of 32 bytes */
- val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
- MVPP2_RXQ_PACKET_OFFSET_MASK);
-
- mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
-}
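
A quick worked example of the byte-to-unit conversion above. NET_SKB_PAD is architecture/config dependent; 64 is used purely for illustration:

    #include <stdio.h>

    int main(void)
    {
    	int offset_bytes = 64;			/* illustrative NET_SKB_PAD */
    	int field = offset_bytes >> 5;		/* units of 32 bytes */

    	printf("register field = %d\n", field);	/* 2 */
    	return 0;
    }
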
-
-/* Tx descriptors helper methods */
-
-/* Get pointer to next Tx descriptor to be processed (send) by HW */
-static struct mvpp2_tx_desc *
-mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
-{
- int tx_desc = txq->next_desc_to_proc;
-
- txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
- return txq->descs + tx_desc;
-}
-
-/* Update HW with number of aggregated Tx descriptors to be sent
- *
- * Called only from mvpp2_tx(), so migration is disabled, using
- * smp_processor_id() is OK.
- */
-static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
-{
- /* aggregated access - relevant TXQ number is written in TX desc */
- mvpp2_percpu_write(port->priv, smp_processor_id(),
- MVPP2_AGGR_TXQ_UPDATE_REG, pending);
-}
-
-/* Check if there are enough free descriptors in aggregated txq.
- * If not, update the number of occupied descriptors and repeat the check.
- *
- * Called only from mvpp2_tx(), so migration is disabled, using
- * smp_processor_id() is OK.
- */
-static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv,
- struct mvpp2_tx_queue *aggr_txq, int num)
-{
- if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) {
- /* Update number of occupied aggregated Tx descriptors */
- int cpu = smp_processor_id();
- u32 val = mvpp2_read_relaxed(priv,
- MVPP2_AGGR_TXQ_STATUS_REG(cpu));
-
- aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;
- }
-
- if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE)
- return -ENOMEM;
-
- return 0;
-}
-
-/* Reserved Tx descriptors allocation request
- *
- * Called only from mvpp2_txq_reserved_desc_num_proc(), itself called
- * only by mvpp2_tx(), so migration is disabled, using
- * smp_processor_id() is OK.
- */
-static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv,
- struct mvpp2_tx_queue *txq, int num)
-{
- u32 val;
- int cpu = smp_processor_id();
-
- val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
- mvpp2_percpu_write_relaxed(priv, cpu, MVPP2_TXQ_RSVD_REQ_REG, val);
-
- val = mvpp2_percpu_read_relaxed(priv, cpu, MVPP2_TXQ_RSVD_RSLT_REG);
-
- return val & MVPP2_TXQ_RSVD_RSLT_MASK;
-}
-
-/* Check if there are enough reserved descriptors for transmission.
- * If not, request chunk of reserved descriptors and check again.
- */
-static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *priv,
- struct mvpp2_tx_queue *txq,
- struct mvpp2_txq_pcpu *txq_pcpu,
- int num)
-{
- int req, cpu, desc_count;
-
- if (txq_pcpu->reserved_num >= num)
- return 0;
-
- /* Not enough descriptors reserved! Update the reserved descriptor
- * count and check again.
- */
-
- desc_count = 0;
- /* Compute total of used descriptors */
- for_each_present_cpu(cpu) {
- struct mvpp2_txq_pcpu *txq_pcpu_aux;
-
- txq_pcpu_aux = per_cpu_ptr(txq->pcpu, cpu);
- desc_count += txq_pcpu_aux->count;
- desc_count += txq_pcpu_aux->reserved_num;
- }
-
- req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num);
- desc_count += req;
-
- if (desc_count >
- (txq->size - (num_present_cpus() * MVPP2_CPU_DESC_CHUNK)))
- return -ENOMEM;
-
- txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(priv, txq, req);
-
- /* OK, the descriptor count has been updated: check again. */
- if (txq_pcpu->reserved_num < num)
- return -ENOMEM;
- return 0;
-}
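
The headroom check above caps the sum of all per-CPU counts plus the new request at the queue size minus one chunk of headroom per present CPU. A worked instance, with an assumed chunk size of 64 (the real MVPP2_CPU_DESC_CHUNK value lives in the header and may differ):

    #include <stdio.h>

    #define CPU_DESC_CHUNK	64	/* assumed, illustrative only */

    int main(void)
    {
    	int txq_size = 2048, ncpus = 4;
    	int limit = txq_size - ncpus * CPU_DESC_CHUNK;

    	/* total used + reserved + request must stay at or below this */
    	printf("reservation limit = %d descriptors\n", limit);	/* 1792 */
    	return 0;
    }
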
-
-/* Release the last allocated Tx descriptor. Useful to handle DMA
- * mapping failures in the Tx path.
- */
-static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
-{
- if (txq->next_desc_to_proc == 0)
- txq->next_desc_to_proc = txq->last_desc - 1;
- else
- txq->next_desc_to_proc--;
-}
-
-/* Set Tx descriptors fields relevant for CSUM calculation */
-static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
- int ip_hdr_len, int l4_proto)
-{
- u32 command;
-
- /* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
- * G_L4_chk, L4_type required only for checksum calculation
- */
- command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT);
- command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT);
- command |= MVPP2_TXD_IP_CSUM_DISABLE;
-
- if (l3_proto == swab16(ETH_P_IP)) {
- command &= ~MVPP2_TXD_IP_CSUM_DISABLE; /* enable IPv4 csum */
- command &= ~MVPP2_TXD_L3_IP6; /* enable IPv4 */
- } else {
- command |= MVPP2_TXD_L3_IP6; /* enable IPv6 */
- }
-
- if (l4_proto == IPPROTO_TCP) {
- command &= ~MVPP2_TXD_L4_UDP; /* enable TCP */
- command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */
- } else if (l4_proto == IPPROTO_UDP) {
- command |= MVPP2_TXD_L4_UDP; /* enable UDP */
- command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */
- } else {
- command |= MVPP2_TXD_L4_CSUM_NOT;
- }
-
- return command;
-}
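
Note that ip_hdr_len is expected in 32-bit words: the IPv4 caller passes ip4h->ihl directly and the IPv6 caller shifts the header length right by two. A standalone sketch of the field packing, using illustrative shift positions rather than the real MVPP2_TXD_* constants:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative bit positions; the driver's header defines the real ones */
    #define TXD_L3_OFF_SHIFT	0
    #define TXD_IP_HLEN_SHIFT	8

    int main(void)
    {
    	int l3_offs = 14;	/* Ethernet header length */
    	int ip_hdr_len = 5;	/* IPv4 ihl: five 32-bit words, no options */
    	uint32_t cmd = ((uint32_t)l3_offs << TXD_L3_OFF_SHIFT) |
    		       ((uint32_t)ip_hdr_len << TXD_IP_HLEN_SHIFT);

    	printf("cmd = 0x%08x\n", (unsigned)cmd);	/* 0x0000050e */
    	return 0;
    }
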
-
-/* Get number of sent descriptors and decrement counter.
- * The number of sent descriptors is returned.
- * Per-CPU access
- *
- * Called only from mvpp2_txq_done(), called from mvpp2_tx()
- * (migration disabled) and from the TX completion tasklet (migration
- * disabled) so using smp_processor_id() is OK.
- */
-static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
- struct mvpp2_tx_queue *txq)
-{
- u32 val;
-
- /* Reading status reg resets transmitted descriptor counter */
- val = mvpp2_percpu_read_relaxed(port->priv, smp_processor_id(),
- MVPP2_TXQ_SENT_REG(txq->id));
-
- return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
- MVPP2_TRANSMITTED_COUNT_OFFSET;
-}
-
-/* Called through on_each_cpu(), so runs on all CPUs, with migration
- * disabled, therefore using smp_processor_id() is OK.
- */
-static void mvpp2_txq_sent_counter_clear(void *arg)
-{
- struct mvpp2_port *port = arg;
- int queue;
-
- for (queue = 0; queue < port->ntxqs; queue++) {
- int id = port->txqs[queue]->id;
-
- mvpp2_percpu_read(port->priv, smp_processor_id(),
- MVPP2_TXQ_SENT_REG(id));
- }
-}
-
-/* Set max sizes for Tx queues */
-static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
-{
- u32 val, size, mtu;
- int txq, tx_port_num;
-
- mtu = port->pkt_size * 8;
- if (mtu > MVPP2_TXP_MTU_MAX)
- mtu = MVPP2_TXP_MTU_MAX;
-
- /* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */
- mtu = 3 * mtu;
-
- /* Indirect access to registers */
- tx_port_num = mvpp2_egress_port(port);
- mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
-
- /* Set MTU */
- val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
- val &= ~MVPP2_TXP_MTU_MAX;
- val |= mtu;
- mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);
-
- /* TXP token size and all TXQs token size must be larger than the MTU */
- val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
- size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
- if (size < mtu) {
- size = mtu;
- val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
- val |= size;
- mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
- }
-
- for (txq = 0; txq < port->ntxqs; txq++) {
- val = mvpp2_read(port->priv,
- MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
- size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;
-
- if (size < mtu) {
- size = mtu;
- val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
- val |= size;
- mvpp2_write(port->priv,
- MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
- val);
- }
- }
-}
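
The x8 scaling suggests the egress scheduler counts tokens in bits rather than bytes; that is an inference from the arithmetic, not something this hunk confirms. Worked numbers for the value programmed above:

    #include <stdio.h>

    int main(void)
    {
    	int pkt_size = 1632;			/* example port->pkt_size */
    	unsigned int mtu = pkt_size * 8;	/* 13056 */

    	mtu *= 3;	/* token-bucket workaround headroom, as above */
    	printf("scheduler MTU = %u\n", mtu);	/* 39168 */
    	return 0;
    }
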
-
-/* Set the number of packets that will be received before Rx interrupt
- * will be generated by HW.
- */
-static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
- struct mvpp2_rx_queue *rxq)
-{
- int cpu = get_cpu();
-
- if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK)
- rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK;
-
- mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
- mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_THRESH_REG,
- rxq->pkts_coal);
-
- put_cpu();
-}
-
-/* For some reason in the LSP this is done on each CPU. Why? */
-static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port,
- struct mvpp2_tx_queue *txq)
-{
- int cpu = get_cpu();
- u32 val;
-
- if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK)
- txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK;
-
- val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET);
- mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
- mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_THRESH_REG, val);
-
- put_cpu();
-}
-
-static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
-{
- u64 tmp = (u64)clk_hz * usec;
-
- do_div(tmp, USEC_PER_SEC);
-
- return tmp > U32_MAX ? U32_MAX : tmp;
-}
-
-static u32 mvpp2_cycles_to_usec(u32 cycles, unsigned long clk_hz)
-{
- u64 tmp = (u64)cycles * USEC_PER_SEC;
-
- do_div(tmp, clk_hz);
-
- return tmp > U32_MAX ? U32_MAX : tmp;
-}
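
A worked conversion, with the kernel's do_div() replaced by plain 64-bit division for a user-space sketch (250 MHz is an illustrative tclk, not a value taken from this driver):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	unsigned long clk_hz = 250000000UL;	/* illustrative tclk */
    	uint32_t usec = 100;
    	uint64_t cycles = (uint64_t)clk_hz * usec / 1000000;

    	printf("%u us -> %llu cycles\n", usec,
    	       (unsigned long long)cycles);	/* 25000 */
    	return 0;
    }
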
-
-/* Set the time delay in usec before Rx interrupt */
-static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
- struct mvpp2_rx_queue *rxq)
-{
- unsigned long freq = port->priv->tclk;
- u32 val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
-
- if (val > MVPP2_MAX_ISR_RX_THRESHOLD) {
- rxq->time_coal =
- mvpp2_cycles_to_usec(MVPP2_MAX_ISR_RX_THRESHOLD, freq);
-
- /* re-evaluate to get actual register value */
- val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
- }
-
- mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);
-}
-
-static void mvpp2_tx_time_coal_set(struct mvpp2_port *port)
-{
- unsigned long freq = port->priv->tclk;
- u32 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq);
-
- if (val > MVPP2_MAX_ISR_TX_THRESHOLD) {
- port->tx_time_coal =
- mvpp2_cycles_to_usec(MVPP2_MAX_ISR_TX_THRESHOLD, freq);
-
- /* re-evaluate to get actual register value */
- val = mvpp2_usec_to_cycles(port->tx_time_coal, freq);
- }
-
- mvpp2_write(port->priv, MVPP2_ISR_TX_THRESHOLD_REG(port->id), val);
-}
-
-/* Free Tx queue skbuffs */
-static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
- struct mvpp2_tx_queue *txq,
- struct mvpp2_txq_pcpu *txq_pcpu, int num)
-{
- int i;
-
- for (i = 0; i < num; i++) {
- struct mvpp2_txq_pcpu_buf *tx_buf =
- txq_pcpu->buffs + txq_pcpu->txq_get_index;
-
- if (!IS_TSO_HEADER(txq_pcpu, tx_buf->dma))
- dma_unmap_single(port->dev->dev.parent, tx_buf->dma,
- tx_buf->size, DMA_TO_DEVICE);
- if (tx_buf->skb)
- dev_kfree_skb_any(tx_buf->skb);
-
- mvpp2_txq_inc_get(txq_pcpu);
- }
-}
-
-static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
- u32 cause)
-{
- int queue = fls(cause) - 1;
-
- return port->rxqs[queue];
-}
-
-static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
- u32 cause)
-{
- int queue = fls(cause) - 1;
-
- return port->txqs[queue];
-}
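
Both helpers above pick the highest-numbered pending queue via fls(), and the callers clear each queue's bit as it is serviced. A user-space illustration with a stand-in for the kernel's fls():

    #include <stdio.h>

    /* Stand-in for the kernel's fls(): 1-based index of the most
     * significant set bit, 0 when no bit is set.
     */
    static int fls_(unsigned int x)
    {
    	return x ? 32 - __builtin_clz(x) : 0;
    }

    int main(void)
    {
    	unsigned int cause = 0x6;	/* queues 1 and 2 pending */

    	printf("first queue served = %d\n", fls_(cause) - 1);	/* 2 */
    	return 0;
    }
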
-
-/* Handle end of transmission */
-static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
- struct mvpp2_txq_pcpu *txq_pcpu)
-{
- struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id);
- int tx_done;
-
- if (txq_pcpu->cpu != smp_processor_id())
- netdev_err(port->dev, "wrong cpu on the end of Tx processing\n");
-
- tx_done = mvpp2_txq_sent_desc_proc(port, txq);
- if (!tx_done)
- return;
- mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done);
-
- txq_pcpu->count -= tx_done;
-
- if (netif_tx_queue_stopped(nq))
- if (txq_pcpu->count <= txq_pcpu->wake_threshold)
- netif_tx_wake_queue(nq);
-}
-
-static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause,
- int cpu)
-{
- struct mvpp2_tx_queue *txq;
- struct mvpp2_txq_pcpu *txq_pcpu;
- unsigned int tx_todo = 0;
-
- while (cause) {
- txq = mvpp2_get_tx_queue(port, cause);
- if (!txq)
- break;
-
- txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
-
- if (txq_pcpu->count) {
- mvpp2_txq_done(port, txq, txq_pcpu);
- tx_todo += txq_pcpu->count;
- }
-
- cause &= ~(1 << txq->log_id);
- }
- return tx_todo;
-}
-
-/* Rx/Tx queue initialization/cleanup methods */
-
-/* Allocate and initialize descriptors for aggr TXQ */
-static int mvpp2_aggr_txq_init(struct platform_device *pdev,
- struct mvpp2_tx_queue *aggr_txq, int cpu,
- struct mvpp2 *priv)
-{
- u32 txq_dma;
-
- /* Allocate memory for TX descriptors */
- aggr_txq->descs = dma_zalloc_coherent(&pdev->dev,
- MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
- &aggr_txq->descs_dma, GFP_KERNEL);
- if (!aggr_txq->descs)
- return -ENOMEM;
-
- aggr_txq->last_desc = MVPP2_AGGR_TXQ_SIZE - 1;
-
- /* WA: the aggregated TXQ is not reset, so resume from the HW index */
- aggr_txq->next_desc_to_proc = mvpp2_read(priv,
- MVPP2_AGGR_TXQ_INDEX_REG(cpu));
-
- /* Set Tx descriptors queue starting address indirect
- * access
- */
- if (priv->hw_version == MVPP21)
- txq_dma = aggr_txq->descs_dma;
- else
- txq_dma = aggr_txq->descs_dma >>
- MVPP22_AGGR_TXQ_DESC_ADDR_OFFS;
-
- mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), txq_dma);
- mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu),
- MVPP2_AGGR_TXQ_SIZE);
-
- return 0;
-}
-
-/* Create a specified Rx queue */
-static int mvpp2_rxq_init(struct mvpp2_port *port,
- struct mvpp2_rx_queue *rxq)
-{
- u32 rxq_dma;
- int cpu;
-
- rxq->size = port->rx_ring_size;
-
- /* Allocate memory for RX descriptors */
- rxq->descs = dma_alloc_coherent(port->dev->dev.parent,
- rxq->size * MVPP2_DESC_ALIGNED_SIZE,
- &rxq->descs_dma, GFP_KERNEL);
- if (!rxq->descs)
- return -ENOMEM;
-
- rxq->last_desc = rxq->size - 1;
-
- /* Zero occupied and non-occupied counters - direct access */
- mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
-
- /* Set Rx descriptors queue starting address - indirect access */
- cpu = get_cpu();
- mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
- if (port->priv->hw_version == MVPP21)
- rxq_dma = rxq->descs_dma;
- else
- rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS;
- mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
- mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
- mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_INDEX_REG, 0);
- put_cpu();
-
- /* Set Offset */
- mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);
-
- /* Set coalescing pkts and time */
- mvpp2_rx_pkts_coal_set(port, rxq);
- mvpp2_rx_time_coal_set(port, rxq);
-
- /* Add number of descriptors ready for receiving packets */
- mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);
-
- return 0;
-}
-
-/* Push packets received by the RXQ to BM pool */
-static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
- struct mvpp2_rx_queue *rxq)
-{
- int rx_received, i;
-
- rx_received = mvpp2_rxq_received(port, rxq->id);
- if (!rx_received)
- return;
-
- for (i = 0; i < rx_received; i++) {
- struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
- u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
- int pool;
-
- pool = (status & MVPP2_RXD_BM_POOL_ID_MASK) >>
- MVPP2_RXD_BM_POOL_ID_OFFS;
-
- mvpp2_bm_pool_put(port, pool,
- mvpp2_rxdesc_dma_addr_get(port, rx_desc),
- mvpp2_rxdesc_cookie_get(port, rx_desc));
- }
- mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
-}
-
-/* Cleanup Rx queue */
-static void mvpp2_rxq_deinit(struct mvpp2_port *port,
- struct mvpp2_rx_queue *rxq)
-{
- int cpu;
-
- mvpp2_rxq_drop_pkts(port, rxq);
-
- if (rxq->descs)
- dma_free_coherent(port->dev->dev.parent,
- rxq->size * MVPP2_DESC_ALIGNED_SIZE,
- rxq->descs,
- rxq->descs_dma);
-
- rxq->descs = NULL;
- rxq->last_desc = 0;
- rxq->next_desc_to_proc = 0;
- rxq->descs_dma = 0;
-
- /* Clear Rx descriptors queue starting address and size;
- * free descriptor number
- */
- mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
- cpu = get_cpu();
- mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
- mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, 0);
- mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, 0);
- put_cpu();
-}
-
-/* Create and initialize a Tx queue */
-static int mvpp2_txq_init(struct mvpp2_port *port,
- struct mvpp2_tx_queue *txq)
-{
- u32 val;
- int cpu, desc, desc_per_txq, tx_port_num;
- struct mvpp2_txq_pcpu *txq_pcpu;
-
- txq->size = port->tx_ring_size;
-
- /* Allocate memory for Tx descriptors */
- txq->descs = dma_alloc_coherent(port->dev->dev.parent,
- txq->size * MVPP2_DESC_ALIGNED_SIZE,
- &txq->descs_dma, GFP_KERNEL);
- if (!txq->descs)
- return -ENOMEM;
-
- txq->last_desc = txq->size - 1;
-
- /* Set Tx descriptors queue starting address - indirect access */
- cpu = get_cpu();
- mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
- mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG,
- txq->descs_dma);
- mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG,
- txq->size & MVPP2_TXQ_DESC_SIZE_MASK);
- mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_INDEX_REG, 0);
- mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_RSVD_CLR_REG,
- txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
- val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PENDING_REG);
- val &= ~MVPP2_TXQ_PENDING_MASK;
- mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PENDING_REG, val);
-
- /* Calculate base address in prefetch buffer. We reserve 16 descriptors
- * for each existing TXQ.
- * TCONTs for the PON port must be contiguous from 0 to MVPP2_MAX_TCONT.
- * GBE ports are assumed to be contiguous from 0 to MVPP2_MAX_PORTS.
- */
- desc_per_txq = 16;
- desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
- (txq->log_id * desc_per_txq);
-
- mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG,
- MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
- MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));
- put_cpu();
-
- /* WRR / EJP configuration - indirect access */
- tx_port_num = mvpp2_egress_port(port);
- mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
-
- val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
- val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
- val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
- val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
- mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);
-
- val = MVPP2_TXQ_TOKEN_SIZE_MAX;
- mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
- val);
-
- for_each_present_cpu(cpu) {
- txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
- txq_pcpu->size = txq->size;
- txq_pcpu->buffs = kmalloc_array(txq_pcpu->size,
- sizeof(*txq_pcpu->buffs),
- GFP_KERNEL);
- if (!txq_pcpu->buffs)
- return -ENOMEM;
-
- txq_pcpu->count = 0;
- txq_pcpu->reserved_num = 0;
- txq_pcpu->txq_put_index = 0;
- txq_pcpu->txq_get_index = 0;
- txq_pcpu->tso_headers = NULL;
-
- txq_pcpu->stop_threshold = txq->size - MVPP2_MAX_SKB_DESCS;
- txq_pcpu->wake_threshold = txq_pcpu->stop_threshold / 2;
-
- txq_pcpu->tso_headers =
- dma_alloc_coherent(port->dev->dev.parent,
- txq_pcpu->size * TSO_HEADER_SIZE,
- &txq_pcpu->tso_headers_dma,
- GFP_KERNEL);
- if (!txq_pcpu->tso_headers)
- return -ENOMEM;
- }
-
- return 0;
-}
-
-/* Free allocated TXQ resources */
-static void mvpp2_txq_deinit(struct mvpp2_port *port,
- struct mvpp2_tx_queue *txq)
-{
- struct mvpp2_txq_pcpu *txq_pcpu;
- int cpu;
-
- for_each_present_cpu(cpu) {
- txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
- kfree(txq_pcpu->buffs);
-
- if (txq_pcpu->tso_headers)
- dma_free_coherent(port->dev->dev.parent,
- txq_pcpu->size * TSO_HEADER_SIZE,
- txq_pcpu->tso_headers,
- txq_pcpu->tso_headers_dma);
-
- txq_pcpu->tso_headers = NULL;
- }
-
- if (txq->descs)
- dma_free_coherent(port->dev->dev.parent,
- txq->size * MVPP2_DESC_ALIGNED_SIZE,
- txq->descs, txq->descs_dma);
-
- txq->descs = NULL;
- txq->last_desc = 0;
- txq->next_desc_to_proc = 0;
- txq->descs_dma = 0;
-
- /* Set minimum bandwidth for disabled TXQs */
- mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);
-
- /* Set Tx descriptors queue starting address and size */
- cpu = get_cpu();
- mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
- mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, 0);
- mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, 0);
- put_cpu();
-}
-
-/* Cleanup a Tx queue */
-static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
-{
- struct mvpp2_txq_pcpu *txq_pcpu;
- int delay, pending, cpu;
- u32 val;
-
- cpu = get_cpu();
- mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
- val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG);
- val |= MVPP2_TXQ_DRAIN_EN_MASK;
- mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val);
-
- /* The napi queue has been stopped so wait for all packets
- * to be transmitted.
- */
- delay = 0;
- do {
- if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
- netdev_warn(port->dev,
- "port %d: cleaning queue %d timed out\n",
- port->id, txq->log_id);
- break;
- }
- mdelay(1);
- delay++;
-
- pending = mvpp2_percpu_read(port->priv, cpu,
- MVPP2_TXQ_PENDING_REG);
- pending &= MVPP2_TXQ_PENDING_MASK;
- } while (pending);
-
- val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
- mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val);
- put_cpu();
-
- for_each_present_cpu(cpu) {
- txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
-
- /* Release all packets */
- mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);
-
- /* Reset queue */
- txq_pcpu->count = 0;
- txq_pcpu->txq_put_index = 0;
- txq_pcpu->txq_get_index = 0;
- }
-}
-
-/* Cleanup all Tx queues */
-static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
-{
- struct mvpp2_tx_queue *txq;
- int queue;
- u32 val;
-
- val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);
-
- /* Reset Tx ports and delete Tx queues */
- val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
- mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
-
- for (queue = 0; queue < port->ntxqs; queue++) {
- txq = port->txqs[queue];
- mvpp2_txq_clean(port, txq);
- mvpp2_txq_deinit(port, txq);
- }
-
- on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
-
- val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
- mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
-}
-
-/* Cleanup all Rx queues */
-static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
-{
- int queue;
-
- for (queue = 0; queue < port->nrxqs; queue++)
- mvpp2_rxq_deinit(port, port->rxqs[queue]);
-}
-
-/* Init all Rx queues for port */
-static int mvpp2_setup_rxqs(struct mvpp2_port *port)
-{
- int queue, err;
-
- for (queue = 0; queue < port->nrxqs; queue++) {
- err = mvpp2_rxq_init(port, port->rxqs[queue]);
- if (err)
- goto err_cleanup;
- }
- return 0;
-
-err_cleanup:
- mvpp2_cleanup_rxqs(port);
- return err;
-}
-
-/* Init all tx queues for port */
-static int mvpp2_setup_txqs(struct mvpp2_port *port)
-{
- struct mvpp2_tx_queue *txq;
- int queue, err;
-
- for (queue = 0; queue < port->ntxqs; queue++) {
- txq = port->txqs[queue];
- err = mvpp2_txq_init(port, txq);
- if (err)
- goto err_cleanup;
- }
-
- if (port->has_tx_irqs) {
- mvpp2_tx_time_coal_set(port);
- for (queue = 0; queue < port->ntxqs; queue++) {
- txq = port->txqs[queue];
- mvpp2_tx_pkts_coal_set(port, txq);
- }
- }
-
- on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
- return 0;
-
-err_cleanup:
- mvpp2_cleanup_txqs(port);
- return err;
-}
-
-/* The callback for per-port interrupt */
-static irqreturn_t mvpp2_isr(int irq, void *dev_id)
-{
- struct mvpp2_queue_vector *qv = dev_id;
-
- mvpp2_qvec_interrupt_disable(qv);
-
- napi_schedule(&qv->napi);
-
- return IRQ_HANDLED;
-}
-
-/* Per-port interrupt for link status changes */
-static irqreturn_t mvpp2_link_status_isr(int irq, void *dev_id)
-{
- struct mvpp2_port *port = (struct mvpp2_port *)dev_id;
- struct net_device *dev = port->dev;
- bool event = false, link = false;
- u32 val;
-
- mvpp22_gop_mask_irq(port);
-
- if (port->gop_id == 0 &&
- port->phy_interface == PHY_INTERFACE_MODE_10GKR) {
- val = readl(port->base + MVPP22_XLG_INT_STAT);
- if (val & MVPP22_XLG_INT_STAT_LINK) {
- event = true;
- val = readl(port->base + MVPP22_XLG_STATUS);
- if (val & MVPP22_XLG_STATUS_LINK_UP)
- link = true;
- }
- } else if (phy_interface_mode_is_rgmii(port->phy_interface) ||
- port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
- val = readl(port->base + MVPP22_GMAC_INT_STAT);
- if (val & MVPP22_GMAC_INT_STAT_LINK) {
- event = true;
- val = readl(port->base + MVPP2_GMAC_STATUS0);
- if (val & MVPP2_GMAC_STATUS0_LINK_UP)
- link = true;
- }
- }
-
- if (!netif_running(dev) || !event)
- goto handled;
-
- if (link) {
- mvpp2_interrupts_enable(port);
-
- mvpp2_egress_enable(port);
- mvpp2_ingress_enable(port);
- netif_carrier_on(dev);
- netif_tx_wake_all_queues(dev);
- } else {
- netif_tx_stop_all_queues(dev);
- netif_carrier_off(dev);
- mvpp2_ingress_disable(port);
- mvpp2_egress_disable(port);
-
- mvpp2_interrupts_disable(port);
- }
-
-handled:
- mvpp22_gop_unmask_irq(port);
- return IRQ_HANDLED;
-}
-
-static void mvpp2_gmac_set_autoneg(struct mvpp2_port *port,
- struct phy_device *phydev)
-{
- u32 val;
-
- if (port->phy_interface != PHY_INTERFACE_MODE_RGMII &&
- port->phy_interface != PHY_INTERFACE_MODE_RGMII_ID &&
- port->phy_interface != PHY_INTERFACE_MODE_RGMII_RXID &&
- port->phy_interface != PHY_INTERFACE_MODE_RGMII_TXID &&
- port->phy_interface != PHY_INTERFACE_MODE_SGMII)
- return;
-
- val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
- val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED |
- MVPP2_GMAC_CONFIG_GMII_SPEED |
- MVPP2_GMAC_CONFIG_FULL_DUPLEX |
- MVPP2_GMAC_AN_SPEED_EN |
- MVPP2_GMAC_AN_DUPLEX_EN);
-
- if (phydev->duplex)
- val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
-
- if (phydev->speed == SPEED_1000)
- val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
- else if (phydev->speed == SPEED_100)
- val |= MVPP2_GMAC_CONFIG_MII_SPEED;
-
- writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
-}
-
-/* Adjust link */
-static void mvpp2_link_event(struct net_device *dev)
-{
- struct mvpp2_port *port = netdev_priv(dev);
- struct phy_device *phydev = dev->phydev;
- bool link_reconfigured = false;
- u32 val;
-
- if (phydev->link) {
- if (port->phy_interface != phydev->interface && port->comphy) {
- /* disable current port for reconfiguration */
- mvpp2_interrupts_disable(port);
- netif_carrier_off(port->dev);
- mvpp2_port_disable(port);
- phy_power_off(port->comphy);
-
- /* comphy reconfiguration */
- port->phy_interface = phydev->interface;
- mvpp22_comphy_init(port);
-
- /* gop/mac reconfiguration */
- mvpp22_gop_init(port);
- mvpp2_port_mii_set(port);
-
- link_reconfigured = true;
- }
-
- if ((port->speed != phydev->speed) ||
- (port->duplex != phydev->duplex)) {
- mvpp2_gmac_set_autoneg(port, phydev);
-
- port->duplex = phydev->duplex;
- port->speed = phydev->speed;
- }
- }
-
- if (phydev->link != port->link || link_reconfigured) {
- port->link = phydev->link;
-
- if (phydev->link) {
- if (port->phy_interface == PHY_INTERFACE_MODE_RGMII ||
- port->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
- port->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID ||
- port->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID ||
- port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
- val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
- val |= (MVPP2_GMAC_FORCE_LINK_PASS |
- MVPP2_GMAC_FORCE_LINK_DOWN);
- writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
- }
-
- mvpp2_interrupts_enable(port);
- mvpp2_port_enable(port);
-
- mvpp2_egress_enable(port);
- mvpp2_ingress_enable(port);
- netif_carrier_on(dev);
- netif_tx_wake_all_queues(dev);
- } else {
- port->duplex = -1;
- port->speed = 0;
-
- netif_tx_stop_all_queues(dev);
- netif_carrier_off(dev);
- mvpp2_ingress_disable(port);
- mvpp2_egress_disable(port);
-
- mvpp2_port_disable(port);
- mvpp2_interrupts_disable(port);
- }
-
- phy_print_status(phydev);
- }
-}
-
-static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu)
-{
- ktime_t interval;
-
- if (!port_pcpu->timer_scheduled) {
- port_pcpu->timer_scheduled = true;
- interval = MVPP2_TXDONE_HRTIMER_PERIOD_NS;
- hrtimer_start(&port_pcpu->tx_done_timer, interval,
- HRTIMER_MODE_REL_PINNED);
- }
-}
-
-static void mvpp2_tx_proc_cb(unsigned long data)
-{
- struct net_device *dev = (struct net_device *)data;
- struct mvpp2_port *port = netdev_priv(dev);
- struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
- unsigned int tx_todo, cause;
-
- if (!netif_running(dev))
- return;
- port_pcpu->timer_scheduled = false;
-
- /* Process all the Tx queues */
- cause = (1 << port->ntxqs) - 1;
- tx_todo = mvpp2_tx_done(port, cause, smp_processor_id());
-
- /* Set the timer in case not all the packets were processed */
- if (tx_todo)
- mvpp2_timer_set(port_pcpu);
-}
-
-static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
-{
- struct mvpp2_port_pcpu *port_pcpu = container_of(timer,
- struct mvpp2_port_pcpu,
- tx_done_timer);
-
- tasklet_schedule(&port_pcpu->tx_done_tasklet);
-
- return HRTIMER_NORESTART;
-}
-
-/* Main RX/TX processing routines */
-
-/* Display more error info */
-static void mvpp2_rx_error(struct mvpp2_port *port,
- struct mvpp2_rx_desc *rx_desc)
-{
- u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
- size_t sz = mvpp2_rxdesc_size_get(port, rx_desc);
-
- switch (status & MVPP2_RXD_ERR_CODE_MASK) {
- case MVPP2_RXD_ERR_CRC:
- netdev_err(port->dev, "bad rx status %08x (crc error), size=%zu\n",
- status, sz);
- break;
- case MVPP2_RXD_ERR_OVERRUN:
- netdev_err(port->dev, "bad rx status %08x (overrun error), size=%zu\n",
- status, sz);
- break;
- case MVPP2_RXD_ERR_RESOURCE:
- netdev_err(port->dev, "bad rx status %08x (resource error), size=%zu\n",
- status, sz);
- break;
- }
-}
-
-/* Handle RX checksum offload */
-static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
- struct sk_buff *skb)
-{
- if (((status & MVPP2_RXD_L3_IP4) &&
- !(status & MVPP2_RXD_IP4_HEADER_ERR)) ||
- (status & MVPP2_RXD_L3_IP6))
- if (((status & MVPP2_RXD_L4_UDP) ||
- (status & MVPP2_RXD_L4_TCP)) &&
- (status & MVPP2_RXD_L4_CSUM_OK)) {
- skb->csum = 0;
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- return;
- }
-
- skb->ip_summed = CHECKSUM_NONE;
-}
-
-/* Reuse skb if possible, or allocate a new skb and add it to BM pool */
-static int mvpp2_rx_refill(struct mvpp2_port *port,
- struct mvpp2_bm_pool *bm_pool, int pool)
-{
- dma_addr_t dma_addr;
- phys_addr_t phys_addr;
- void *buf;
-
- /* No recycle or too many buffers are in use, so allocate a new skb */
- buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr, &phys_addr,
- GFP_ATOMIC);
- if (!buf)
- return -ENOMEM;
-
- mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
-
- return 0;
-}
-
-/* Handle tx checksum */
-static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
-{
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- int ip_hdr_len = 0;
- u8 l4_proto;
-
- if (skb->protocol == htons(ETH_P_IP)) {
- struct iphdr *ip4h = ip_hdr(skb);
-
- /* Calculate IPv4 checksum and L4 checksum */
- ip_hdr_len = ip4h->ihl;
- l4_proto = ip4h->protocol;
- } else if (skb->protocol == htons(ETH_P_IPV6)) {
- struct ipv6hdr *ip6h = ipv6_hdr(skb);
-
- /* Read l4_protocol from one of IPv6 extra headers */
- if (skb_network_header_len(skb) > 0)
- ip_hdr_len = (skb_network_header_len(skb) >> 2);
- l4_proto = ip6h->nexthdr;
- } else {
- return MVPP2_TXD_L4_CSUM_NOT;
- }
-
- return mvpp2_txq_desc_csum(skb_network_offset(skb),
- skb->protocol, ip_hdr_len, l4_proto);
- }
-
- return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
-}
-
-/* Main rx processing */
-static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
- int rx_todo, struct mvpp2_rx_queue *rxq)
-{
- struct net_device *dev = port->dev;
- int rx_received;
- int rx_done = 0;
- u32 rcvd_pkts = 0;
- u32 rcvd_bytes = 0;
-
- /* Get number of received packets and clamp the to-do */
- rx_received = mvpp2_rxq_received(port, rxq->id);
- if (rx_todo > rx_received)
- rx_todo = rx_received;
-
- while (rx_done < rx_todo) {
- struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
- struct mvpp2_bm_pool *bm_pool;
- struct sk_buff *skb;
- unsigned int frag_size;
- dma_addr_t dma_addr;
- phys_addr_t phys_addr;
- u32 rx_status;
- int pool, rx_bytes, err;
- void *data;
-
- rx_done++;
- rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
- rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
- rx_bytes -= MVPP2_MH_SIZE;
- dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
- phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
- data = (void *)phys_to_virt(phys_addr);
-
- pool = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
- MVPP2_RXD_BM_POOL_ID_OFFS;
- bm_pool = &port->priv->bm_pools[pool];
-
- /* In case of an error, release the requested buffer pointer
- * to the Buffer Manager. This request process is controlled
- * by the hardware, and the information about the buffer is
- * carried in the RX descriptor.
- */
- if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
-err_drop_frame:
- dev->stats.rx_errors++;
- mvpp2_rx_error(port, rx_desc);
- /* Return the buffer to the pool */
- mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
- continue;
- }
-
- if (bm_pool->frag_size > PAGE_SIZE)
- frag_size = 0;
- else
- frag_size = bm_pool->frag_size;
-
- skb = build_skb(data, frag_size);
- if (!skb) {
- netdev_warn(port->dev, "skb build failed\n");
- goto err_drop_frame;
- }
-
- err = mvpp2_rx_refill(port, bm_pool, pool);
- if (err) {
- netdev_err(port->dev, "failed to refill BM pools\n");
- goto err_drop_frame;
- }
-
- dma_unmap_single(dev->dev.parent, dma_addr,
- bm_pool->buf_size, DMA_FROM_DEVICE);
-
- rcvd_pkts++;
- rcvd_bytes += rx_bytes;
-
- skb_reserve(skb, MVPP2_MH_SIZE + NET_SKB_PAD);
- skb_put(skb, rx_bytes);
- skb->protocol = eth_type_trans(skb, dev);
- mvpp2_rx_csum(port, rx_status, skb);
-
- napi_gro_receive(napi, skb);
- }
-
- if (rcvd_pkts) {
- struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
-
- u64_stats_update_begin(&stats->syncp);
- stats->rx_packets += rcvd_pkts;
- stats->rx_bytes += rcvd_bytes;
- u64_stats_update_end(&stats->syncp);
- }
-
- /* Update Rx queue management counters */
- wmb();
- mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done);
-
- return rx_todo;
-}
-
-static inline void
-tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
- struct mvpp2_tx_desc *desc)
-{
- struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
-
- dma_addr_t buf_dma_addr =
- mvpp2_txdesc_dma_addr_get(port, desc);
- size_t buf_sz =
- mvpp2_txdesc_size_get(port, desc);
- if (!IS_TSO_HEADER(txq_pcpu, buf_dma_addr))
- dma_unmap_single(port->dev->dev.parent, buf_dma_addr,
- buf_sz, DMA_TO_DEVICE);
- mvpp2_txq_desc_put(txq);
-}
-
-/* Handle tx fragmentation processing */
-static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
- struct mvpp2_tx_queue *aggr_txq,
- struct mvpp2_tx_queue *txq)
-{
- struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
- struct mvpp2_tx_desc *tx_desc;
- int i;
- dma_addr_t buf_dma_addr;
-
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- void *addr = page_address(frag->page.p) + frag->page_offset;
-
- tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
- mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
- mvpp2_txdesc_size_set(port, tx_desc, frag->size);
-
- buf_dma_addr = dma_map_single(port->dev->dev.parent, addr,
- frag->size,
- DMA_TO_DEVICE);
- if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) {
- mvpp2_txq_desc_put(txq);
- goto cleanup;
- }
-
- mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);
-
- if (i == (skb_shinfo(skb)->nr_frags - 1)) {
- /* Last descriptor */
- mvpp2_txdesc_cmd_set(port, tx_desc,
- MVPP2_TXD_L_DESC);
- mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
- } else {
- /* Descriptor in the middle: Not First, Not Last */
- mvpp2_txdesc_cmd_set(port, tx_desc, 0);
- mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
- }
- }
-
- return 0;
-cleanup:
- /* Release all descriptors that were used to map fragments of
- * this packet, as well as the corresponding DMA mappings
- */
- for (i = i - 1; i >= 0; i--) {
- tx_desc = txq->descs + i;
- tx_desc_unmap_put(port, txq, tx_desc);
- }
-
- return -ENOMEM;
-}
-
-static inline void mvpp2_tso_put_hdr(struct sk_buff *skb,
- struct net_device *dev,
- struct mvpp2_tx_queue *txq,
- struct mvpp2_tx_queue *aggr_txq,
- struct mvpp2_txq_pcpu *txq_pcpu,
- int hdr_sz)
-{
- struct mvpp2_port *port = netdev_priv(dev);
- struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
- dma_addr_t addr;
-
- mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
- mvpp2_txdesc_size_set(port, tx_desc, hdr_sz);
-
- addr = txq_pcpu->tso_headers_dma +
- txq_pcpu->txq_put_index * TSO_HEADER_SIZE;
- mvpp2_txdesc_dma_addr_set(port, tx_desc, addr);
-
- mvpp2_txdesc_cmd_set(port, tx_desc, mvpp2_skb_tx_csum(port, skb) |
- MVPP2_TXD_F_DESC |
- MVPP2_TXD_PADDING_DISABLE);
- mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
-}
-
-static inline int mvpp2_tso_put_data(struct sk_buff *skb,
- struct net_device *dev, struct tso_t *tso,
- struct mvpp2_tx_queue *txq,
- struct mvpp2_tx_queue *aggr_txq,
- struct mvpp2_txq_pcpu *txq_pcpu,
- int sz, bool left, bool last)
-{
- struct mvpp2_port *port = netdev_priv(dev);
- struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
- dma_addr_t buf_dma_addr;
-
- mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
- mvpp2_txdesc_size_set(port, tx_desc, sz);
-
- buf_dma_addr = dma_map_single(dev->dev.parent, tso->data, sz,
- DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
- mvpp2_txq_desc_put(txq);
- return -ENOMEM;
- }
-
- mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);
-
- if (!left) {
- mvpp2_txdesc_cmd_set(port, tx_desc, MVPP2_TXD_L_DESC);
- if (last) {
- mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
- return 0;
- }
- } else {
- mvpp2_txdesc_cmd_set(port, tx_desc, 0);
- }
-
- mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
- return 0;
-}
-
-static int mvpp2_tx_tso(struct sk_buff *skb, struct net_device *dev,
- struct mvpp2_tx_queue *txq,
- struct mvpp2_tx_queue *aggr_txq,
- struct mvpp2_txq_pcpu *txq_pcpu)
-{
- struct mvpp2_port *port = netdev_priv(dev);
- struct tso_t tso;
- int hdr_sz = skb_transport_offset(skb) + tcp_hdrlen(skb);
- int i, len, descs = 0;
-
- /* Check number of available descriptors */
- if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq,
- tso_count_descs(skb)) ||
- mvpp2_txq_reserved_desc_num_proc(port->priv, txq, txq_pcpu,
- tso_count_descs(skb)))
- return 0;
-
- tso_start(skb, &tso);
- len = skb->len - hdr_sz;
- while (len > 0) {
- int left = min_t(int, skb_shinfo(skb)->gso_size, len);
- char *hdr = txq_pcpu->tso_headers +
- txq_pcpu->txq_put_index * TSO_HEADER_SIZE;
-
- len -= left;
- descs++;
-
- tso_build_hdr(skb, hdr, &tso, left, len == 0);
- mvpp2_tso_put_hdr(skb, dev, txq, aggr_txq, txq_pcpu, hdr_sz);
-
- while (left > 0) {
- int sz = min_t(int, tso.size, left);
- left -= sz;
- descs++;
-
- if (mvpp2_tso_put_data(skb, dev, &tso, txq, aggr_txq,
- txq_pcpu, sz, left, len == 0))
- goto release;
- tso_build_data(skb, &tso, sz);
- }
- }
-
- return descs;
-
-release:
- for (i = descs - 1; i >= 0; i--) {
- struct mvpp2_tx_desc *tx_desc = txq->descs + i;
- tx_desc_unmap_put(port, txq, tx_desc);
- }
- return 0;
-}
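
A rough walk-through of the descriptor budget for the TSO loop above, simplified to one data descriptor per segment; the inner loop may emit more when tso.size is smaller than the remaining segment payload:

    #include <stdio.h>

    int main(void)
    {
    	int len = 2800;		/* payload left after a 54-byte TCP/IP header */
    	int gso_size = 1400;
    	int descs = 0;

    	while (len > 0) {
    		int seg = len < gso_size ? len : gso_size;

    		descs += 1 + 1;	/* header descriptor + one data descriptor */
    		len -= seg;
    	}
    	printf("descriptors used = %d\n", descs);	/* 4 */
    	return 0;
    }
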
-
-/* Main tx processing */
-static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
-{
- struct mvpp2_port *port = netdev_priv(dev);
- struct mvpp2_tx_queue *txq, *aggr_txq;
- struct mvpp2_txq_pcpu *txq_pcpu;
- struct mvpp2_tx_desc *tx_desc;
- dma_addr_t buf_dma_addr;
- int frags = 0;
- u16 txq_id;
- u32 tx_cmd;
-
- txq_id = skb_get_queue_mapping(skb);
- txq = port->txqs[txq_id];
- txq_pcpu = this_cpu_ptr(txq->pcpu);
- aggr_txq = &port->priv->aggr_txqs[smp_processor_id()];
-
- if (skb_is_gso(skb)) {
- frags = mvpp2_tx_tso(skb, dev, txq, aggr_txq, txq_pcpu);
- goto out;
- }
- frags = skb_shinfo(skb)->nr_frags + 1;
-
- /* Check number of available descriptors */
- if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, frags) ||
- mvpp2_txq_reserved_desc_num_proc(port->priv, txq,
- txq_pcpu, frags)) {
- frags = 0;
- goto out;
- }
-
- /* Get a descriptor for the first part of the packet */
- tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
- mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
- mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb));
-
- buf_dma_addr = dma_map_single(dev->dev.parent, skb->data,
- skb_headlen(skb), DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
- mvpp2_txq_desc_put(txq);
- frags = 0;
- goto out;
- }
-
- mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);
-
- tx_cmd = mvpp2_skb_tx_csum(port, skb);
-
- if (frags == 1) {
- /* First and Last descriptor */
- tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
- mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
- mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
- } else {
- /* First but not Last */
- tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
- mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
- mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
-
- /* Continue with other skb fragments */
- if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
- tx_desc_unmap_put(port, txq, tx_desc);
- frags = 0;
- }
- }
-
-out:
- if (frags > 0) {
- struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
- struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
-
- txq_pcpu->reserved_num -= frags;
- txq_pcpu->count += frags;
- aggr_txq->count += frags;
-
- /* Enable transmit */
- wmb();
- mvpp2_aggr_txq_pend_desc_add(port, frags);
-
- if (txq_pcpu->count >= txq_pcpu->stop_threshold)
- netif_tx_stop_queue(nq);
-
- u64_stats_update_begin(&stats->syncp);
- stats->tx_packets++;
- stats->tx_bytes += skb->len;
- u64_stats_update_end(&stats->syncp);
- } else {
- dev->stats.tx_dropped++;
- dev_kfree_skb_any(skb);
- }
-
- /* Finalize TX processing */
- if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal)
- mvpp2_txq_done(port, txq, txq_pcpu);
-
- /* Set the timer in case not all frags were processed */
- if (!port->has_tx_irqs && txq_pcpu->count <= frags &&
- txq_pcpu->count > 0) {
- struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
-
- mvpp2_timer_set(port_pcpu);
- }
-
- return NETDEV_TX_OK;
-}
-
-static inline void mvpp2_cause_error(struct net_device *dev, int cause)
-{
- if (cause & MVPP2_CAUSE_FCS_ERR_MASK)
- netdev_err(dev, "FCS error\n");
- if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK)
- netdev_err(dev, "rx fifo overrun error\n");
- if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK)
- netdev_err(dev, "tx fifo underrun error\n");
-}
-
-static int mvpp2_poll(struct napi_struct *napi, int budget)
-{
- u32 cause_rx_tx, cause_rx, cause_tx, cause_misc;
- int rx_done = 0;
- struct mvpp2_port *port = netdev_priv(napi->dev);
- struct mvpp2_queue_vector *qv;
- int cpu = smp_processor_id();
-
- qv = container_of(napi, struct mvpp2_queue_vector, napi);
-
- /* Rx/Tx cause register
- *
- * Bits 0-15: each bit indicates received packets on the Rx queue
- * (bit 0 is for Rx queue 0).
- *
- * Bits 16-23: each bit indicates transmitted packets on the Tx queue
- * (bit 16 is for Tx queue 0).
- *
- * Each CPU has its own Rx/Tx cause register
- */
- cause_rx_tx = mvpp2_percpu_read_relaxed(port->priv, qv->sw_thread_id,
- MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
-
- cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
- if (cause_misc) {
- mvpp2_cause_error(port->dev, cause_misc);
-
- /* Clear the cause register */
- mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
- mvpp2_percpu_write(port->priv, cpu,
- MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
- cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
- }
-
- cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
- if (cause_tx) {
- cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET;
- mvpp2_tx_done(port, cause_tx, qv->sw_thread_id);
- }
-
- /* Process RX packets */
- cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
- cause_rx <<= qv->first_rxq;
- cause_rx |= qv->pending_cause_rx;
- while (cause_rx && budget > 0) {
- int count;
- struct mvpp2_rx_queue *rxq;
-
- rxq = mvpp2_get_rx_queue(port, cause_rx);
- if (!rxq)
- break;
-
- count = mvpp2_rx(port, napi, budget, rxq);
- rx_done += count;
- budget -= count;
- if (budget > 0) {
- /* Clear the bit associated to this Rx queue
- * so that next iteration will continue from
- * the next Rx queue.
- */
- cause_rx &= ~(1 << rxq->logic_rxq);
- }
- }
-
- if (budget > 0) {
- cause_rx = 0;
- napi_complete_done(napi, rx_done);
-
- mvpp2_qvec_interrupt_enable(qv);
- }
- qv->pending_cause_rx = cause_rx;
- return rx_done;
-}
-
-/* Set hw internals when starting port */
-static void mvpp2_start_dev(struct mvpp2_port *port)
-{
- struct net_device *ndev = port->dev;
- int i;
-
- if (port->gop_id == 0 &&
- (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
- port->phy_interface == PHY_INTERFACE_MODE_10GKR))
- mvpp2_xlg_max_rx_size_set(port);
- else
- mvpp2_gmac_max_rx_size_set(port);
-
- mvpp2_txp_max_tx_size_set(port);
-
- for (i = 0; i < port->nqvecs; i++)
- napi_enable(&port->qvecs[i].napi);
-
- /* Enable interrupts on all CPUs */
- mvpp2_interrupts_enable(port);
-
- if (port->priv->hw_version == MVPP22) {
- mvpp22_comphy_init(port);
- mvpp22_gop_init(port);
- }
-
- mvpp2_port_mii_set(port);
- mvpp2_port_enable(port);
- if (ndev->phydev)
- phy_start(ndev->phydev);
- netif_tx_start_all_queues(port->dev);
-}
-
-/* Set hw internals when stopping port */
-static void mvpp2_stop_dev(struct mvpp2_port *port)
-{
- struct net_device *ndev = port->dev;
- int i;
-
- /* Stop new packets from arriving to RXQs */
- mvpp2_ingress_disable(port);
-
- mdelay(10);
-
- /* Disable interrupts on all CPUs */
- mvpp2_interrupts_disable(port);
-
- for (i = 0; i < port->nqvecs; i++)
- napi_disable(&port->qvecs[i].napi);
-
- netif_carrier_off(port->dev);
- netif_tx_stop_all_queues(port->dev);
-
- mvpp2_egress_disable(port);
- mvpp2_port_disable(port);
- if (ndev->phydev)
- phy_stop(ndev->phydev);
- phy_power_off(port->comphy);
-}
-
-static int mvpp2_check_ringparam_valid(struct net_device *dev,
- struct ethtool_ringparam *ring)
-{
- u16 new_rx_pending = ring->rx_pending;
- u16 new_tx_pending = ring->tx_pending;
-
- if (ring->rx_pending == 0 || ring->tx_pending == 0)
- return -EINVAL;
-
- if (ring->rx_pending > MVPP2_MAX_RXD_MAX)
- new_rx_pending = MVPP2_MAX_RXD_MAX;
- else if (!IS_ALIGNED(ring->rx_pending, 16))
- new_rx_pending = ALIGN(ring->rx_pending, 16);
-
- if (ring->tx_pending > MVPP2_MAX_TXD_MAX)
- new_tx_pending = MVPP2_MAX_TXD_MAX;
- else if (!IS_ALIGNED(ring->tx_pending, 32))
- new_tx_pending = ALIGN(ring->tx_pending, 32);
-
- /* The Tx ring size cannot be smaller than the minimum number of
- * descriptors needed for TSO.
- */
- if (new_tx_pending < MVPP2_MAX_SKB_DESCS)
- new_tx_pending = ALIGN(MVPP2_MAX_SKB_DESCS, 32);
-
- if (ring->rx_pending != new_rx_pending) {
- netdev_info(dev, "illegal Rx ring size value %d, round to %d\n",
- ring->rx_pending, new_rx_pending);
- ring->rx_pending = new_rx_pending;
- }
-
- if (ring->tx_pending != new_tx_pending) {
- netdev_info(dev, "illegal Tx ring size value %d, round to %d\n",
- ring->tx_pending, new_tx_pending);
- ring->tx_pending = new_tx_pending;
- }
-
- return 0;
-}
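
The kernel's ALIGN() rounds up to a power-of-two boundary exactly like the open-coded macro below; worked values for the two alignments applied above (Tx may then still be raised to MVPP2_MAX_SKB_DESCS if too small):

    #include <stdio.h>

    #define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
    	int rx = 1000, tx = 100;

    	printf("rx: %d -> %d\n", rx, ALIGN_UP(rx, 16));	/* 1008 */
    	printf("tx: %d -> %d\n", tx, ALIGN_UP(tx, 32));	/* 128 */
    	return 0;
    }
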
-
-static void mvpp21_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
-{
- u32 mac_addr_l, mac_addr_m, mac_addr_h;
-
- mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
- mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE);
- mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH);
- addr[0] = (mac_addr_h >> 24) & 0xFF;
- addr[1] = (mac_addr_h >> 16) & 0xFF;
- addr[2] = (mac_addr_h >> 8) & 0xFF;
- addr[3] = mac_addr_h & 0xFF;
- addr[4] = mac_addr_m & 0xFF;
- addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF;
-}
-
-static int mvpp2_phy_connect(struct mvpp2_port *port)
-{
- struct phy_device *phy_dev;
-
- /* No PHY is attached */
- if (!port->phy_node)
- return 0;
-
- phy_dev = of_phy_connect(port->dev, port->phy_node, mvpp2_link_event, 0,
- port->phy_interface);
- if (!phy_dev) {
- netdev_err(port->dev, "cannot connect to phy\n");
- return -ENODEV;
- }
- phy_dev->supported &= PHY_GBIT_FEATURES;
- phy_dev->advertising = phy_dev->supported;
-
- port->link = 0;
- port->duplex = 0;
- port->speed = 0;
-
- return 0;
-}
-
-static void mvpp2_phy_disconnect(struct mvpp2_port *port)
-{
- struct net_device *ndev = port->dev;
-
- if (!ndev->phydev)
- return;
-
- phy_disconnect(ndev->phydev);
-}
-
-static int mvpp2_irqs_init(struct mvpp2_port *port)
-{
- int err, i;
-
- for (i = 0; i < port->nqvecs; i++) {
- struct mvpp2_queue_vector *qv = port->qvecs + i;
-
- if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE)
- irq_set_status_flags(qv->irq, IRQ_NO_BALANCING);
-
- err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv);
- if (err)
- goto err;
-
- if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE)
- irq_set_affinity_hint(qv->irq,
- cpumask_of(qv->sw_thread_id));
- }
-
- return 0;
-err:
- for (i = 0; i < port->nqvecs; i++) {
- struct mvpp2_queue_vector *qv = port->qvecs + i;
-
- irq_set_affinity_hint(qv->irq, NULL);
- free_irq(qv->irq, qv);
- }
-
- return err;
-}
-
-static void mvpp2_irqs_deinit(struct mvpp2_port *port)
-{
- int i;
-
- for (i = 0; i < port->nqvecs; i++) {
- struct mvpp2_queue_vector *qv = port->qvecs + i;
-
- irq_set_affinity_hint(qv->irq, NULL);
- irq_clear_status_flags(qv->irq, IRQ_NO_BALANCING);
- free_irq(qv->irq, qv);
- }
-}
-
-static void mvpp22_init_rss(struct mvpp2_port *port)
-{
- struct mvpp2 *priv = port->priv;
- int i;
-
- /* Set the table width: replace the whole classifier Rx queue number
- * with the one configured in the RSS table entry.
- */
- mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_TABLE(0));
- mvpp2_write(priv, MVPP22_RSS_WIDTH, 8);
-
- /* Loop through the classifier Rx Queues and map them to a RSS table.
- * Map them all to the first table (0) by default.
- */
- for (i = 0; i < MVPP2_CLS_RX_QUEUES; i++) {
- mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_QUEUE(i));
- mvpp2_write(priv, MVPP22_RSS_TABLE,
- MVPP22_RSS_TABLE_POINTER(0));
- }
-
- /* Configure the first table to evenly distribute the packets across
- * real Rx Queues. The table entries map a hash to a port Rx Queue.
- */
- for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++) {
- u32 sel = MVPP22_RSS_INDEX_TABLE(0) |
- MVPP22_RSS_INDEX_TABLE_ENTRY(i);
- mvpp2_write(priv, MVPP22_RSS_INDEX, sel);
-
- mvpp2_write(priv, MVPP22_RSS_TABLE_ENTRY, i % port->nrxqs);
- }
-}
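
The modulo fill above spreads hash buckets round-robin across the port's Rx queues. A tiny illustration; 32 entries and 4 queues are assumed values for the sketch, not taken from the driver header:

    #include <stdio.h>

    int main(void)
    {
    	int entries = 32, nrxqs = 4;	/* illustrative sizes */

    	for (int i = 0; i < entries; i++)
    		printf("entry %2d -> rxq %d\n", i, i % nrxqs);
    	return 0;
    }
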
-
-static int mvpp2_open(struct net_device *dev)
-{
- struct mvpp2_port *port = netdev_priv(dev);
- struct mvpp2 *priv = port->priv;
- unsigned char mac_bcast[ETH_ALEN] = {
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
- int err;
-
- err = mvpp2_prs_mac_da_accept(port, mac_bcast, true);
- if (err) {
- netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
- return err;
- }
- err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, true);
- if (err) {
- netdev_err(dev, "mvpp2_prs_mac_da_accept own addr failed\n");
- return err;
- }
- err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH);
- if (err) {
- netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n");
- return err;
- }
- err = mvpp2_prs_def_flow(port);
- if (err) {
- netdev_err(dev, "mvpp2_prs_def_flow failed\n");
- return err;
- }
-
- /* Allocate the Rx/Tx queues */
- err = mvpp2_setup_rxqs(port);
- if (err) {
- netdev_err(port->dev, "cannot allocate Rx queues\n");
- return err;
- }
-
- err = mvpp2_setup_txqs(port);
- if (err) {
- netdev_err(port->dev, "cannot allocate Tx queues\n");
- goto err_cleanup_rxqs;
- }
-
- err = mvpp2_irqs_init(port);
- if (err) {
- netdev_err(port->dev, "cannot init IRQs\n");
- goto err_cleanup_txqs;
- }
-
- if (priv->hw_version == MVPP22 && !port->phy_node && port->link_irq) {
- err = request_irq(port->link_irq, mvpp2_link_status_isr, 0,
- dev->name, port);
- if (err) {
- netdev_err(port->dev, "cannot request link IRQ %d\n",
- port->link_irq);
- goto err_free_irq;
- }
-
- mvpp22_gop_setup_irq(port);
- }
-
- /* By default, the link is down */
- netif_carrier_off(port->dev);
-
- err = mvpp2_phy_connect(port);
- if (err < 0)
- goto err_free_link_irq;
-
- /* Unmask interrupts on all CPUs */
- on_each_cpu(mvpp2_interrupts_unmask, port, 1);
- mvpp2_shared_interrupt_mask_unmask(port, false);
-
- mvpp2_start_dev(port);
-
- if (priv->hw_version == MVPP22)
- mvpp22_init_rss(port);
-
- /* Start hardware statistics gathering */
- queue_delayed_work(priv->stats_queue, &port->stats_work,
- MVPP2_MIB_COUNTERS_STATS_DELAY);
-
- return 0;
-
-err_free_link_irq:
- if (priv->hw_version == MVPP22 && !port->phy_node && port->link_irq)
- free_irq(port->link_irq, port);
-err_free_irq:
- mvpp2_irqs_deinit(port);
-err_cleanup_txqs:
- mvpp2_cleanup_txqs(port);
-err_cleanup_rxqs:
- mvpp2_cleanup_rxqs(port);
- return err;
-}
-
-static int mvpp2_stop(struct net_device *dev)
-{
- struct mvpp2_port *port = netdev_priv(dev);
- struct mvpp2_port_pcpu *port_pcpu;
- struct mvpp2 *priv = port->priv;
- int cpu;
-
- mvpp2_stop_dev(port);
- mvpp2_phy_disconnect(port);
-
- /* Mask interrupts on all CPUs */
- on_each_cpu(mvpp2_interrupts_mask, port, 1);
- mvpp2_shared_interrupt_mask_unmask(port, true);
-
- if (priv->hw_version == MVPP22 && !port->phy_node && port->link_irq)
- free_irq(port->link_irq, port);
-
- mvpp2_irqs_deinit(port);
- if (!port->has_tx_irqs) {
- for_each_present_cpu(cpu) {
- port_pcpu = per_cpu_ptr(port->pcpu, cpu);
-
- hrtimer_cancel(&port_pcpu->tx_done_timer);
- port_pcpu->timer_scheduled = false;
- tasklet_kill(&port_pcpu->tx_done_tasklet);
- }
- }
- mvpp2_cleanup_rxqs(port);
- mvpp2_cleanup_txqs(port);
-
- cancel_delayed_work_sync(&port->stats_work);
-
- return 0;
-}
-
-static int mvpp2_prs_mac_da_accept_list(struct mvpp2_port *port,
- struct netdev_hw_addr_list *list)
-{
- struct netdev_hw_addr *ha;
- int ret;
-
- netdev_hw_addr_list_for_each(ha, list) {
- ret = mvpp2_prs_mac_da_accept(port, ha->addr, true);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static void mvpp2_set_rx_promisc(struct mvpp2_port *port, bool enable)
-{
- if (!enable && (port->dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
- mvpp2_prs_vid_enable_filtering(port);
- else
- mvpp2_prs_vid_disable_filtering(port);
-
- mvpp2_prs_mac_promisc_set(port->priv, port->id,
- MVPP2_PRS_L2_UNI_CAST, enable);
-
- mvpp2_prs_mac_promisc_set(port->priv, port->id,
- MVPP2_PRS_L2_MULTI_CAST, enable);
-}
-
-static void mvpp2_set_rx_mode(struct net_device *dev)
-{
- struct mvpp2_port *port = netdev_priv(dev);
-
- /* Clear the whole UC and MC list */
- mvpp2_prs_mac_del_all(port);
-
- if (dev->flags & IFF_PROMISC) {
- mvpp2_set_rx_promisc(port, true);
- return;
- }
-
- mvpp2_set_rx_promisc(port, false);
-
- if (netdev_uc_count(dev) > MVPP2_PRS_MAC_UC_FILT_MAX ||
- mvpp2_prs_mac_da_accept_list(port, &dev->uc))
- mvpp2_prs_mac_promisc_set(port->priv, port->id,
- MVPP2_PRS_L2_UNI_CAST, true);
-
- if (dev->flags & IFF_ALLMULTI) {
- mvpp2_prs_mac_promisc_set(port->priv, port->id,
- MVPP2_PRS_L2_MULTI_CAST, true);
- return;
- }
-
- if (netdev_mc_count(dev) > MVPP2_PRS_MAC_MC_FILT_MAX ||
- mvpp2_prs_mac_da_accept_list(port, &dev->mc))
- mvpp2_prs_mac_promisc_set(port->priv, port->id,
- MVPP2_PRS_L2_MULTI_CAST, true);
-}
-
-static int mvpp2_set_mac_address(struct net_device *dev, void *p)
-{
- struct mvpp2_port *port = netdev_priv(dev);
- const struct sockaddr *addr = p;
- int err;
-
- if (!is_valid_ether_addr(addr->sa_data)) {
- err = -EADDRNOTAVAIL;
- goto log_error;
- }
-
- if (!netif_running(dev)) {
- err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
- if (!err)
- return 0;
- /* Reconfigure parser to accept the original MAC address */
- err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
- if (err)
- goto log_error;
- }
-
- mvpp2_stop_dev(port);
-
- err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
- if (!err)
- goto out_start;
-
-	/* Reconfigure parser to accept the original MAC address */
- err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
- if (err)
- goto log_error;
-out_start:
- mvpp2_start_dev(port);
- mvpp2_egress_enable(port);
- mvpp2_ingress_enable(port);
- return 0;
-log_error:
- netdev_err(dev, "failed to change MAC address\n");
- return err;
-}
-
-static int mvpp2_change_mtu(struct net_device *dev, int mtu)
-{
- struct mvpp2_port *port = netdev_priv(dev);
- int err;
-
- if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
-		netdev_info(dev, "illegal MTU value %d, rounding to %d\n", mtu,
-			    ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8));
- mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
- }
-
- if (!netif_running(dev)) {
- err = mvpp2_bm_update_mtu(dev, mtu);
- if (!err) {
- port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
- return 0;
- }
-
- /* Reconfigure BM to the original MTU */
- err = mvpp2_bm_update_mtu(dev, dev->mtu);
- if (err)
- goto log_error;
- }
-
- mvpp2_stop_dev(port);
-
- err = mvpp2_bm_update_mtu(dev, mtu);
- if (!err) {
- port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
- goto out_start;
- }
-
- /* Reconfigure BM to the original MTU */
- err = mvpp2_bm_update_mtu(dev, dev->mtu);
- if (err)
- goto log_error;
-
-out_start:
- mvpp2_start_dev(port);
- mvpp2_egress_enable(port);
- mvpp2_ingress_enable(port);
-
- return 0;
-log_error:
- netdev_err(dev, "failed to change MTU\n");
- return err;
-}
-
-static void
-mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
-{
- struct mvpp2_port *port = netdev_priv(dev);
- unsigned int start;
- int cpu;
-
- for_each_possible_cpu(cpu) {
- struct mvpp2_pcpu_stats *cpu_stats;
- u64 rx_packets;
- u64 rx_bytes;
- u64 tx_packets;
- u64 tx_bytes;
-
- cpu_stats = per_cpu_ptr(port->stats, cpu);
- do {
- start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
- rx_packets = cpu_stats->rx_packets;
- rx_bytes = cpu_stats->rx_bytes;
- tx_packets = cpu_stats->tx_packets;
- tx_bytes = cpu_stats->tx_bytes;
- } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
-
- stats->rx_packets += rx_packets;
- stats->rx_bytes += rx_bytes;
- stats->tx_packets += tx_packets;
- stats->tx_bytes += tx_bytes;
- }
-
- stats->rx_errors = dev->stats.rx_errors;
- stats->rx_dropped = dev->stats.rx_dropped;
- stats->tx_dropped = dev->stats.tx_dropped;
-}
-
-static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
- int ret;
-
- if (!dev->phydev)
- return -ENOTSUPP;
-
- ret = phy_mii_ioctl(dev->phydev, ifr, cmd);
- if (!ret)
- mvpp2_link_event(dev);
-
- return ret;
-}
-
-static int mvpp2_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
-{
- struct mvpp2_port *port = netdev_priv(dev);
- int ret;
-
- ret = mvpp2_prs_vid_entry_add(port, vid);
- if (ret)
- netdev_err(dev, "rx-vlan-filter offloading cannot accept more than %d VIDs per port\n",
- MVPP2_PRS_VLAN_FILT_MAX - 1);
- return ret;
-}
-
-static int mvpp2_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
-{
- struct mvpp2_port *port = netdev_priv(dev);
-
- mvpp2_prs_vid_entry_remove(port, vid);
- return 0;
-}
-
-static int mvpp2_set_features(struct net_device *dev,
- netdev_features_t features)
-{
- netdev_features_t changed = dev->features ^ features;
- struct mvpp2_port *port = netdev_priv(dev);
-
- if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
- if (features & NETIF_F_HW_VLAN_CTAG_FILTER) {
- mvpp2_prs_vid_enable_filtering(port);
- } else {
- /* Invalidate all registered VID filters for this
- * port
- */
- mvpp2_prs_vid_remove_all(port);
-
- mvpp2_prs_vid_disable_filtering(port);
- }
- }
-
- return 0;
-}
-
-/* Ethtool methods */
-
-/* Set interrupt coalescing for ethtool */
-static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
- struct ethtool_coalesce *c)
-{
- struct mvpp2_port *port = netdev_priv(dev);
- int queue;
-
- for (queue = 0; queue < port->nrxqs; queue++) {
- struct mvpp2_rx_queue *rxq = port->rxqs[queue];
-
- rxq->time_coal = c->rx_coalesce_usecs;
- rxq->pkts_coal = c->rx_max_coalesced_frames;
- mvpp2_rx_pkts_coal_set(port, rxq);
- mvpp2_rx_time_coal_set(port, rxq);
- }
-
- if (port->has_tx_irqs) {
- port->tx_time_coal = c->tx_coalesce_usecs;
- mvpp2_tx_time_coal_set(port);
- }
-
- for (queue = 0; queue < port->ntxqs; queue++) {
- struct mvpp2_tx_queue *txq = port->txqs[queue];
-
- txq->done_pkts_coal = c->tx_max_coalesced_frames;
-
- if (port->has_tx_irqs)
- mvpp2_tx_pkts_coal_set(port, txq);
- }
-
- return 0;
-}
-
-/* Get interrupt coalescing for ethtool */
-static int mvpp2_ethtool_get_coalesce(struct net_device *dev,
- struct ethtool_coalesce *c)
-{
- struct mvpp2_port *port = netdev_priv(dev);
-
- c->rx_coalesce_usecs = port->rxqs[0]->time_coal;
- c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal;
- c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal;
- c->tx_coalesce_usecs = port->tx_time_coal;
- return 0;
-}
-
-static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *drvinfo)
-{
- strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME,
- sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION,
- sizeof(drvinfo->version));
- strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
- sizeof(drvinfo->bus_info));
-}
-
-static void mvpp2_ethtool_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ring)
-{
- struct mvpp2_port *port = netdev_priv(dev);
-
- ring->rx_max_pending = MVPP2_MAX_RXD_MAX;
- ring->tx_max_pending = MVPP2_MAX_TXD_MAX;
- ring->rx_pending = port->rx_ring_size;
- ring->tx_pending = port->tx_ring_size;
-}
-
-static int mvpp2_ethtool_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ring)
-{
- struct mvpp2_port *port = netdev_priv(dev);
- u16 prev_rx_ring_size = port->rx_ring_size;
- u16 prev_tx_ring_size = port->tx_ring_size;
- int err;
-
- err = mvpp2_check_ringparam_valid(dev, ring);
- if (err)
- return err;
-
- if (!netif_running(dev)) {
- port->rx_ring_size = ring->rx_pending;
- port->tx_ring_size = ring->tx_pending;
- return 0;
- }
-
- /* The interface is running, so we have to force a
- * reallocation of the queues
- */
- mvpp2_stop_dev(port);
- mvpp2_cleanup_rxqs(port);
- mvpp2_cleanup_txqs(port);
-
- port->rx_ring_size = ring->rx_pending;
- port->tx_ring_size = ring->tx_pending;
-
- err = mvpp2_setup_rxqs(port);
- if (err) {
- /* Reallocate Rx queues with the original ring size */
- port->rx_ring_size = prev_rx_ring_size;
- ring->rx_pending = prev_rx_ring_size;
- err = mvpp2_setup_rxqs(port);
- if (err)
- goto err_out;
- }
- err = mvpp2_setup_txqs(port);
- if (err) {
- /* Reallocate Tx queues with the original ring size */
- port->tx_ring_size = prev_tx_ring_size;
- ring->tx_pending = prev_tx_ring_size;
- err = mvpp2_setup_txqs(port);
- if (err)
- goto err_clean_rxqs;
- }
-
- mvpp2_start_dev(port);
- mvpp2_egress_enable(port);
- mvpp2_ingress_enable(port);
-
- return 0;
-
-err_clean_rxqs:
- mvpp2_cleanup_rxqs(port);
-err_out:
-	netdev_err(dev, "failed to change ring parameters\n");
- return err;
-}
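
Since the handler above reallocates the queues in place while the interface stays up, it is reachable through the standard ethtool ring knobs; a usage sketch (the interface name and sizes are hypothetical):

	# ethtool -g eth0            # query current and maximum ring sizes
	# ethtool -G eth0 rx 512 tx 1024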
-
-/* Device ops */
-
-static const struct net_device_ops mvpp2_netdev_ops = {
- .ndo_open = mvpp2_open,
- .ndo_stop = mvpp2_stop,
- .ndo_start_xmit = mvpp2_tx,
- .ndo_set_rx_mode = mvpp2_set_rx_mode,
- .ndo_set_mac_address = mvpp2_set_mac_address,
- .ndo_change_mtu = mvpp2_change_mtu,
- .ndo_get_stats64 = mvpp2_get_stats64,
- .ndo_do_ioctl = mvpp2_ioctl,
- .ndo_vlan_rx_add_vid = mvpp2_vlan_rx_add_vid,
- .ndo_vlan_rx_kill_vid = mvpp2_vlan_rx_kill_vid,
- .ndo_set_features = mvpp2_set_features,
-};
-
-static const struct ethtool_ops mvpp2_eth_tool_ops = {
- .nway_reset = phy_ethtool_nway_reset,
- .get_link = ethtool_op_get_link,
- .set_coalesce = mvpp2_ethtool_set_coalesce,
- .get_coalesce = mvpp2_ethtool_get_coalesce,
- .get_drvinfo = mvpp2_ethtool_get_drvinfo,
- .get_ringparam = mvpp2_ethtool_get_ringparam,
- .set_ringparam = mvpp2_ethtool_set_ringparam,
- .get_strings = mvpp2_ethtool_get_strings,
- .get_ethtool_stats = mvpp2_ethtool_get_stats,
- .get_sset_count = mvpp2_ethtool_get_sset_count,
- .get_link_ksettings = phy_ethtool_get_link_ksettings,
- .set_link_ksettings = phy_ethtool_set_link_ksettings,
-};
-
-/* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that
- * had a single IRQ defined per-port.
- */
-static int mvpp2_simple_queue_vectors_init(struct mvpp2_port *port,
- struct device_node *port_node)
-{
- struct mvpp2_queue_vector *v = &port->qvecs[0];
-
- v->first_rxq = 0;
- v->nrxqs = port->nrxqs;
- v->type = MVPP2_QUEUE_VECTOR_SHARED;
- v->sw_thread_id = 0;
- v->sw_thread_mask = *cpumask_bits(cpu_online_mask);
- v->port = port;
- v->irq = irq_of_parse_and_map(port_node, 0);
- if (v->irq <= 0)
- return -EINVAL;
- netif_napi_add(port->dev, &v->napi, mvpp2_poll,
- NAPI_POLL_WEIGHT);
-
- port->nqvecs = 1;
-
- return 0;
-}
-
-static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port,
- struct device_node *port_node)
-{
- struct mvpp2_queue_vector *v;
- int i, ret;
-
- port->nqvecs = num_possible_cpus();
- if (queue_mode == MVPP2_QDIST_SINGLE_MODE)
- port->nqvecs += 1;
-
- for (i = 0; i < port->nqvecs; i++) {
- char irqname[16];
-
- v = port->qvecs + i;
-
- v->port = port;
- v->type = MVPP2_QUEUE_VECTOR_PRIVATE;
- v->sw_thread_id = i;
- v->sw_thread_mask = BIT(i);
-
- snprintf(irqname, sizeof(irqname), "tx-cpu%d", i);
-
- if (queue_mode == MVPP2_QDIST_MULTI_MODE) {
- v->first_rxq = i * MVPP2_DEFAULT_RXQ;
- v->nrxqs = MVPP2_DEFAULT_RXQ;
- } else if (queue_mode == MVPP2_QDIST_SINGLE_MODE &&
- i == (port->nqvecs - 1)) {
- v->first_rxq = 0;
- v->nrxqs = port->nrxqs;
- v->type = MVPP2_QUEUE_VECTOR_SHARED;
- strncpy(irqname, "rx-shared", sizeof(irqname));
- }
-
- if (port_node)
- v->irq = of_irq_get_byname(port_node, irqname);
- else
- v->irq = fwnode_irq_get(port->fwnode, i);
- if (v->irq <= 0) {
- ret = -EINVAL;
- goto err;
- }
-
- netif_napi_add(port->dev, &v->napi, mvpp2_poll,
- NAPI_POLL_WEIGHT);
- }
-
- return 0;
-
-err:
- for (i = 0; i < port->nqvecs; i++)
- irq_dispose_mapping(port->qvecs[i].irq);
- return ret;
-}
-
-static int mvpp2_queue_vectors_init(struct mvpp2_port *port,
- struct device_node *port_node)
-{
- if (port->has_tx_irqs)
- return mvpp2_multi_queue_vectors_init(port, port_node);
- else
- return mvpp2_simple_queue_vectors_init(port, port_node);
-}
-
-static void mvpp2_queue_vectors_deinit(struct mvpp2_port *port)
-{
- int i;
-
- for (i = 0; i < port->nqvecs; i++)
- irq_dispose_mapping(port->qvecs[i].irq);
-}
-
-/* Configure Rx queue group interrupt for this port */
-static void mvpp2_rx_irqs_setup(struct mvpp2_port *port)
-{
- struct mvpp2 *priv = port->priv;
- u32 val;
- int i;
-
- if (priv->hw_version == MVPP21) {
- mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id),
- port->nrxqs);
- return;
- }
-
- /* Handle the more complicated PPv2.2 case */
- for (i = 0; i < port->nqvecs; i++) {
- struct mvpp2_queue_vector *qv = port->qvecs + i;
-
- if (!qv->nrxqs)
- continue;
-
- val = qv->sw_thread_id;
- val |= port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET;
- mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val);
-
- val = qv->first_rxq;
- val |= qv->nrxqs << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET;
- mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val);
- }
-}
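
To make the PPv2.2 register pair concrete, here is a minimal userspace sketch (editorial, not driver code; the values are hypothetical: port 1, sw_thread_id 2, first_rxq 8, nrxqs 4) that computes what the loop above would write:

	#include <stdio.h>

	int main(void)
	{
		/* MVPP22_ISR_RXQ_GROUP_INDEX_REG: sw_thread_id | port << 7 */
		unsigned int index = 2u | (1u << 7);
		/* MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG: first_rxq | nrxqs << 8 */
		unsigned int subgroup = 8u | (4u << 8);

		printf("index=0x%x subgroup=0x%x\n", index, subgroup); /* 0x82 0x408 */
		return 0;
	}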
-
-/* Initialize port HW */
-static int mvpp2_port_init(struct mvpp2_port *port)
-{
- struct device *dev = port->dev->dev.parent;
- struct mvpp2 *priv = port->priv;
- struct mvpp2_txq_pcpu *txq_pcpu;
- int queue, cpu, err;
-
- /* Checks for hardware constraints */
- if (port->first_rxq + port->nrxqs >
- MVPP2_MAX_PORTS * priv->max_port_rxqs)
- return -EINVAL;
-
- if (port->nrxqs % 4 || (port->nrxqs > priv->max_port_rxqs) ||
- (port->ntxqs > MVPP2_MAX_TXQ))
- return -EINVAL;
-
- /* Disable port */
- mvpp2_egress_disable(port);
- mvpp2_port_disable(port);
-
- port->tx_time_coal = MVPP2_TXDONE_COAL_USEC;
-
- port->txqs = devm_kcalloc(dev, port->ntxqs, sizeof(*port->txqs),
- GFP_KERNEL);
- if (!port->txqs)
- return -ENOMEM;
-
-	/* Associate physical Tx queues with this port and initialize them.
-	 * The mapping is predefined.
-	 */
- for (queue = 0; queue < port->ntxqs; queue++) {
- int queue_phy_id = mvpp2_txq_phys(port->id, queue);
- struct mvpp2_tx_queue *txq;
-
- txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
- if (!txq) {
- err = -ENOMEM;
- goto err_free_percpu;
- }
-
- txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu);
- if (!txq->pcpu) {
- err = -ENOMEM;
- goto err_free_percpu;
- }
-
- txq->id = queue_phy_id;
- txq->log_id = queue;
- txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
- for_each_present_cpu(cpu) {
- txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
- txq_pcpu->cpu = cpu;
- }
-
- port->txqs[queue] = txq;
- }
-
- port->rxqs = devm_kcalloc(dev, port->nrxqs, sizeof(*port->rxqs),
- GFP_KERNEL);
- if (!port->rxqs) {
- err = -ENOMEM;
- goto err_free_percpu;
- }
-
- /* Allocate and initialize Rx queue for this port */
- for (queue = 0; queue < port->nrxqs; queue++) {
- struct mvpp2_rx_queue *rxq;
-
- /* Map physical Rx queue to port's logical Rx queue */
- rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
- if (!rxq) {
- err = -ENOMEM;
- goto err_free_percpu;
- }
- /* Map this Rx queue to a physical queue */
- rxq->id = port->first_rxq + queue;
- rxq->port = port->id;
- rxq->logic_rxq = queue;
-
- port->rxqs[queue] = rxq;
- }
-
- mvpp2_rx_irqs_setup(port);
-
- /* Create Rx descriptor rings */
- for (queue = 0; queue < port->nrxqs; queue++) {
- struct mvpp2_rx_queue *rxq = port->rxqs[queue];
-
- rxq->size = port->rx_ring_size;
- rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
- rxq->time_coal = MVPP2_RX_COAL_USEC;
- }
-
- mvpp2_ingress_disable(port);
-
- /* Port default configuration */
- mvpp2_defaults_set(port);
-
- /* Port's classifier configuration */
- mvpp2_cls_oversize_rxq_set(port);
- mvpp2_cls_port_config(port);
-
- /* Provide an initial Rx packet size */
- port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);
-
- /* Initialize pools for swf */
- err = mvpp2_swf_bm_pool_init(port);
- if (err)
- goto err_free_percpu;
-
- return 0;
-
-err_free_percpu:
- for (queue = 0; queue < port->ntxqs; queue++) {
- if (!port->txqs[queue])
- continue;
- free_percpu(port->txqs[queue]->pcpu);
- }
- return err;
-}
-
-/* Checks if the port DT description has the TX interrupts
- * described. On PPv2.1, there are no such interrupts. On PPv2.2,
- * they are available, but we need to keep support for old DTs.
- */
-static bool mvpp2_port_has_tx_irqs(struct mvpp2 *priv,
- struct device_node *port_node)
-{
- char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1",
- "tx-cpu2", "tx-cpu3" };
- int ret, i;
-
- if (priv->hw_version == MVPP21)
- return false;
-
- for (i = 0; i < 5; i++) {
- ret = of_property_match_string(port_node, "interrupt-names",
- irqs[i]);
- if (ret < 0)
- return false;
- }
-
- return true;
-}
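
For illustration, a PPv2.2 port node satisfying this check might carry an interrupt-names property like the hypothetical fragment below (not taken from a real board file; the names are the ones probed above, and the "link" entry matches the optional IRQ requested later in mvpp2_port_probe):

	eth0: ethernet-port@0 {
		interrupt-names = "tx-cpu0", "tx-cpu1", "tx-cpu2",
				  "tx-cpu3", "rx-shared", "link";
	};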
-
-static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv,
- struct fwnode_handle *fwnode,
- char **mac_from)
-{
- struct mvpp2_port *port = netdev_priv(dev);
- char hw_mac_addr[ETH_ALEN] = {0};
- char fw_mac_addr[ETH_ALEN];
-
- if (fwnode_get_mac_address(fwnode, fw_mac_addr, ETH_ALEN)) {
- *mac_from = "firmware node";
- ether_addr_copy(dev->dev_addr, fw_mac_addr);
- return;
- }
-
- if (priv->hw_version == MVPP21) {
- mvpp21_get_mac_address(port, hw_mac_addr);
- if (is_valid_ether_addr(hw_mac_addr)) {
- *mac_from = "hardware";
- ether_addr_copy(dev->dev_addr, hw_mac_addr);
- return;
- }
- }
-
- *mac_from = "random";
- eth_hw_addr_random(dev);
-}
-
-/* Ports initialization */
-static int mvpp2_port_probe(struct platform_device *pdev,
- struct fwnode_handle *port_fwnode,
- struct mvpp2 *priv)
-{
- struct device_node *phy_node;
- struct phy *comphy = NULL;
- struct mvpp2_port *port;
- struct mvpp2_port_pcpu *port_pcpu;
- struct device_node *port_node = to_of_node(port_fwnode);
- struct net_device *dev;
- struct resource *res;
- char *mac_from = "";
- unsigned int ntxqs, nrxqs;
- bool has_tx_irqs;
- u32 id;
- int features;
- int phy_mode;
- int err, i, cpu;
-
- if (port_node) {
- has_tx_irqs = mvpp2_port_has_tx_irqs(priv, port_node);
- } else {
- has_tx_irqs = true;
- queue_mode = MVPP2_QDIST_MULTI_MODE;
- }
-
- if (!has_tx_irqs)
- queue_mode = MVPP2_QDIST_SINGLE_MODE;
-
- ntxqs = MVPP2_MAX_TXQ;
- if (priv->hw_version == MVPP22 && queue_mode == MVPP2_QDIST_MULTI_MODE)
- nrxqs = MVPP2_DEFAULT_RXQ * num_possible_cpus();
- else
- nrxqs = MVPP2_DEFAULT_RXQ;
-
- dev = alloc_etherdev_mqs(sizeof(*port), ntxqs, nrxqs);
- if (!dev)
- return -ENOMEM;
-
- if (port_node)
- phy_node = of_parse_phandle(port_node, "phy", 0);
- else
- phy_node = NULL;
-
- phy_mode = fwnode_get_phy_mode(port_fwnode);
- if (phy_mode < 0) {
- dev_err(&pdev->dev, "incorrect phy mode\n");
- err = phy_mode;
- goto err_free_netdev;
- }
-
- if (port_node) {
- comphy = devm_of_phy_get(&pdev->dev, port_node, NULL);
- if (IS_ERR(comphy)) {
- if (PTR_ERR(comphy) == -EPROBE_DEFER) {
- err = -EPROBE_DEFER;
- goto err_free_netdev;
- }
- comphy = NULL;
- }
- }
-
- if (fwnode_property_read_u32(port_fwnode, "port-id", &id)) {
- err = -EINVAL;
- dev_err(&pdev->dev, "missing port-id value\n");
- goto err_free_netdev;
- }
-
- dev->tx_queue_len = MVPP2_MAX_TXD_MAX;
- dev->watchdog_timeo = 5 * HZ;
- dev->netdev_ops = &mvpp2_netdev_ops;
- dev->ethtool_ops = &mvpp2_eth_tool_ops;
-
- port = netdev_priv(dev);
- port->dev = dev;
- port->fwnode = port_fwnode;
- port->ntxqs = ntxqs;
- port->nrxqs = nrxqs;
- port->priv = priv;
- port->has_tx_irqs = has_tx_irqs;
-
- err = mvpp2_queue_vectors_init(port, port_node);
- if (err)
- goto err_free_netdev;
-
- if (port_node)
- port->link_irq = of_irq_get_byname(port_node, "link");
- else
- port->link_irq = fwnode_irq_get(port_fwnode, port->nqvecs + 1);
- if (port->link_irq == -EPROBE_DEFER) {
- err = -EPROBE_DEFER;
- goto err_deinit_qvecs;
- }
- if (port->link_irq <= 0)
- /* the link irq is optional */
- port->link_irq = 0;
-
- if (fwnode_property_read_bool(port_fwnode, "marvell,loopback"))
- port->flags |= MVPP2_F_LOOPBACK;
-
- port->id = id;
- if (priv->hw_version == MVPP21)
- port->first_rxq = port->id * port->nrxqs;
- else
- port->first_rxq = port->id * priv->max_port_rxqs;
-
- port->phy_node = phy_node;
- port->phy_interface = phy_mode;
- port->comphy = comphy;
-
- if (priv->hw_version == MVPP21) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, 2 + id);
- port->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(port->base)) {
- err = PTR_ERR(port->base);
- goto err_free_irq;
- }
-
- port->stats_base = port->priv->lms_base +
- MVPP21_MIB_COUNTERS_OFFSET +
- port->gop_id * MVPP21_MIB_COUNTERS_PORT_SZ;
- } else {
- if (fwnode_property_read_u32(port_fwnode, "gop-port-id",
- &port->gop_id)) {
- err = -EINVAL;
- dev_err(&pdev->dev, "missing gop-port-id value\n");
- goto err_deinit_qvecs;
- }
-
- port->base = priv->iface_base + MVPP22_GMAC_BASE(port->gop_id);
- port->stats_base = port->priv->iface_base +
- MVPP22_MIB_COUNTERS_OFFSET +
- port->gop_id * MVPP22_MIB_COUNTERS_PORT_SZ;
- }
-
- /* Alloc per-cpu and ethtool stats */
- port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats);
- if (!port->stats) {
- err = -ENOMEM;
- goto err_free_irq;
- }
-
- port->ethtool_stats = devm_kcalloc(&pdev->dev,
- ARRAY_SIZE(mvpp2_ethtool_regs),
- sizeof(u64), GFP_KERNEL);
- if (!port->ethtool_stats) {
- err = -ENOMEM;
- goto err_free_stats;
- }
-
- mutex_init(&port->gather_stats_lock);
- INIT_DELAYED_WORK(&port->stats_work, mvpp2_gather_hw_statistics);
-
- mvpp2_port_copy_mac_addr(dev, priv, port_fwnode, &mac_from);
-
- port->tx_ring_size = MVPP2_MAX_TXD_DFLT;
- port->rx_ring_size = MVPP2_MAX_RXD_DFLT;
- SET_NETDEV_DEV(dev, &pdev->dev);
-
- err = mvpp2_port_init(port);
- if (err < 0) {
- dev_err(&pdev->dev, "failed to init port %d\n", id);
- goto err_free_stats;
- }
-
- mvpp2_port_periodic_xon_disable(port);
-
- if (priv->hw_version == MVPP21)
- mvpp2_port_fc_adv_enable(port);
-
- mvpp2_port_reset(port);
-
- port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
- if (!port->pcpu) {
- err = -ENOMEM;
- goto err_free_txq_pcpu;
- }
-
- if (!port->has_tx_irqs) {
- for_each_present_cpu(cpu) {
- port_pcpu = per_cpu_ptr(port->pcpu, cpu);
-
- hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL_PINNED);
- port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
- port_pcpu->timer_scheduled = false;
-
- tasklet_init(&port_pcpu->tx_done_tasklet,
- mvpp2_tx_proc_cb,
- (unsigned long)dev);
- }
- }
-
- features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_TSO;
- dev->features = features | NETIF_F_RXCSUM;
- dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO |
- NETIF_F_HW_VLAN_CTAG_FILTER;
-
- if (port->pool_long->id == MVPP2_BM_JUMBO && port->id != 0) {
- dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
- dev->hw_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
- }
-
- dev->vlan_features |= features;
- dev->gso_max_segs = MVPP2_MAX_TSO_SEGS;
- dev->priv_flags |= IFF_UNICAST_FLT;
-
- /* MTU range: 68 - 9704 */
- dev->min_mtu = ETH_MIN_MTU;
-	/* 9704 == 9728 - 20, rounded down to a multiple of 8 */
- dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE;
-
- err = register_netdev(dev);
- if (err < 0) {
- dev_err(&pdev->dev, "failed to register netdev\n");
- goto err_free_port_pcpu;
- }
- netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);
-
- priv->port_list[priv->port_count++] = port;
-
- return 0;
-
-err_free_port_pcpu:
- free_percpu(port->pcpu);
-err_free_txq_pcpu:
- for (i = 0; i < port->ntxqs; i++)
- free_percpu(port->txqs[i]->pcpu);
-err_free_stats:
- free_percpu(port->stats);
-err_free_irq:
- if (port->link_irq)
- irq_dispose_mapping(port->link_irq);
-err_deinit_qvecs:
- mvpp2_queue_vectors_deinit(port);
-err_free_netdev:
- of_node_put(phy_node);
- free_netdev(dev);
- return err;
-}
-
-/* Port removal routine */
-static void mvpp2_port_remove(struct mvpp2_port *port)
-{
- int i;
-
- unregister_netdev(port->dev);
- of_node_put(port->phy_node);
- free_percpu(port->pcpu);
- free_percpu(port->stats);
- for (i = 0; i < port->ntxqs; i++)
- free_percpu(port->txqs[i]->pcpu);
- mvpp2_queue_vectors_deinit(port);
- if (port->link_irq)
- irq_dispose_mapping(port->link_irq);
- free_netdev(port->dev);
-}
-
-/* Initialize decoding windows */
-static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
- struct mvpp2 *priv)
-{
- u32 win_enable;
- int i;
-
- for (i = 0; i < 6; i++) {
- mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
- mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);
-
- if (i < 4)
- mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
- }
-
- win_enable = 0;
-
- for (i = 0; i < dram->num_cs; i++) {
- const struct mbus_dram_window *cs = dram->cs + i;
-
- mvpp2_write(priv, MVPP2_WIN_BASE(i),
- (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
- dram->mbus_dram_target_id);
-
- mvpp2_write(priv, MVPP2_WIN_SIZE(i),
- (cs->size - 1) & 0xffff0000);
-
- win_enable |= (1 << i);
- }
-
- mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
-}
-
-/* Initialize Rx FIFOs */
-static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
-{
- int port;
-
- for (port = 0; port < MVPP2_MAX_PORTS; port++) {
- mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
- MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
- mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
- MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB);
- }
-
- mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
- MVPP2_RX_FIFO_PORT_MIN_PKT);
- mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
-}
-
-static void mvpp22_rx_fifo_init(struct mvpp2 *priv)
-{
- int port;
-
- /* The FIFO size parameters are set depending on the maximum speed a
- * given port can handle:
- * - Port 0: 10Gbps
- * - Port 1: 2.5Gbps
- * - Ports 2 and 3: 1Gbps
- */
-
- mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(0),
- MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB);
- mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(0),
- MVPP2_RX_FIFO_PORT_ATTR_SIZE_32KB);
-
- mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(1),
- MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB);
- mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(1),
- MVPP2_RX_FIFO_PORT_ATTR_SIZE_8KB);
-
- for (port = 2; port < MVPP2_MAX_PORTS; port++) {
- mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
- MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
- mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
- MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB);
- }
-
- mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
- MVPP2_RX_FIFO_PORT_MIN_PKT);
- mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
-}
-
-/* Initialize Tx FIFOs: the total FIFO size is 19kB on PPv2.2 and 10G
- * interfaces must have a Tx FIFO size of 10kB. As only port 0 can do 10G,
- * configure its Tx FIFO size to 10kB and the other ports' Tx FIFO size
- * to 3kB.
- */
-static void mvpp22_tx_fifo_init(struct mvpp2 *priv)
-{
- int port, size, thrs;
-
- for (port = 0; port < MVPP2_MAX_PORTS; port++) {
- if (port == 0) {
- size = MVPP22_TX_FIFO_DATA_SIZE_10KB;
- thrs = MVPP2_TX_FIFO_THRESHOLD_10KB;
- } else {
- size = MVPP22_TX_FIFO_DATA_SIZE_3KB;
- thrs = MVPP2_TX_FIFO_THRESHOLD_3KB;
- }
- mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port), size);
- mvpp2_write(priv, MVPP22_TX_FIFO_THRESH_REG(port), thrs);
- }
-}
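
As a quick cross-check of the 19kB budget stated above — an editorial sketch, not driver code; the size macros are encoded in kB units, as the threshold macros (size * 1024) in the header confirm — the per-port allocations do add up:

	/* 10kB (port 0) + 3 * 3kB (ports 1-3) == 19kB total */
	static inline void mvpp22_tx_fifo_budget_check(void)
	{
		BUILD_BUG_ON(MVPP22_TX_FIFO_DATA_SIZE_10KB +
			     (MVPP2_MAX_PORTS - 1) * MVPP22_TX_FIFO_DATA_SIZE_3KB != 19);
	}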
-
-static void mvpp2_axi_init(struct mvpp2 *priv)
-{
- u32 val, rdval, wrval;
-
- mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0);
-
- /* AXI Bridge Configuration */
-
- rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE
- << MVPP22_AXI_ATTR_CACHE_OFFS;
- rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
- << MVPP22_AXI_ATTR_DOMAIN_OFFS;
-
- wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE
- << MVPP22_AXI_ATTR_CACHE_OFFS;
- wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
- << MVPP22_AXI_ATTR_DOMAIN_OFFS;
-
- /* BM */
- mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval);
- mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval);
-
- /* Descriptors */
- mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval);
- mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval);
- mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval);
- mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval);
-
- /* Buffer Data */
- mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval);
- mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval);
-
- val = MVPP22_AXI_CODE_CACHE_NON_CACHE
- << MVPP22_AXI_CODE_CACHE_OFFS;
- val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM
- << MVPP22_AXI_CODE_DOMAIN_OFFS;
- mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val);
- mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val);
-
- val = MVPP22_AXI_CODE_CACHE_RD_CACHE
- << MVPP22_AXI_CODE_CACHE_OFFS;
- val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
- << MVPP22_AXI_CODE_DOMAIN_OFFS;
-
- mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val);
-
- val = MVPP22_AXI_CODE_CACHE_WR_CACHE
- << MVPP22_AXI_CODE_CACHE_OFFS;
- val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
- << MVPP22_AXI_CODE_DOMAIN_OFFS;
-
- mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val);
-}
-
-/* Initialize the network controller's common hardware */
-static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
-{
- const struct mbus_dram_target_info *dram_target_info;
- int err, i;
- u32 val;
-
- /* MBUS windows configuration */
- dram_target_info = mv_mbus_dram_info();
- if (dram_target_info)
- mvpp2_conf_mbus_windows(dram_target_info, priv);
-
- if (priv->hw_version == MVPP22)
- mvpp2_axi_init(priv);
-
- /* Disable HW PHY polling */
- if (priv->hw_version == MVPP21) {
- val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
- val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
- writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
- } else {
- val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
- val &= ~MVPP22_SMI_POLLING_EN;
- writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
- }
-
- /* Allocate and initialize aggregated TXQs */
- priv->aggr_txqs = devm_kcalloc(&pdev->dev, num_present_cpus(),
- sizeof(*priv->aggr_txqs),
- GFP_KERNEL);
- if (!priv->aggr_txqs)
- return -ENOMEM;
-
- for_each_present_cpu(i) {
- priv->aggr_txqs[i].id = i;
- priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
- err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i], i, priv);
- if (err < 0)
- return err;
- }
-
-	/* FIFO init */
- if (priv->hw_version == MVPP21) {
- mvpp2_rx_fifo_init(priv);
- } else {
- mvpp22_rx_fifo_init(priv);
- mvpp22_tx_fifo_init(priv);
- }
-
- if (priv->hw_version == MVPP21)
- writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
- priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);
-
-	/* Allow cache snoop when transmitting packets */
- mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);
-
- /* Buffer Manager initialization */
- err = mvpp2_bm_init(pdev, priv);
- if (err < 0)
- return err;
-
- /* Parser default initialization */
- err = mvpp2_prs_default_init(pdev, priv);
- if (err < 0)
- return err;
-
- /* Classifier default initialization */
- mvpp2_cls_init(priv);
-
- return 0;
-}
-
-static int mvpp2_probe(struct platform_device *pdev)
-{
- const struct acpi_device_id *acpi_id;
- struct fwnode_handle *fwnode = pdev->dev.fwnode;
- struct fwnode_handle *port_fwnode;
- struct mvpp2 *priv;
- struct resource *res;
- void __iomem *base;
- int i;
- int err;
-
- priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- if (has_acpi_companion(&pdev->dev)) {
- acpi_id = acpi_match_device(pdev->dev.driver->acpi_match_table,
- &pdev->dev);
- priv->hw_version = (unsigned long)acpi_id->driver_data;
- } else {
- priv->hw_version =
- (unsigned long)of_device_get_match_data(&pdev->dev);
- }
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(base))
- return PTR_ERR(base);
-
- if (priv->hw_version == MVPP21) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- priv->lms_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(priv->lms_base))
- return PTR_ERR(priv->lms_base);
- } else {
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (has_acpi_companion(&pdev->dev)) {
-			/* In case the MDIO memory region is declared in
-			 * the ACPI, it can already appear as 'in-use'
-			 * in the OS. Because it is overlapped by the
-			 * second region of the network controller, make
-			 * sure it is released before requesting it again.
-			 * The mvpp2 driver takes care to avoid concurrent
-			 * access to this memory region.
-			 */
- release_resource(res);
- }
- priv->iface_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(priv->iface_base))
- return PTR_ERR(priv->iface_base);
- }
-
- if (priv->hw_version == MVPP22 && dev_of_node(&pdev->dev)) {
- priv->sysctrl_base =
- syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
- "marvell,system-controller");
- if (IS_ERR(priv->sysctrl_base))
- /* The system controller regmap is optional for dt
- * compatibility reasons. When not provided, the
- * configuration of the GoP relies on the
- * firmware/bootloader.
- */
- priv->sysctrl_base = NULL;
- }
-
- mvpp2_setup_bm_pool();
-
- for (i = 0; i < MVPP2_MAX_THREADS; i++) {
- u32 addr_space_sz;
-
- addr_space_sz = (priv->hw_version == MVPP21 ?
- MVPP21_ADDR_SPACE_SZ : MVPP22_ADDR_SPACE_SZ);
- priv->swth_base[i] = base + i * addr_space_sz;
- }
-
- if (priv->hw_version == MVPP21)
- priv->max_port_rxqs = 8;
- else
- priv->max_port_rxqs = 32;
-
- if (dev_of_node(&pdev->dev)) {
- priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
- if (IS_ERR(priv->pp_clk))
- return PTR_ERR(priv->pp_clk);
- err = clk_prepare_enable(priv->pp_clk);
- if (err < 0)
- return err;
-
- priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
- if (IS_ERR(priv->gop_clk)) {
- err = PTR_ERR(priv->gop_clk);
- goto err_pp_clk;
- }
- err = clk_prepare_enable(priv->gop_clk);
- if (err < 0)
- goto err_pp_clk;
-
- if (priv->hw_version == MVPP22) {
- priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk");
- if (IS_ERR(priv->mg_clk)) {
- err = PTR_ERR(priv->mg_clk);
- goto err_gop_clk;
- }
-
- err = clk_prepare_enable(priv->mg_clk);
- if (err < 0)
- goto err_gop_clk;
-
- priv->mg_core_clk = devm_clk_get(&pdev->dev, "mg_core_clk");
- if (IS_ERR(priv->mg_core_clk)) {
- priv->mg_core_clk = NULL;
- } else {
- err = clk_prepare_enable(priv->mg_core_clk);
- if (err < 0)
- goto err_mg_clk;
- }
- }
-
- priv->axi_clk = devm_clk_get(&pdev->dev, "axi_clk");
- if (IS_ERR(priv->axi_clk)) {
- err = PTR_ERR(priv->axi_clk);
- if (err == -EPROBE_DEFER)
- goto err_mg_core_clk;
- priv->axi_clk = NULL;
- } else {
- err = clk_prepare_enable(priv->axi_clk);
- if (err < 0)
- goto err_mg_core_clk;
- }
-
- /* Get system's tclk rate */
- priv->tclk = clk_get_rate(priv->pp_clk);
- } else if (device_property_read_u32(&pdev->dev, "clock-frequency",
- &priv->tclk)) {
- dev_err(&pdev->dev, "missing clock-frequency value\n");
- return -EINVAL;
- }
-
- if (priv->hw_version == MVPP22) {
- err = dma_set_mask(&pdev->dev, MVPP2_DESC_DMA_MASK);
- if (err)
- goto err_axi_clk;
- /* Sadly, the BM pools all share the same register to
- * store the high 32 bits of their address. So they
- * must all have the same high 32 bits, which forces
- * us to restrict coherent memory to DMA_BIT_MASK(32).
- */
- err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (err)
- goto err_axi_clk;
- }
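
As an aside on the two mask calls above: when a device can use the same mask for streaming and coherent allocations, the kernel offers a combined helper. A hedged sketch of that variant, which does not apply here because the descriptors use a wider mask than the 32-bit coherent one:

	/* Only valid if the device could live with 32 bits for both */
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (err)
		goto err_axi_clk;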
-
- /* Initialize network controller */
- err = mvpp2_init(pdev, priv);
- if (err < 0) {
- dev_err(&pdev->dev, "failed to initialize controller\n");
- goto err_axi_clk;
- }
-
- /* Initialize ports */
- fwnode_for_each_available_child_node(fwnode, port_fwnode) {
- err = mvpp2_port_probe(pdev, port_fwnode, priv);
- if (err < 0)
- goto err_port_probe;
- }
-
- if (priv->port_count == 0) {
- dev_err(&pdev->dev, "no ports enabled\n");
- err = -ENODEV;
- goto err_axi_clk;
- }
-
-	/* Statistics must be gathered regularly because some of them (like
-	 * the packet and octet counters) are 32-bit registers and could
-	 * overflow quite quickly. For instance, a 10Gb link used at full
-	 * bandwidth with the smallest packets (64B) will overflow a 32-bit
-	 * counter in less than 30 seconds. Hence, use a workqueue to fill
-	 * 64-bit counters.
-	 */
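
The worked numbers behind that estimate, assuming standard Ethernet framing overhead (7B preamble + 1B SFD + 12B inter-frame gap around each 64B frame):

	frame rate          = 10^10 bit/s / (84 B * 8) ≈ 14.88e6 frames/s
	octet counter wrap  = 2^32 B / 1.25e9 B/s      ≈ 3.4 s
	frame counter wrap  = 2^32 / 14.88e6           ≈ 289 s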
- snprintf(priv->queue_name, sizeof(priv->queue_name),
- "stats-wq-%s%s", netdev_name(priv->port_list[0]->dev),
- priv->port_count > 1 ? "+" : "");
- priv->stats_queue = create_singlethread_workqueue(priv->queue_name);
- if (!priv->stats_queue) {
- err = -ENOMEM;
- goto err_port_probe;
- }
-
- platform_set_drvdata(pdev, priv);
- return 0;
-
-err_port_probe:
- i = 0;
- fwnode_for_each_available_child_node(fwnode, port_fwnode) {
- if (priv->port_list[i])
- mvpp2_port_remove(priv->port_list[i]);
- i++;
- }
-err_axi_clk:
- clk_disable_unprepare(priv->axi_clk);
-
-err_mg_core_clk:
- if (priv->hw_version == MVPP22)
- clk_disable_unprepare(priv->mg_core_clk);
-err_mg_clk:
- if (priv->hw_version == MVPP22)
- clk_disable_unprepare(priv->mg_clk);
-err_gop_clk:
- clk_disable_unprepare(priv->gop_clk);
-err_pp_clk:
- clk_disable_unprepare(priv->pp_clk);
- return err;
-}
-
-static int mvpp2_remove(struct platform_device *pdev)
-{
- struct mvpp2 *priv = platform_get_drvdata(pdev);
- struct fwnode_handle *fwnode = pdev->dev.fwnode;
- struct fwnode_handle *port_fwnode;
- int i = 0;
-
- flush_workqueue(priv->stats_queue);
- destroy_workqueue(priv->stats_queue);
-
- fwnode_for_each_available_child_node(fwnode, port_fwnode) {
- if (priv->port_list[i]) {
- mutex_destroy(&priv->port_list[i]->gather_stats_lock);
- mvpp2_port_remove(priv->port_list[i]);
- }
- i++;
- }
-
- for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
- struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];
-
- mvpp2_bm_pool_destroy(pdev, priv, bm_pool);
- }
-
- for_each_present_cpu(i) {
- struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i];
-
- dma_free_coherent(&pdev->dev,
- MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
- aggr_txq->descs,
- aggr_txq->descs_dma);
- }
-
- if (is_acpi_node(port_fwnode))
- return 0;
-
- clk_disable_unprepare(priv->axi_clk);
- clk_disable_unprepare(priv->mg_core_clk);
- clk_disable_unprepare(priv->mg_clk);
- clk_disable_unprepare(priv->pp_clk);
- clk_disable_unprepare(priv->gop_clk);
-
- return 0;
-}
-
-static const struct of_device_id mvpp2_match[] = {
- {
- .compatible = "marvell,armada-375-pp2",
- .data = (void *)MVPP21,
- },
- {
- .compatible = "marvell,armada-7k-pp22",
- .data = (void *)MVPP22,
- },
- { }
-};
-MODULE_DEVICE_TABLE(of, mvpp2_match);
-
-static const struct acpi_device_id mvpp2_acpi_match[] = {
- { "MRVL0110", MVPP22 },
- { },
-};
-MODULE_DEVICE_TABLE(acpi, mvpp2_acpi_match);
-
-static struct platform_driver mvpp2_driver = {
- .probe = mvpp2_probe,
- .remove = mvpp2_remove,
- .driver = {
- .name = MVPP2_DRIVER_NAME,
- .of_match_table = mvpp2_match,
- .acpi_match_table = ACPI_PTR(mvpp2_acpi_match),
- },
-};
-
-module_platform_driver(mvpp2_driver);
-
-MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
-MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/marvell/mvpp2/Makefile b/drivers/net/ethernet/marvell/mvpp2/Makefile
new file mode 100644
index 000000000000..4d11dd9e3246
--- /dev/null
+++ b/drivers/net/ethernet/marvell/mvpp2/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Marvell PPv2 driver.
+#
+obj-$(CONFIG_MVPP2) := mvpp2.o
+
+mvpp2-objs := mvpp2_main.o mvpp2_prs.o mvpp2_cls.o
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
new file mode 100644
index 000000000000..def00dc3eb4e
--- /dev/null
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
@@ -0,0 +1,1046 @@
+/*
+ * Definitions for Marvell PPv2 network controller for Armada 375 SoC.
+ *
+ * Copyright (C) 2014 Marvell
+ *
+ * Marcin Wojtas <mw@semihalf.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+#ifndef _MVPP2_H_
+#define _MVPP2_H_
+
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/phylink.h>
+
+/* Fifo Registers */
+#define MVPP2_RX_DATA_FIFO_SIZE_REG(port) (0x00 + 4 * (port))
+#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port) (0x20 + 4 * (port))
+#define MVPP2_RX_MIN_PKT_SIZE_REG 0x60
+#define MVPP2_RX_FIFO_INIT_REG 0x64
+#define MVPP22_TX_FIFO_THRESH_REG(port) (0x8840 + 4 * (port))
+#define MVPP22_TX_FIFO_SIZE_REG(port) (0x8860 + 4 * (port))
+
+/* RX DMA Top Registers */
+#define MVPP2_RX_CTRL_REG(port) (0x140 + 4 * (port))
+#define MVPP2_RX_LOW_LATENCY_PKT_SIZE(s) (((s) & 0xfff) << 16)
+#define MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK BIT(31)
+#define MVPP2_POOL_BUF_SIZE_REG(pool) (0x180 + 4 * (pool))
+#define MVPP2_POOL_BUF_SIZE_OFFSET 5
+#define MVPP2_RXQ_CONFIG_REG(rxq) (0x800 + 4 * (rxq))
+#define MVPP2_SNOOP_PKT_SIZE_MASK 0x1ff
+#define MVPP2_SNOOP_BUF_HDR_MASK BIT(9)
+#define MVPP2_RXQ_POOL_SHORT_OFFS 20
+#define MVPP21_RXQ_POOL_SHORT_MASK 0x700000
+#define MVPP22_RXQ_POOL_SHORT_MASK 0xf00000
+#define MVPP2_RXQ_POOL_LONG_OFFS 24
+#define MVPP21_RXQ_POOL_LONG_MASK 0x7000000
+#define MVPP22_RXQ_POOL_LONG_MASK 0xf000000
+#define MVPP2_RXQ_PACKET_OFFSET_OFFS 28
+#define MVPP2_RXQ_PACKET_OFFSET_MASK 0x70000000
+#define MVPP2_RXQ_DISABLE_MASK BIT(31)
+
+/* Top Registers */
+#define MVPP2_MH_REG(port) (0x5040 + 4 * (port))
+#define MVPP2_DSA_EXTENDED BIT(5)
+
+/* Parser Registers */
+#define MVPP2_PRS_INIT_LOOKUP_REG 0x1000
+#define MVPP2_PRS_PORT_LU_MAX 0xf
+#define MVPP2_PRS_PORT_LU_MASK(port) (0xff << ((port) * 4))
+#define MVPP2_PRS_PORT_LU_VAL(port, val) ((val) << ((port) * 4))
+#define MVPP2_PRS_INIT_OFFS_REG(port) (0x1004 + ((port) & 4))
+#define MVPP2_PRS_INIT_OFF_MASK(port) (0x3f << (((port) % 4) * 8))
+#define MVPP2_PRS_INIT_OFF_VAL(port, val) ((val) << (((port) % 4) * 8))
+#define MVPP2_PRS_MAX_LOOP_REG(port) (0x100c + ((port) & 4))
+#define MVPP2_PRS_MAX_LOOP_MASK(port) (0xff << (((port) % 4) * 8))
+#define MVPP2_PRS_MAX_LOOP_VAL(port, val) ((val) << (((port) % 4) * 8))
+#define MVPP2_PRS_TCAM_IDX_REG 0x1100
+#define MVPP2_PRS_TCAM_DATA_REG(idx) (0x1104 + (idx) * 4)
+#define MVPP2_PRS_TCAM_INV_MASK BIT(31)
+#define MVPP2_PRS_SRAM_IDX_REG 0x1200
+#define MVPP2_PRS_SRAM_DATA_REG(idx) (0x1204 + (idx) * 4)
+#define MVPP2_PRS_TCAM_CTRL_REG 0x1230
+#define MVPP2_PRS_TCAM_EN_MASK BIT(0)
+
+/* RSS Registers */
+#define MVPP22_RSS_INDEX 0x1500
+#define MVPP22_RSS_INDEX_TABLE_ENTRY(idx) (idx)
+#define MVPP22_RSS_INDEX_TABLE(idx) ((idx) << 8)
+#define MVPP22_RSS_INDEX_QUEUE(idx) ((idx) << 16)
+#define MVPP22_RSS_TABLE_ENTRY 0x1508
+#define MVPP22_RSS_TABLE 0x1510
+#define MVPP22_RSS_TABLE_POINTER(p) (p)
+#define MVPP22_RSS_WIDTH 0x150c
+
+/* Classifier Registers */
+#define MVPP2_CLS_MODE_REG 0x1800
+#define MVPP2_CLS_MODE_ACTIVE_MASK BIT(0)
+#define MVPP2_CLS_PORT_WAY_REG 0x1810
+#define MVPP2_CLS_PORT_WAY_MASK(port) (1 << (port))
+#define MVPP2_CLS_LKP_INDEX_REG 0x1814
+#define MVPP2_CLS_LKP_INDEX_WAY_OFFS 6
+#define MVPP2_CLS_LKP_TBL_REG 0x1818
+#define MVPP2_CLS_LKP_TBL_RXQ_MASK 0xff
+#define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK BIT(25)
+#define MVPP2_CLS_FLOW_INDEX_REG 0x1820
+#define MVPP2_CLS_FLOW_TBL0_REG 0x1824
+#define MVPP2_CLS_FLOW_TBL1_REG 0x1828
+#define MVPP2_CLS_FLOW_TBL2_REG 0x182c
+#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port) (0x1980 + ((port) * 4))
+#define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS 3
+#define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK 0x7
+#define MVPP2_CLS_SWFWD_P2HQ_REG(port) (0x19b0 + ((port) * 4))
+#define MVPP2_CLS_SWFWD_PCTRL_REG 0x19d0
+#define MVPP2_CLS_SWFWD_PCTRL_MASK(port) (1 << (port))
+
+/* Descriptor Manager Top Registers */
+#define MVPP2_RXQ_NUM_REG 0x2040
+#define MVPP2_RXQ_DESC_ADDR_REG 0x2044
+#define MVPP22_DESC_ADDR_OFFS 8
+#define MVPP2_RXQ_DESC_SIZE_REG 0x2048
+#define MVPP2_RXQ_DESC_SIZE_MASK 0x3ff0
+#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq) (0x3000 + 4 * (rxq))
+#define MVPP2_RXQ_NUM_PROCESSED_OFFSET 0
+#define MVPP2_RXQ_NUM_NEW_OFFSET 16
+#define MVPP2_RXQ_STATUS_REG(rxq) (0x3400 + 4 * (rxq))
+#define MVPP2_RXQ_OCCUPIED_MASK 0x3fff
+#define MVPP2_RXQ_NON_OCCUPIED_OFFSET 16
+#define MVPP2_RXQ_NON_OCCUPIED_MASK 0x3fff0000
+#define MVPP2_RXQ_THRESH_REG 0x204c
+#define MVPP2_OCCUPIED_THRESH_OFFSET 0
+#define MVPP2_OCCUPIED_THRESH_MASK 0x3fff
+#define MVPP2_RXQ_INDEX_REG 0x2050
+#define MVPP2_TXQ_NUM_REG 0x2080
+#define MVPP2_TXQ_DESC_ADDR_REG 0x2084
+#define MVPP2_TXQ_DESC_SIZE_REG 0x2088
+#define MVPP2_TXQ_DESC_SIZE_MASK 0x3ff0
+#define MVPP2_TXQ_THRESH_REG 0x2094
+#define MVPP2_TXQ_THRESH_OFFSET 16
+#define MVPP2_TXQ_THRESH_MASK 0x3fff
+#define MVPP2_AGGR_TXQ_UPDATE_REG 0x2090
+#define MVPP2_TXQ_INDEX_REG 0x2098
+#define MVPP2_TXQ_PREF_BUF_REG 0x209c
+#define MVPP2_PREF_BUF_PTR(desc) ((desc) & 0xfff)
+#define MVPP2_PREF_BUF_SIZE_4 (BIT(12) | BIT(13))
+#define MVPP2_PREF_BUF_SIZE_16 (BIT(12) | BIT(14))
+#define MVPP2_PREF_BUF_THRESH(val) ((val) << 17)
+#define MVPP2_TXQ_DRAIN_EN_MASK BIT(31)
+#define MVPP2_TXQ_PENDING_REG 0x20a0
+#define MVPP2_TXQ_PENDING_MASK 0x3fff
+#define MVPP2_TXQ_INT_STATUS_REG 0x20a4
+#define MVPP2_TXQ_SENT_REG(txq) (0x3c00 + 4 * (txq))
+#define MVPP2_TRANSMITTED_COUNT_OFFSET 16
+#define MVPP2_TRANSMITTED_COUNT_MASK 0x3fff0000
+#define MVPP2_TXQ_RSVD_REQ_REG 0x20b0
+#define MVPP2_TXQ_RSVD_REQ_Q_OFFSET 16
+#define MVPP2_TXQ_RSVD_RSLT_REG 0x20b4
+#define MVPP2_TXQ_RSVD_RSLT_MASK 0x3fff
+#define MVPP2_TXQ_RSVD_CLR_REG 0x20b8
+#define MVPP2_TXQ_RSVD_CLR_OFFSET 16
+#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu) (0x2100 + 4 * (cpu))
+#define MVPP22_AGGR_TXQ_DESC_ADDR_OFFS 8
+#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu) (0x2140 + 4 * (cpu))
+#define MVPP2_AGGR_TXQ_DESC_SIZE_MASK 0x3ff0
+#define MVPP2_AGGR_TXQ_STATUS_REG(cpu) (0x2180 + 4 * (cpu))
+#define MVPP2_AGGR_TXQ_PENDING_MASK 0x3fff
+#define MVPP2_AGGR_TXQ_INDEX_REG(cpu) (0x21c0 + 4 * (cpu))
+
+/* MBUS bridge registers */
+#define MVPP2_WIN_BASE(w) (0x4000 + ((w) << 2))
+#define MVPP2_WIN_SIZE(w) (0x4020 + ((w) << 2))
+#define MVPP2_WIN_REMAP(w) (0x4040 + ((w) << 2))
+#define MVPP2_BASE_ADDR_ENABLE 0x4060
+
+/* AXI Bridge Registers */
+#define MVPP22_AXI_BM_WR_ATTR_REG 0x4100
+#define MVPP22_AXI_BM_RD_ATTR_REG 0x4104
+#define MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG 0x4110
+#define MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG 0x4114
+#define MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG 0x4118
+#define MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG 0x411c
+#define MVPP22_AXI_RX_DATA_WR_ATTR_REG 0x4120
+#define MVPP22_AXI_TX_DATA_RD_ATTR_REG 0x4130
+#define MVPP22_AXI_RD_NORMAL_CODE_REG 0x4150
+#define MVPP22_AXI_RD_SNOOP_CODE_REG 0x4154
+#define MVPP22_AXI_WR_NORMAL_CODE_REG 0x4160
+#define MVPP22_AXI_WR_SNOOP_CODE_REG 0x4164
+
+/* Values for AXI Bridge registers */
+#define MVPP22_AXI_ATTR_CACHE_OFFS 0
+#define MVPP22_AXI_ATTR_DOMAIN_OFFS 12
+
+#define MVPP22_AXI_CODE_CACHE_OFFS 0
+#define MVPP22_AXI_CODE_DOMAIN_OFFS 4
+
+#define MVPP22_AXI_CODE_CACHE_NON_CACHE 0x3
+#define MVPP22_AXI_CODE_CACHE_WR_CACHE 0x7
+#define MVPP22_AXI_CODE_CACHE_RD_CACHE 0xb
+
+#define MVPP22_AXI_CODE_DOMAIN_OUTER_DOM 2
+#define MVPP22_AXI_CODE_DOMAIN_SYSTEM 3
+
+/* Interrupt Cause and Mask registers */
+#define MVPP2_ISR_TX_THRESHOLD_REG(port) (0x5140 + 4 * (port))
+#define MVPP2_MAX_ISR_TX_THRESHOLD 0xfffff0
+
+#define MVPP2_ISR_RX_THRESHOLD_REG(rxq) (0x5200 + 4 * (rxq))
+#define MVPP2_MAX_ISR_RX_THRESHOLD 0xfffff0
+#define MVPP21_ISR_RXQ_GROUP_REG(port) (0x5400 + 4 * (port))
+
+#define MVPP22_ISR_RXQ_GROUP_INDEX_REG 0x5400
+#define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf
+#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK 0x380
+#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET 7
+
+#define MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG 0x5404
+#define MVPP22_ISR_RXQ_SUB_GROUP_STARTQ_MASK 0x1f
+#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_MASK 0xf00
+#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET 8
+
+#define MVPP2_ISR_ENABLE_REG(port) (0x5420 + 4 * (port))
+#define MVPP2_ISR_ENABLE_INTERRUPT(mask) ((mask) & 0xffff)
+#define MVPP2_ISR_DISABLE_INTERRUPT(mask) (((mask) << 16) & 0xffff0000)
+#define MVPP2_ISR_RX_TX_CAUSE_REG(port) (0x5480 + 4 * (port))
+#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff
+#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK 0xff0000
+#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET 16
+#define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK BIT(24)
+#define MVPP2_CAUSE_FCS_ERR_MASK BIT(25)
+#define MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK BIT(26)
+#define MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK BIT(29)
+#define MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK BIT(30)
+#define MVPP2_CAUSE_MISC_SUM_MASK BIT(31)
+#define MVPP2_ISR_RX_TX_MASK_REG(port) (0x54a0 + 4 * (port))
+#define MVPP2_ISR_PON_RX_TX_MASK_REG 0x54bc
+#define MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff
+#define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK 0x3fc00000
+#define MVPP2_PON_CAUSE_MISC_SUM_MASK BIT(31)
+#define MVPP2_ISR_MISC_CAUSE_REG 0x55b0
+
+/* Buffer Manager registers */
+#define MVPP2_BM_POOL_BASE_REG(pool) (0x6000 + ((pool) * 4))
+#define MVPP2_BM_POOL_BASE_ADDR_MASK 0xfffff80
+#define MVPP2_BM_POOL_SIZE_REG(pool) (0x6040 + ((pool) * 4))
+#define MVPP2_BM_POOL_SIZE_MASK 0xfff0
+#define MVPP2_BM_POOL_READ_PTR_REG(pool) (0x6080 + ((pool) * 4))
+#define MVPP2_BM_POOL_GET_READ_PTR_MASK 0xfff0
+#define MVPP2_BM_POOL_PTRS_NUM_REG(pool) (0x60c0 + ((pool) * 4))
+#define MVPP2_BM_POOL_PTRS_NUM_MASK 0xfff0
+#define MVPP2_BM_BPPI_READ_PTR_REG(pool) (0x6100 + ((pool) * 4))
+#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool) (0x6140 + ((pool) * 4))
+#define MVPP2_BM_BPPI_PTR_NUM_MASK 0x7ff
+#define MVPP22_BM_POOL_PTRS_NUM_MASK 0xfff8
+#define MVPP2_BM_BPPI_PREFETCH_FULL_MASK BIT(16)
+#define MVPP2_BM_POOL_CTRL_REG(pool) (0x6200 + ((pool) * 4))
+#define MVPP2_BM_START_MASK BIT(0)
+#define MVPP2_BM_STOP_MASK BIT(1)
+#define MVPP2_BM_STATE_MASK BIT(4)
+#define MVPP2_BM_LOW_THRESH_OFFS 8
+#define MVPP2_BM_LOW_THRESH_MASK 0x7f00
+#define MVPP2_BM_LOW_THRESH_VALUE(val) ((val) << \
+ MVPP2_BM_LOW_THRESH_OFFS)
+#define MVPP2_BM_HIGH_THRESH_OFFS 16
+#define MVPP2_BM_HIGH_THRESH_MASK 0x7f0000
+#define MVPP2_BM_HIGH_THRESH_VALUE(val) ((val) << \
+ MVPP2_BM_HIGH_THRESH_OFFS)
+#define MVPP2_BM_INTR_CAUSE_REG(pool) (0x6240 + ((pool) * 4))
+#define MVPP2_BM_RELEASED_DELAY_MASK BIT(0)
+#define MVPP2_BM_ALLOC_FAILED_MASK BIT(1)
+#define MVPP2_BM_BPPE_EMPTY_MASK BIT(2)
+#define MVPP2_BM_BPPE_FULL_MASK BIT(3)
+#define MVPP2_BM_AVAILABLE_BP_LOW_MASK BIT(4)
+#define MVPP2_BM_INTR_MASK_REG(pool) (0x6280 + ((pool) * 4))
+#define MVPP2_BM_PHY_ALLOC_REG(pool) (0x6400 + ((pool) * 4))
+#define MVPP2_BM_PHY_ALLOC_GRNTD_MASK BIT(0)
+#define MVPP2_BM_VIRT_ALLOC_REG 0x6440
+#define MVPP22_BM_ADDR_HIGH_ALLOC 0x6444
+#define MVPP22_BM_ADDR_HIGH_PHYS_MASK 0xff
+#define MVPP22_BM_ADDR_HIGH_VIRT_MASK 0xff00
+#define MVPP22_BM_ADDR_HIGH_VIRT_SHIFT 8
+#define MVPP2_BM_PHY_RLS_REG(pool) (0x6480 + ((pool) * 4))
+#define MVPP2_BM_PHY_RLS_MC_BUFF_MASK BIT(0)
+#define MVPP2_BM_PHY_RLS_PRIO_EN_MASK BIT(1)
+#define MVPP2_BM_PHY_RLS_GRNTD_MASK BIT(2)
+#define MVPP2_BM_VIRT_RLS_REG 0x64c0
+#define MVPP22_BM_ADDR_HIGH_RLS_REG 0x64c4
+#define MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK 0xff
+#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK 0xff00
+#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT 8
+
+/* TX Scheduler registers */
+#define MVPP2_TXP_SCHED_PORT_INDEX_REG 0x8000
+#define MVPP2_TXP_SCHED_Q_CMD_REG 0x8004
+#define MVPP2_TXP_SCHED_ENQ_MASK 0xff
+#define MVPP2_TXP_SCHED_DISQ_OFFSET 8
+#define MVPP2_TXP_SCHED_CMD_1_REG 0x8010
+#define MVPP2_TXP_SCHED_PERIOD_REG 0x8018
+#define MVPP2_TXP_SCHED_MTU_REG 0x801c
+#define MVPP2_TXP_MTU_MAX			0x7ffff
+#define MVPP2_TXP_SCHED_REFILL_REG 0x8020
+#define MVPP2_TXP_REFILL_TOKENS_ALL_MASK 0x7ffff
+#define MVPP2_TXP_REFILL_PERIOD_ALL_MASK 0x3ff00000
+#define MVPP2_TXP_REFILL_PERIOD_MASK(v) ((v) << 20)
+#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG 0x8024
+#define MVPP2_TXP_TOKEN_SIZE_MAX 0xffffffff
+#define MVPP2_TXQ_SCHED_REFILL_REG(q) (0x8040 + ((q) << 2))
+#define MVPP2_TXQ_REFILL_TOKENS_ALL_MASK 0x7ffff
+#define MVPP2_TXQ_REFILL_PERIOD_ALL_MASK 0x3ff00000
+#define MVPP2_TXQ_REFILL_PERIOD_MASK(v) ((v) << 20)
+#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q) (0x8060 + ((q) << 2))
+#define MVPP2_TXQ_TOKEN_SIZE_MAX 0x7fffffff
+#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q) (0x8080 + ((q) << 2))
+#define MVPP2_TXQ_TOKEN_CNTR_MAX 0xffffffff
+
+/* TX general registers */
+#define MVPP2_TX_SNOOP_REG 0x8800
+#define MVPP2_TX_PORT_FLUSH_REG 0x8810
+#define MVPP2_TX_PORT_FLUSH_MASK(port) (1 << (port))
+
+/* LMS registers */
+#define MVPP2_SRC_ADDR_MIDDLE 0x24
+#define MVPP2_SRC_ADDR_HIGH 0x28
+#define MVPP2_PHY_AN_CFG0_REG 0x34
+#define MVPP2_PHY_AN_STOP_SMI0_MASK BIT(7)
+#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG 0x305c
+#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT 0x27
+
+/* Per-port registers */
+#define MVPP2_GMAC_CTRL_0_REG 0x0
+#define MVPP2_GMAC_PORT_EN_MASK BIT(0)
+#define MVPP2_GMAC_PORT_TYPE_MASK BIT(1)
+#define MVPP2_GMAC_MAX_RX_SIZE_OFFS 2
+#define MVPP2_GMAC_MAX_RX_SIZE_MASK 0x7ffc
+#define MVPP2_GMAC_MIB_CNTR_EN_MASK BIT(15)
+#define MVPP2_GMAC_CTRL_1_REG 0x4
+#define MVPP2_GMAC_PERIODIC_XON_EN_MASK BIT(1)
+#define MVPP2_GMAC_GMII_LB_EN_MASK BIT(5)
+#define MVPP2_GMAC_PCS_LB_EN_BIT 6
+#define MVPP2_GMAC_PCS_LB_EN_MASK BIT(6)
+#define MVPP2_GMAC_SA_LOW_OFFS 7
+#define MVPP2_GMAC_CTRL_2_REG 0x8
+#define MVPP2_GMAC_INBAND_AN_MASK BIT(0)
+#define MVPP2_GMAC_FLOW_CTRL_MASK GENMASK(2, 1)
+#define MVPP2_GMAC_PCS_ENABLE_MASK BIT(3)
+#define MVPP2_GMAC_INTERNAL_CLK_MASK BIT(4)
+#define MVPP2_GMAC_DISABLE_PADDING BIT(5)
+#define MVPP2_GMAC_PORT_RESET_MASK BIT(6)
+#define MVPP2_GMAC_AUTONEG_CONFIG 0xc
+#define MVPP2_GMAC_FORCE_LINK_DOWN BIT(0)
+#define MVPP2_GMAC_FORCE_LINK_PASS BIT(1)
+#define MVPP2_GMAC_IN_BAND_AUTONEG BIT(2)
+#define MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS BIT(3)
+#define MVPP2_GMAC_IN_BAND_RESTART_AN BIT(4)
+#define MVPP2_GMAC_CONFIG_MII_SPEED BIT(5)
+#define MVPP2_GMAC_CONFIG_GMII_SPEED BIT(6)
+#define MVPP2_GMAC_AN_SPEED_EN BIT(7)
+#define MVPP2_GMAC_FC_ADV_EN BIT(9)
+#define MVPP2_GMAC_FC_ADV_ASM_EN BIT(10)
+#define MVPP2_GMAC_FLOW_CTRL_AUTONEG BIT(11)
+#define MVPP2_GMAC_CONFIG_FULL_DUPLEX BIT(12)
+#define MVPP2_GMAC_AN_DUPLEX_EN BIT(13)
+#define MVPP2_GMAC_STATUS0 0x10
+#define MVPP2_GMAC_STATUS0_LINK_UP BIT(0)
+#define MVPP2_GMAC_STATUS0_GMII_SPEED BIT(1)
+#define MVPP2_GMAC_STATUS0_MII_SPEED BIT(2)
+#define MVPP2_GMAC_STATUS0_FULL_DUPLEX BIT(3)
+#define MVPP2_GMAC_STATUS0_RX_PAUSE BIT(6)
+#define MVPP2_GMAC_STATUS0_TX_PAUSE BIT(7)
+#define MVPP2_GMAC_STATUS0_AN_COMPLETE BIT(11)
+#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG 0x1c
+#define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS 6
+#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK 0x1fc0
+#define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v) (((v) << 6) & \
+ MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)
+#define MVPP22_GMAC_INT_STAT 0x20
+#define MVPP22_GMAC_INT_STAT_LINK BIT(1)
+#define MVPP22_GMAC_INT_MASK 0x24
+#define MVPP22_GMAC_INT_MASK_LINK_STAT BIT(1)
+#define MVPP22_GMAC_CTRL_4_REG 0x90
+#define MVPP22_CTRL4_EXT_PIN_GMII_SEL BIT(0)
+#define MVPP22_CTRL4_RX_FC_EN BIT(3)
+#define MVPP22_CTRL4_TX_FC_EN BIT(4)
+#define MVPP22_CTRL4_DP_CLK_SEL BIT(5)
+#define MVPP22_CTRL4_SYNC_BYPASS_DIS BIT(6)
+#define MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE BIT(7)
+#define MVPP22_GMAC_INT_SUM_MASK 0xa4
+#define MVPP22_GMAC_INT_SUM_MASK_LINK_STAT BIT(1)
+
+/* Per-port XGMAC registers. PPv2.2 only, only for GOP port 0,
+ * relative to port->base.
+ */
+#define MVPP22_XLG_CTRL0_REG 0x100
+#define MVPP22_XLG_CTRL0_PORT_EN BIT(0)
+#define MVPP22_XLG_CTRL0_MAC_RESET_DIS BIT(1)
+#define MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN BIT(7)
+#define MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN BIT(8)
+#define MVPP22_XLG_CTRL0_MIB_CNT_DIS BIT(14)
+#define MVPP22_XLG_CTRL1_REG 0x104
+#define MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS 0
+#define MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK 0x1fff
+#define MVPP22_XLG_STATUS 0x10c
+#define MVPP22_XLG_STATUS_LINK_UP BIT(0)
+#define MVPP22_XLG_INT_STAT 0x114
+#define MVPP22_XLG_INT_STAT_LINK BIT(1)
+#define MVPP22_XLG_INT_MASK 0x118
+#define MVPP22_XLG_INT_MASK_LINK BIT(1)
+#define MVPP22_XLG_CTRL3_REG 0x11c
+#define MVPP22_XLG_CTRL3_MACMODESELECT_MASK (7 << 13)
+#define MVPP22_XLG_CTRL3_MACMODESELECT_GMAC (0 << 13)
+#define MVPP22_XLG_CTRL3_MACMODESELECT_10G (1 << 13)
+#define MVPP22_XLG_EXT_INT_MASK 0x15c
+#define MVPP22_XLG_EXT_INT_MASK_XLG BIT(1)
+#define MVPP22_XLG_EXT_INT_MASK_GIG BIT(2)
+#define MVPP22_XLG_CTRL4_REG 0x184
+#define MVPP22_XLG_CTRL4_FWD_FC BIT(5)
+#define MVPP22_XLG_CTRL4_FWD_PFC BIT(6)
+#define MVPP22_XLG_CTRL4_MACMODSELECT_GMAC BIT(12)
+#define MVPP22_XLG_CTRL4_EN_IDLE_CHECK BIT(14)
+
+/* SMI registers. PPv2.2 only, relative to priv->iface_base. */
+#define MVPP22_SMI_MISC_CFG_REG 0x1204
+#define MVPP22_SMI_POLLING_EN BIT(10)
+
+#define MVPP22_GMAC_BASE(port) (0x7000 + (port) * 0x1000 + 0xe00)
+
+#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff
+
+/* Descriptor ring Macros */
+#define MVPP2_QUEUE_NEXT_DESC(q, index) \
+ (((index) < (q)->last_desc) ? ((index) + 1) : 0)
+
+/* MPCS registers. PPv2.2 only */
+#define MVPP22_MPCS_BASE(port) (0x7000 + (port) * 0x1000)
+#define MVPP22_MPCS_CTRL 0x14
+#define MVPP22_MPCS_CTRL_FWD_ERR_CONN BIT(10)
+#define MVPP22_MPCS_CLK_RESET 0x14c
+#define MAC_CLK_RESET_SD_TX BIT(0)
+#define MAC_CLK_RESET_SD_RX BIT(1)
+#define MAC_CLK_RESET_MAC BIT(2)
+#define MVPP22_MPCS_CLK_RESET_DIV_RATIO(n) ((n) << 4)
+#define MVPP22_MPCS_CLK_RESET_DIV_SET BIT(11)
+
+/* XPCS registers. PPv2.2 only */
+#define MVPP22_XPCS_BASE(port) (0x7400 + (port) * 0x1000)
+#define MVPP22_XPCS_CFG0 0x0
+#define MVPP22_XPCS_CFG0_PCS_MODE(n) ((n) << 3)
+#define MVPP22_XPCS_CFG0_ACTIVE_LANE(n) ((n) << 5)
+
+/* System controller registers. Accessed through a regmap. */
+#define GENCONF_SOFT_RESET1 0x1108
+#define GENCONF_SOFT_RESET1_GOP BIT(6)
+#define GENCONF_PORT_CTRL0 0x1110
+#define GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT BIT(1)
+#define GENCONF_PORT_CTRL0_RX_DATA_SAMPLE BIT(29)
+#define GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR BIT(31)
+#define GENCONF_PORT_CTRL1 0x1114
+#define GENCONF_PORT_CTRL1_EN(p) BIT(p)
+#define GENCONF_PORT_CTRL1_RESET(p) (BIT(p) << 28)
+#define GENCONF_CTRL0 0x1120
+#define GENCONF_CTRL0_PORT0_RGMII BIT(0)
+#define GENCONF_CTRL0_PORT1_RGMII_MII BIT(1)
+#define GENCONF_CTRL0_PORT1_RGMII BIT(2)
+
+/* Various constants */
+
+/* Coalescing */
+#define MVPP2_TXDONE_COAL_PKTS_THRESH 64
+#define MVPP2_TXDONE_HRTIMER_PERIOD_NS 1000000UL
+#define MVPP2_TXDONE_COAL_USEC 1000
+#define MVPP2_RX_COAL_PKTS 32
+#define MVPP2_RX_COAL_USEC 64
+
+/* The two-byte Marvell header. It either contains a special value used
+ * by Marvell switches when a specific hardware mode is enabled (not
+ * supported by this driver) or is automatically filled with zeroes on
+ * the RX side. Since those two bytes sit at the front of the Ethernet
+ * header, they leave the IP header aligned on a 4-byte boundary
+ * automatically: the hardware skips them on its own.
+ */
+#define MVPP2_MH_SIZE 2
+#define MVPP2_ETH_TYPE_LEN 2
+#define MVPP2_PPPOE_HDR_SIZE 8
+#define MVPP2_VLAN_TAG_LEN 4
+#define MVPP2_VLAN_TAG_EDSA_LEN 8
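+
+/* Worked example (illustrative only): with the two-byte Marvell header
+ * prepended, the IP header starts at MVPP2_MH_SIZE + ETH_HLEN =
+ * 2 + 14 = 16 bytes into the buffer, a multiple of 4, hence the
+ * automatic 4-byte alignment described above.
+ */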
+
+/* Lbtd 802.3 type */
+#define MVPP2_IP_LBDT_TYPE 0xfffa
+
+#define MVPP2_TX_CSUM_MAX_SIZE 9800
+
+/* Timeout constants */
+#define MVPP2_TX_DISABLE_TIMEOUT_MSEC 1000
+#define MVPP2_TX_PENDING_TIMEOUT_MSEC 1000
+
+#define MVPP2_TX_MTU_MAX 0x7ffff
+
+/* Maximum number of T-CONTs of PON port */
+#define MVPP2_MAX_TCONT 16
+
+/* Maximum number of supported ports */
+#define MVPP2_MAX_PORTS 4
+
+/* Maximum number of TXQs used by single port */
+#define MVPP2_MAX_TXQ 8
+
+/* MVPP2_MAX_TSO_SEGS is the maximum number of fragments to allow in the GSO
+ * skb. As we need a maximum of two descriptors per fragment (1 header, 1 data),
+ * multiply this value by two to count the maximum number of skb descs needed.
+ */
+#define MVPP2_MAX_TSO_SEGS 300
+#define MVPP2_MAX_SKB_DESCS (MVPP2_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
+
+/* Default number of RXQs in use */
+#define MVPP2_DEFAULT_RXQ 4
+
+/* Max number of Rx descriptors */
+#define MVPP2_MAX_RXD_MAX 1024
+#define MVPP2_MAX_RXD_DFLT 128
+
+/* Max number of Tx descriptors */
+#define MVPP2_MAX_TXD_MAX 2048
+#define MVPP2_MAX_TXD_DFLT 1024
+
+/* Amount of Tx descriptors that can be reserved at once by CPU */
+#define MVPP2_CPU_DESC_CHUNK 64
+
+/* Max number of Tx descriptors in each aggregated queue */
+#define MVPP2_AGGR_TXQ_SIZE 256
+
+/* Descriptor aligned size */
+#define MVPP2_DESC_ALIGNED_SIZE 32
+
+/* Descriptor alignment mask */
+#define MVPP2_TX_DESC_ALIGN (MVPP2_DESC_ALIGNED_SIZE - 1)
+
+/* RX FIFO constants */
+#define MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB 0x8000
+#define MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB 0x2000
+#define MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB 0x1000
+#define MVPP2_RX_FIFO_PORT_ATTR_SIZE_32KB 0x200
+#define MVPP2_RX_FIFO_PORT_ATTR_SIZE_8KB 0x80
+#define MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB 0x40
+#define MVPP2_RX_FIFO_PORT_MIN_PKT 0x80
+
+/* TX FIFO constants */
+#define MVPP22_TX_FIFO_DATA_SIZE_10KB 0xa
+#define MVPP22_TX_FIFO_DATA_SIZE_3KB 0x3
+#define MVPP2_TX_FIFO_THRESHOLD_MIN 256
+#define MVPP2_TX_FIFO_THRESHOLD_10KB \
+ (MVPP22_TX_FIFO_DATA_SIZE_10KB * 1024 - MVPP2_TX_FIFO_THRESHOLD_MIN)
+#define MVPP2_TX_FIFO_THRESHOLD_3KB \
+ (MVPP22_TX_FIFO_DATA_SIZE_3KB * 1024 - MVPP2_TX_FIFO_THRESHOLD_MIN)
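+
+/* Worked example (illustrative only): the data sizes above are expressed
+ * in KB units, so the 10KB threshold expands to 0xa * 1024 - 256 = 9984
+ * bytes, i.e. the FIFO size minus the minimum threshold headroom.
+ */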
+
+/* RX buffer constants */
+#define MVPP2_SKB_SHINFO_SIZE \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
+
+#define MVPP2_RX_PKT_SIZE(mtu) \
+ ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
+ ETH_HLEN + ETH_FCS_LEN, cache_line_size())
+
+#define MVPP2_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)
+#define MVPP2_RX_TOTAL_SIZE(buf_size) ((buf_size) + MVPP2_SKB_SHINFO_SIZE)
+#define MVPP2_RX_MAX_PKT_SIZE(total_size) \
+ ((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)
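+
+/* Worked example (illustrative, assuming a 64-byte cache line and
+ * NET_SKB_PAD == 64): for an MTU of 1500,
+ * MVPP2_RX_PKT_SIZE = ALIGN(1500 + 2 + 4 + 14 + 4, 64) = 1536 and
+ * MVPP2_RX_BUF_SIZE = 1536 + 64 = 1600 bytes, before the shared info
+ * area is accounted for by MVPP2_RX_TOTAL_SIZE.
+ */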
+
+#define MVPP2_BIT_TO_BYTE(bit) ((bit) / 8)
+
+/* IPv6 max L3 address size */
+#define MVPP2_MAX_L3_ADDR_SIZE 16
+
+/* Port flags */
+#define MVPP2_F_LOOPBACK BIT(0)
+
+/* Marvell tag types */
+enum mvpp2_tag_type {
+ MVPP2_TAG_TYPE_NONE = 0,
+ MVPP2_TAG_TYPE_MH = 1,
+ MVPP2_TAG_TYPE_DSA = 2,
+ MVPP2_TAG_TYPE_EDSA = 3,
+ MVPP2_TAG_TYPE_VLAN = 4,
+ MVPP2_TAG_TYPE_LAST = 5
+};
+
+/* L2 cast enum */
+enum mvpp2_prs_l2_cast {
+ MVPP2_PRS_L2_UNI_CAST,
+ MVPP2_PRS_L2_MULTI_CAST,
+};
+
+/* L3 cast enum */
+enum mvpp2_prs_l3_cast {
+ MVPP2_PRS_L3_UNI_CAST,
+ MVPP2_PRS_L3_MULTI_CAST,
+ MVPP2_PRS_L3_BROAD_CAST
+};
+
+/* BM constants */
+#define MVPP2_BM_JUMBO_BUF_NUM 512
+#define MVPP2_BM_LONG_BUF_NUM 1024
+#define MVPP2_BM_SHORT_BUF_NUM 2048
+#define MVPP2_BM_POOL_SIZE_MAX (16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4)
+#define MVPP2_BM_POOL_PTR_ALIGN 128
+
+/* BM cookie (32 bits) definition */
+#define MVPP2_BM_COOKIE_POOL_OFFS 8
+#define MVPP2_BM_COOKIE_CPU_OFFS 24
+
+#define MVPP2_BM_SHORT_FRAME_SIZE 512
+#define MVPP2_BM_LONG_FRAME_SIZE 2048
+#define MVPP2_BM_JUMBO_FRAME_SIZE 10240
+/* BM short pool packet size
+ * These values ensure that for SWF the total number
+ * of bytes allocated for each buffer will be 512
+ */
+#define MVPP2_BM_SHORT_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(MVPP2_BM_SHORT_FRAME_SIZE)
+#define MVPP2_BM_LONG_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(MVPP2_BM_LONG_FRAME_SIZE)
+#define MVPP2_BM_JUMBO_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(MVPP2_BM_JUMBO_FRAME_SIZE)
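+
+/* Illustrative check: MVPP2_BM_SHORT_PKT_SIZE unwinds to
+ * 512 - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE, so one short-pool buffer
+ * plus its padding and shared info adds up to exactly 512 bytes, as the
+ * comment above states.
+ */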
+
+#define MVPP21_ADDR_SPACE_SZ 0
+#define MVPP22_ADDR_SPACE_SZ SZ_64K
+
+#define MVPP2_MAX_THREADS 8
+#define MVPP2_MAX_QVECS MVPP2_MAX_THREADS
+
+/* GMAC MIB Counters register definitions */
+#define MVPP21_MIB_COUNTERS_OFFSET 0x1000
+#define MVPP21_MIB_COUNTERS_PORT_SZ 0x400
+#define MVPP22_MIB_COUNTERS_OFFSET 0x0
+#define MVPP22_MIB_COUNTERS_PORT_SZ 0x100
+
+#define MVPP2_MIB_GOOD_OCTETS_RCVD 0x0
+#define MVPP2_MIB_BAD_OCTETS_RCVD 0x8
+#define MVPP2_MIB_CRC_ERRORS_SENT 0xc
+#define MVPP2_MIB_UNICAST_FRAMES_RCVD 0x10
+#define MVPP2_MIB_BROADCAST_FRAMES_RCVD 0x18
+#define MVPP2_MIB_MULTICAST_FRAMES_RCVD 0x1c
+#define MVPP2_MIB_FRAMES_64_OCTETS 0x20
+#define MVPP2_MIB_FRAMES_65_TO_127_OCTETS 0x24
+#define MVPP2_MIB_FRAMES_128_TO_255_OCTETS 0x28
+#define MVPP2_MIB_FRAMES_256_TO_511_OCTETS 0x2c
+#define MVPP2_MIB_FRAMES_512_TO_1023_OCTETS 0x30
+#define MVPP2_MIB_FRAMES_1024_TO_MAX_OCTETS 0x34
+#define MVPP2_MIB_GOOD_OCTETS_SENT 0x38
+#define MVPP2_MIB_UNICAST_FRAMES_SENT 0x40
+#define MVPP2_MIB_MULTICAST_FRAMES_SENT 0x48
+#define MVPP2_MIB_BROADCAST_FRAMES_SENT 0x4c
+#define MVPP2_MIB_FC_SENT 0x54
+#define MVPP2_MIB_FC_RCVD 0x58
+#define MVPP2_MIB_RX_FIFO_OVERRUN 0x5c
+#define MVPP2_MIB_UNDERSIZE_RCVD 0x60
+#define MVPP2_MIB_FRAGMENTS_RCVD 0x64
+#define MVPP2_MIB_OVERSIZE_RCVD 0x68
+#define MVPP2_MIB_JABBER_RCVD 0x6c
+#define MVPP2_MIB_MAC_RCV_ERROR 0x70
+#define MVPP2_MIB_BAD_CRC_EVENT 0x74
+#define MVPP2_MIB_COLLISION 0x78
+#define MVPP2_MIB_LATE_COLLISION 0x7c
+
+#define MVPP2_MIB_COUNTERS_STATS_DELAY (1 * HZ)
+
+#define MVPP2_DESC_DMA_MASK DMA_BIT_MASK(40)
+
+/* Definitions */
+
+/* Shared Packet Processor resources */
+struct mvpp2 {
+ /* Shared registers' base addresses */
+ void __iomem *lms_base;
+ void __iomem *iface_base;
+
+ /* On PPv2.2, each "software thread" can access the base
+ * register through a separate address space, each 64 KB apart
+ * from each other. Typically, such address spaces will be
+ * used per CPU.
+ */
+ void __iomem *swth_base[MVPP2_MAX_THREADS];
+
+	/* On PPv2.2, some port control registers are located in the system
+ * controller space. These registers are accessible through a regmap.
+ */
+ struct regmap *sysctrl_base;
+
+ /* Common clocks */
+ struct clk *pp_clk;
+ struct clk *gop_clk;
+ struct clk *mg_clk;
+ struct clk *mg_core_clk;
+ struct clk *axi_clk;
+
+ /* List of pointers to port structures */
+ int port_count;
+ struct mvpp2_port *port_list[MVPP2_MAX_PORTS];
+
+ /* Aggregated TXQs */
+ struct mvpp2_tx_queue *aggr_txqs;
+
+ /* BM pools */
+ struct mvpp2_bm_pool *bm_pools;
+
+ /* PRS shadow table */
+ struct mvpp2_prs_shadow *prs_shadow;
+ /* PRS auxiliary table for double vlan entries control */
+ bool *prs_double_vlans;
+
+ /* Tclk value */
+ u32 tclk;
+
+ /* HW version */
+ enum { MVPP21, MVPP22 } hw_version;
+
+ /* Maximum number of RXQs per port */
+ unsigned int max_port_rxqs;
+
+ /* Workqueue to gather hardware statistics */
+ char queue_name[30];
+ struct workqueue_struct *stats_queue;
+};
+
+struct mvpp2_pcpu_stats {
+ struct u64_stats_sync syncp;
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 tx_packets;
+ u64 tx_bytes;
+};
+
+/* Per-CPU port control */
+struct mvpp2_port_pcpu {
+ struct hrtimer tx_done_timer;
+ bool timer_scheduled;
+ /* Tasklet for egress finalization */
+ struct tasklet_struct tx_done_tasklet;
+};
+
+struct mvpp2_queue_vector {
+ int irq;
+ struct napi_struct napi;
+ enum { MVPP2_QUEUE_VECTOR_SHARED, MVPP2_QUEUE_VECTOR_PRIVATE } type;
+ int sw_thread_id;
+ u16 sw_thread_mask;
+ int first_rxq;
+ int nrxqs;
+ u32 pending_cause_rx;
+ struct mvpp2_port *port;
+};
+
+struct mvpp2_port {
+ u8 id;
+
+ /* Index of the port from the "group of ports" complex point
+ * of view
+ */
+ int gop_id;
+
+ int link_irq;
+
+ struct mvpp2 *priv;
+
+ /* Firmware node associated to the port */
+ struct fwnode_handle *fwnode;
+
+	/* Whether a PHY is always connected to the port */
+ bool has_phy;
+
+ /* Per-port registers' base address */
+ void __iomem *base;
+ void __iomem *stats_base;
+
+ struct mvpp2_rx_queue **rxqs;
+ unsigned int nrxqs;
+ struct mvpp2_tx_queue **txqs;
+ unsigned int ntxqs;
+ struct net_device *dev;
+
+ int pkt_size;
+
+ /* Per-CPU port control */
+ struct mvpp2_port_pcpu __percpu *pcpu;
+
+ /* Flags */
+ unsigned long flags;
+
+ u16 tx_ring_size;
+ u16 rx_ring_size;
+ struct mvpp2_pcpu_stats __percpu *stats;
+ u64 *ethtool_stats;
+
+ /* Per-port work and its lock to gather hardware statistics */
+ struct mutex gather_stats_lock;
+ struct delayed_work stats_work;
+
+ struct device_node *of_node;
+
+ phy_interface_t phy_interface;
+ struct phylink *phylink;
+ struct phy *comphy;
+
+ struct mvpp2_bm_pool *pool_long;
+ struct mvpp2_bm_pool *pool_short;
+
+ /* Index of first port's physical RXQ */
+ u8 first_rxq;
+
+ struct mvpp2_queue_vector qvecs[MVPP2_MAX_QVECS];
+ unsigned int nqvecs;
+ bool has_tx_irqs;
+
+ u32 tx_time_coal;
+};
+
+/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the transmit
+ * and reception DMA descriptors; their layout is therefore defined by
+ * the hardware design
+ */
+
+#define MVPP2_TXD_L3_OFF_SHIFT 0
+#define MVPP2_TXD_IP_HLEN_SHIFT 8
+#define MVPP2_TXD_L4_CSUM_FRAG BIT(13)
+#define MVPP2_TXD_L4_CSUM_NOT BIT(14)
+#define MVPP2_TXD_IP_CSUM_DISABLE BIT(15)
+#define MVPP2_TXD_PADDING_DISABLE BIT(23)
+#define MVPP2_TXD_L4_UDP BIT(24)
+#define MVPP2_TXD_L3_IP6 BIT(26)
+#define MVPP2_TXD_L_DESC BIT(28)
+#define MVPP2_TXD_F_DESC BIT(29)
+
+#define MVPP2_RXD_ERR_SUMMARY BIT(15)
+#define MVPP2_RXD_ERR_CODE_MASK (BIT(13) | BIT(14))
+#define MVPP2_RXD_ERR_CRC 0x0
+#define MVPP2_RXD_ERR_OVERRUN BIT(13)
+#define MVPP2_RXD_ERR_RESOURCE (BIT(13) | BIT(14))
+#define MVPP2_RXD_BM_POOL_ID_OFFS 16
+#define MVPP2_RXD_BM_POOL_ID_MASK (BIT(16) | BIT(17) | BIT(18))
+#define MVPP2_RXD_HWF_SYNC BIT(21)
+#define MVPP2_RXD_L4_CSUM_OK BIT(22)
+#define MVPP2_RXD_IP4_HEADER_ERR BIT(24)
+#define MVPP2_RXD_L4_TCP BIT(25)
+#define MVPP2_RXD_L4_UDP BIT(26)
+#define MVPP2_RXD_L3_IP4 BIT(28)
+#define MVPP2_RXD_L3_IP6 BIT(30)
+#define MVPP2_RXD_BUF_HDR BIT(31)
+
+/* HW TX descriptor for PPv2.1 */
+struct mvpp21_tx_desc {
+	u32 command;		/* Options used by HW for packet transmission */
+ u8 packet_offset; /* the offset from the buffer beginning */
+ u8 phys_txq; /* destination queue ID */
+ u16 data_size; /* data size of transmitted packet in bytes */
+ u32 buf_dma_addr; /* physical addr of transmitted buffer */
+ u32 buf_cookie; /* cookie for access to TX buffer in tx path */
+ u32 reserved1[3]; /* hw_cmd (for future use, BM, PON, PNC) */
+ u32 reserved2; /* reserved (for future use) */
+};
+
+/* HW RX descriptor for PPv2.1 */
+struct mvpp21_rx_desc {
+ u32 status; /* info about received packet */
+ u16 reserved1; /* parser_info (for future use, PnC) */
+ u16 data_size; /* size of received packet in bytes */
+ u32 buf_dma_addr; /* physical address of the buffer */
+ u32 buf_cookie; /* cookie for access to RX buffer in rx path */
+ u16 reserved2; /* gem_port_id (for future use, PON) */
+ u16 reserved3; /* csum_l4 (for future use, PnC) */
+ u8 reserved4; /* bm_qset (for future use, BM) */
+ u8 reserved5;
+ u16 reserved6; /* classify_info (for future use, PnC) */
+ u32 reserved7; /* flow_id (for future use, PnC) */
+ u32 reserved8;
+};
+
+/* HW TX descriptor for PPv2.2 */
+struct mvpp22_tx_desc {
+ u32 command;
+ u8 packet_offset;
+ u8 phys_txq;
+ u16 data_size;
+ u64 reserved1;
+ u64 buf_dma_addr_ptp;
+ u64 buf_cookie_misc;
+};
+
+/* HW RX descriptor for PPv2.2 */
+struct mvpp22_rx_desc {
+ u32 status;
+ u16 reserved1;
+ u16 data_size;
+ u32 reserved2;
+ u32 reserved3;
+ u64 buf_dma_addr_key_hash;
+ u64 buf_cookie_misc;
+};
+
+/* Opaque type used by the driver to manipulate the HW TX and RX
+ * descriptors
+ */
+struct mvpp2_tx_desc {
+ union {
+ struct mvpp21_tx_desc pp21;
+ struct mvpp22_tx_desc pp22;
+ };
+};
+
+struct mvpp2_rx_desc {
+ union {
+ struct mvpp21_rx_desc pp21;
+ struct mvpp22_rx_desc pp22;
+ };
+};
+
+struct mvpp2_txq_pcpu_buf {
+ /* Transmitted SKB */
+ struct sk_buff *skb;
+
+ /* Physical address of transmitted buffer */
+ dma_addr_t dma;
+
+ /* Size transmitted */
+ size_t size;
+};
+
+/* Per-CPU Tx queue control */
+struct mvpp2_txq_pcpu {
+ int cpu;
+
+ /* Number of Tx DMA descriptors in the descriptor ring */
+ int size;
+
+	/* Number of currently used Tx DMA descriptors in the
+	 * descriptor ring
+	 */
+ int count;
+
+ int wake_threshold;
+ int stop_threshold;
+
+ /* Number of Tx DMA descriptors reserved for each CPU */
+ int reserved_num;
+
+	/* Info about transmitted buffers */
+ struct mvpp2_txq_pcpu_buf *buffs;
+
+ /* Index of last TX DMA descriptor that was inserted */
+ int txq_put_index;
+
+ /* Index of the TX DMA descriptor to be cleaned up */
+ int txq_get_index;
+
+ /* DMA buffer for TSO headers */
+ char *tso_headers;
+ dma_addr_t tso_headers_dma;
+};
+
+struct mvpp2_tx_queue {
+ /* Physical number of this Tx queue */
+ u8 id;
+
+ /* Logical number of this Tx queue */
+ u8 log_id;
+
+ /* Number of Tx DMA descriptors in the descriptor ring */
+ int size;
+
+	/* Number of currently used Tx DMA descriptors in the descriptor ring */
+ int count;
+
+ /* Per-CPU control of physical Tx queues */
+ struct mvpp2_txq_pcpu __percpu *pcpu;
+
+ u32 done_pkts_coal;
+
+	/* Virtual address of the Tx DMA descriptors array */
+ struct mvpp2_tx_desc *descs;
+
+ /* DMA address of the Tx DMA descriptors array */
+ dma_addr_t descs_dma;
+
+ /* Index of the last Tx DMA descriptor */
+ int last_desc;
+
+ /* Index of the next Tx DMA descriptor to process */
+ int next_desc_to_proc;
+};
+
+struct mvpp2_rx_queue {
+ /* RX queue number, in the range 0-31 for physical RXQs */
+ u8 id;
+
+ /* Num of rx descriptors in the rx descriptor ring */
+ int size;
+
+ u32 pkts_coal;
+ u32 time_coal;
+
+ /* Virtual address of the RX DMA descriptors array */
+ struct mvpp2_rx_desc *descs;
+
+ /* DMA address of the RX DMA descriptors array */
+ dma_addr_t descs_dma;
+
+ /* Index of the last RX DMA descriptor */
+ int last_desc;
+
+ /* Index of the next RX DMA descriptor to process */
+ int next_desc_to_proc;
+
+ /* ID of port to which physical RXQ is mapped */
+ int port;
+
+	/* Port's logical RXQ number to which physical RXQ is mapped */
+ int logic_rxq;
+};
+
+struct mvpp2_bm_pool {
+ /* Pool number in the range 0-7 */
+ int id;
+
+ /* Buffer Pointers Pool External (BPPE) size */
+ int size;
+ /* BPPE size in bytes */
+ int size_bytes;
+ /* Number of buffers for this pool */
+ int buf_num;
+ /* Pool buffer size */
+ int buf_size;
+ /* Packet size */
+ int pkt_size;
+ int frag_size;
+
+ /* BPPE virtual base address */
+ u32 *virt_addr;
+ /* BPPE DMA base address */
+ dma_addr_t dma_addr;
+
+ /* Ports using BM pool */
+ u32 port_map;
+};
+
+#define IS_TSO_HEADER(txq_pcpu, addr) \
+ ((addr) >= (txq_pcpu)->tso_headers_dma && \
+ (addr) < (txq_pcpu)->tso_headers_dma + \
+ (txq_pcpu)->size * TSO_HEADER_SIZE)
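+
+/* Usage sketch (hypothetical, not taken from this patch): on TX
+ * completion, IS_TSO_HEADER() tells whether a DMA address points into
+ * the per-CPU TSO header area (which must not be unmapped) or to mapped
+ * skb data:
+ *
+ *	if (!IS_TSO_HEADER(txq_pcpu, tx_buf->dma))
+ *		dma_unmap_single(dev, tx_buf->dma, tx_buf->size,
+ *				 DMA_TO_DEVICE);
+ */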
+
+#define MVPP2_DRIVER_NAME "mvpp2"
+#define MVPP2_DRIVER_VERSION "1.0"
+
+void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data);
+u32 mvpp2_read(struct mvpp2 *priv, u32 offset);
+
+u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset);
+
+void mvpp2_percpu_write(struct mvpp2 *priv, int cpu, u32 offset, u32 data);
+u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu, u32 offset);
+
+void mvpp2_percpu_write_relaxed(struct mvpp2 *priv, int cpu, u32 offset,
+ u32 data);
+
+#endif
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
new file mode 100644
index 000000000000..8581d5b17dd5
--- /dev/null
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
@@ -0,0 +1,141 @@
+/*
+ * RSS and Classifier helpers for Marvell PPv2 Network Controller
+ *
+ * Copyright (C) 2014 Marvell
+ *
+ * Marcin Wojtas <mw@semihalf.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include "mvpp2.h"
+#include "mvpp2_cls.h"
+
+/* Update classification flow table registers */
+static void mvpp2_cls_flow_write(struct mvpp2 *priv,
+ struct mvpp2_cls_flow_entry *fe)
+{
+ mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
+ mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
+ mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
+ mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
+}
+
+/* Update classification lookup table register */
+static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
+ struct mvpp2_cls_lookup_entry *le)
+{
+ u32 val;
+
+ val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
+ mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
+ mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
+}
+
+/* Classifier default initialization */
+void mvpp2_cls_init(struct mvpp2 *priv)
+{
+ struct mvpp2_cls_lookup_entry le;
+ struct mvpp2_cls_flow_entry fe;
+ int index;
+
+ /* Enable classifier */
+ mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);
+
+ /* Clear classifier flow table */
+ memset(&fe.data, 0, sizeof(fe.data));
+ for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
+ fe.index = index;
+ mvpp2_cls_flow_write(priv, &fe);
+ }
+
+ /* Clear classifier lookup table */
+ le.data = 0;
+ for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
+ le.lkpid = index;
+ le.way = 0;
+ mvpp2_cls_lookup_write(priv, &le);
+
+ le.way = 1;
+ mvpp2_cls_lookup_write(priv, &le);
+ }
+}
+
+void mvpp2_cls_port_config(struct mvpp2_port *port)
+{
+ struct mvpp2_cls_lookup_entry le;
+ u32 val;
+
+ /* Set way for the port */
+ val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
+ val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
+ mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);
+
+ /* Pick the entry to be accessed in lookup ID decoding table
+ * according to the way and lkpid.
+ */
+ le.lkpid = port->id;
+ le.way = 0;
+ le.data = 0;
+
+ /* Set initial CPU queue for receiving packets */
+ le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
+ le.data |= port->first_rxq;
+
+ /* Disable classification engines */
+ le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;
+
+ /* Update lookup ID table entry */
+ mvpp2_cls_lookup_write(port->priv, &le);
+}
+
+/* Set CPU queue number for oversize packets */
+void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
+{
+ u32 val;
+
+ mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
+ port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);
+
+ mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
+ (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));
+
+ val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
+ val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
+ mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
+}
+
+void mvpp22_init_rss(struct mvpp2_port *port)
+{
+ struct mvpp2 *priv = port->priv;
+ int i;
+
+ /* Set the table width: replace the whole classifier Rx queue number
+ * with the ones configured in RSS table entries.
+ */
+ mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_TABLE(0));
+ mvpp2_write(priv, MVPP22_RSS_WIDTH, 8);
+
+	/* Loop through the classifier Rx Queues and map them to an RSS table.
+ * Map them all to the first table (0) by default.
+ */
+ for (i = 0; i < MVPP2_CLS_RX_QUEUES; i++) {
+ mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_QUEUE(i));
+ mvpp2_write(priv, MVPP22_RSS_TABLE,
+ MVPP22_RSS_TABLE_POINTER(0));
+ }
+
+ /* Configure the first table to evenly distribute the packets across
+	 * real Rx Queues. The table entries map a hash to a port Rx Queue.
+ */
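+	/* For example (illustrative only): with port->nrxqs == 4, the 32
+	 * table entries map hashes to queues 0, 1, 2, 3, 0, 1, 2, 3, ...
+	 */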
+ for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++) {
+ u32 sel = MVPP22_RSS_INDEX_TABLE(0) |
+ MVPP22_RSS_INDEX_TABLE_ENTRY(i);
+ mvpp2_write(priv, MVPP22_RSS_INDEX, sel);
+
+ mvpp2_write(priv, MVPP22_RSS_TABLE_ENTRY, i % port->nrxqs);
+ }
+}
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
new file mode 100644
index 000000000000..8e1d7f9ffa0b
--- /dev/null
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
@@ -0,0 +1,44 @@
+/*
+ * RSS and Classifier definitions for Marvell PPv2 Network Controller
+ *
+ * Copyright (C) 2014 Marvell
+ *
+ * Marcin Wojtas <mw@semihalf.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef _MVPP2_CLS_H_
+#define _MVPP2_CLS_H_
+
+/* Classifier constants */
+#define MVPP2_CLS_FLOWS_TBL_SIZE 512
+#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS 3
+#define MVPP2_CLS_LKP_TBL_SIZE 64
+#define MVPP2_CLS_RX_QUEUES 256
+
+/* RSS constants */
+#define MVPP22_RSS_TABLE_ENTRIES 32
+
+struct mvpp2_cls_flow_entry {
+ u32 index;
+ u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
+};
+
+struct mvpp2_cls_lookup_entry {
+ u32 lkpid;
+ u32 way;
+ u32 data;
+};
+
+void mvpp22_init_rss(struct mvpp2_port *port);
+
+void mvpp2_cls_init(struct mvpp2 *priv);
+
+void mvpp2_cls_port_config(struct mvpp2_port *port);
+
+void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port);
+
+#endif
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
new file mode 100644
index 000000000000..0319ed9ef8b8
--- /dev/null
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -0,0 +1,5281 @@
+/*
+ * Driver for Marvell PPv2 network controller for Armada 375 SoC.
+ *
+ * Copyright (C) 2014 Marvell
+ *
+ * Marcin Wojtas <mw@semihalf.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/acpi.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/skbuff.h>
+#include <linux/inetdevice.h>
+#include <linux/mbus.h>
+#include <linux/module.h>
+#include <linux/mfd/syscon.h>
+#include <linux/interrupt.h>
+#include <linux/cpumask.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/phy.h>
+#include <linux/phylink.h>
+#include <linux/phy/phy.h>
+#include <linux/clk.h>
+#include <linux/hrtimer.h>
+#include <linux/ktime.h>
+#include <linux/regmap.h>
+#include <uapi/linux/ppp_defs.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/tso.h>
+
+#include "mvpp2.h"
+#include "mvpp2_prs.h"
+#include "mvpp2_cls.h"
+
+enum mvpp2_bm_pool_log_num {
+ MVPP2_BM_SHORT,
+ MVPP2_BM_LONG,
+ MVPP2_BM_JUMBO,
+ MVPP2_BM_POOLS_NUM
+};
+
+static struct {
+ int pkt_size;
+ int buf_num;
+} mvpp2_pools[MVPP2_BM_POOLS_NUM];
+
+/* The prototype is added here to be used in start_dev when using ACPI. This
+ * will be removed once phylink is used for all modes (dt+ACPI).
+ */
+static void mvpp2_mac_config(struct net_device *dev, unsigned int mode,
+ const struct phylink_link_state *state);
+
+/* Queue modes */
+#define MVPP2_QDIST_SINGLE_MODE 0
+#define MVPP2_QDIST_MULTI_MODE 1
+
+static int queue_mode = MVPP2_QDIST_SINGLE_MODE;
+
+module_param(queue_mode, int, 0444);
+MODULE_PARM_DESC(queue_mode, "Set queue_mode (single=0, multi=1)");
+
+/* Utility/helper methods */
+
+void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
+{
+ writel(data, priv->swth_base[0] + offset);
+}
+
+u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
+{
+ return readl(priv->swth_base[0] + offset);
+}
+
+u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset)
+{
+ return readl_relaxed(priv->swth_base[0] + offset);
+}
+
+/* These accessors should be used to access:
+ *
+ * - per-CPU registers, where each CPU has its own copy of the
+ * register.
+ *
+ * MVPP2_BM_VIRT_ALLOC_REG
+ * MVPP2_BM_ADDR_HIGH_ALLOC
+ * MVPP22_BM_ADDR_HIGH_RLS_REG
+ * MVPP2_BM_VIRT_RLS_REG
+ * MVPP2_ISR_RX_TX_CAUSE_REG
+ * MVPP2_ISR_RX_TX_MASK_REG
+ * MVPP2_TXQ_NUM_REG
+ * MVPP2_AGGR_TXQ_UPDATE_REG
+ * MVPP2_TXQ_RSVD_REQ_REG
+ * MVPP2_TXQ_RSVD_RSLT_REG
+ * MVPP2_TXQ_SENT_REG
+ * MVPP2_RXQ_NUM_REG
+ *
+ * - global registers that must be accessed through a specific CPU
+ * window, because they are related to an access to a per-CPU
+ * register
+ *
+ * MVPP2_BM_PHY_ALLOC_REG (related to MVPP2_BM_VIRT_ALLOC_REG)
+ * MVPP2_BM_PHY_RLS_REG (related to MVPP2_BM_VIRT_RLS_REG)
+ * MVPP2_RXQ_THRESH_REG (related to MVPP2_RXQ_NUM_REG)
+ * MVPP2_RXQ_DESC_ADDR_REG (related to MVPP2_RXQ_NUM_REG)
+ * MVPP2_RXQ_DESC_SIZE_REG (related to MVPP2_RXQ_NUM_REG)
+ * MVPP2_RXQ_INDEX_REG (related to MVPP2_RXQ_NUM_REG)
+ * MVPP2_TXQ_PENDING_REG (related to MVPP2_TXQ_NUM_REG)
+ * MVPP2_TXQ_DESC_ADDR_REG (related to MVPP2_TXQ_NUM_REG)
+ * MVPP2_TXQ_DESC_SIZE_REG (related to MVPP2_TXQ_NUM_REG)
+ * MVPP2_TXQ_INDEX_REG (related to MVPP2_TXQ_NUM_REG)
+ * MVPP2_TXQ_PENDING_REG (related to MVPP2_TXQ_NUM_REG)
+ * MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG)
+ * MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG)
+ */
+void mvpp2_percpu_write(struct mvpp2 *priv, int cpu,
+ u32 offset, u32 data)
+{
+ writel(data, priv->swth_base[cpu] + offset);
+}
+
+u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu,
+ u32 offset)
+{
+ return readl(priv->swth_base[cpu] + offset);
+}
+
+void mvpp2_percpu_write_relaxed(struct mvpp2 *priv, int cpu,
+ u32 offset, u32 data)
+{
+ writel_relaxed(data, priv->swth_base[cpu] + offset);
+}
+
+static u32 mvpp2_percpu_read_relaxed(struct mvpp2 *priv, int cpu,
+ u32 offset)
+{
+ return readl_relaxed(priv->swth_base[cpu] + offset);
+}
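+
+/* Usage sketch (illustrative only): per-CPU accesses are done with
+ * migration disabled so the window matches the executing CPU, e.g.:
+ *
+ *	int cpu = get_cpu();
+ *	u32 val = mvpp2_percpu_read(priv, cpu, MVPP2_TXQ_SENT_REG(txq_id));
+ *	put_cpu();
+ *
+ * where txq_id is a hypothetical physical TXQ number.
+ */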
+
+static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
+ struct mvpp2_tx_desc *tx_desc)
+{
+ if (port->priv->hw_version == MVPP21)
+ return tx_desc->pp21.buf_dma_addr;
+ else
+ return tx_desc->pp22.buf_dma_addr_ptp & MVPP2_DESC_DMA_MASK;
+}
+
+static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
+ struct mvpp2_tx_desc *tx_desc,
+ dma_addr_t dma_addr)
+{
+ dma_addr_t addr, offset;
+
+ addr = dma_addr & ~MVPP2_TX_DESC_ALIGN;
+ offset = dma_addr & MVPP2_TX_DESC_ALIGN;
+
+ if (port->priv->hw_version == MVPP21) {
+ tx_desc->pp21.buf_dma_addr = addr;
+ tx_desc->pp21.packet_offset = offset;
+ } else {
+ u64 val = (u64)addr;
+
+ tx_desc->pp22.buf_dma_addr_ptp &= ~MVPP2_DESC_DMA_MASK;
+ tx_desc->pp22.buf_dma_addr_ptp |= val;
+ tx_desc->pp22.packet_offset = offset;
+ }
+}
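+
+/* Worked example (illustrative only): with MVPP2_DESC_ALIGNED_SIZE == 32,
+ * a DMA address of 0x1005 is split into addr == 0x1000 and offset == 0x5;
+ * the hardware recombines the two when fetching the buffer.
+ */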
+
+static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port,
+ struct mvpp2_tx_desc *tx_desc)
+{
+ if (port->priv->hw_version == MVPP21)
+ return tx_desc->pp21.data_size;
+ else
+ return tx_desc->pp22.data_size;
+}
+
+static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
+ struct mvpp2_tx_desc *tx_desc,
+ size_t size)
+{
+ if (port->priv->hw_version == MVPP21)
+ tx_desc->pp21.data_size = size;
+ else
+ tx_desc->pp22.data_size = size;
+}
+
+static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
+ struct mvpp2_tx_desc *tx_desc,
+ unsigned int txq)
+{
+ if (port->priv->hw_version == MVPP21)
+ tx_desc->pp21.phys_txq = txq;
+ else
+ tx_desc->pp22.phys_txq = txq;
+}
+
+static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
+ struct mvpp2_tx_desc *tx_desc,
+ unsigned int command)
+{
+ if (port->priv->hw_version == MVPP21)
+ tx_desc->pp21.command = command;
+ else
+ tx_desc->pp22.command = command;
+}
+
+static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port,
+ struct mvpp2_tx_desc *tx_desc)
+{
+ if (port->priv->hw_version == MVPP21)
+ return tx_desc->pp21.packet_offset;
+ else
+ return tx_desc->pp22.packet_offset;
+}
+
+static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
+ struct mvpp2_rx_desc *rx_desc)
+{
+ if (port->priv->hw_version == MVPP21)
+ return rx_desc->pp21.buf_dma_addr;
+ else
+ return rx_desc->pp22.buf_dma_addr_key_hash & MVPP2_DESC_DMA_MASK;
+}
+
+static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
+ struct mvpp2_rx_desc *rx_desc)
+{
+ if (port->priv->hw_version == MVPP21)
+ return rx_desc->pp21.buf_cookie;
+ else
+ return rx_desc->pp22.buf_cookie_misc & MVPP2_DESC_DMA_MASK;
+}
+
+static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
+ struct mvpp2_rx_desc *rx_desc)
+{
+ if (port->priv->hw_version == MVPP21)
+ return rx_desc->pp21.data_size;
+ else
+ return rx_desc->pp22.data_size;
+}
+
+static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
+ struct mvpp2_rx_desc *rx_desc)
+{
+ if (port->priv->hw_version == MVPP21)
+ return rx_desc->pp21.status;
+ else
+ return rx_desc->pp22.status;
+}
+
+static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
+{
+ txq_pcpu->txq_get_index++;
+ if (txq_pcpu->txq_get_index == txq_pcpu->size)
+ txq_pcpu->txq_get_index = 0;
+}
+
+static void mvpp2_txq_inc_put(struct mvpp2_port *port,
+ struct mvpp2_txq_pcpu *txq_pcpu,
+ struct sk_buff *skb,
+ struct mvpp2_tx_desc *tx_desc)
+{
+ struct mvpp2_txq_pcpu_buf *tx_buf =
+ txq_pcpu->buffs + txq_pcpu->txq_put_index;
+ tx_buf->skb = skb;
+ tx_buf->size = mvpp2_txdesc_size_get(port, tx_desc);
+ tx_buf->dma = mvpp2_txdesc_dma_addr_get(port, tx_desc) +
+ mvpp2_txdesc_offset_get(port, tx_desc);
+ txq_pcpu->txq_put_index++;
+ if (txq_pcpu->txq_put_index == txq_pcpu->size)
+ txq_pcpu->txq_put_index = 0;
+}
+
+/* Get number of physical egress port */
+static inline int mvpp2_egress_port(struct mvpp2_port *port)
+{
+ return MVPP2_MAX_TCONT + port->id;
+}
+
+/* Get number of physical TXQ */
+static inline int mvpp2_txq_phys(int port, int txq)
+{
+ return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
+}
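+
+/* Worked example (illustrative only): with MVPP2_MAX_TCONT == 16 and
+ * MVPP2_MAX_TXQ == 8, port 1, logical txq 2 maps to physical TXQ
+ * (16 + 1) * 8 + 2 = 138.
+ */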
+
+static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool)
+{
+ if (likely(pool->frag_size <= PAGE_SIZE))
+ return netdev_alloc_frag(pool->frag_size);
+ else
+ return kmalloc(pool->frag_size, GFP_ATOMIC);
+}
+
+static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool, void *data)
+{
+ if (likely(pool->frag_size <= PAGE_SIZE))
+ skb_free_frag(data);
+ else
+ kfree(data);
+}
+
+/* Buffer Manager configuration routines */
+
+/* Create pool */
+static int mvpp2_bm_pool_create(struct platform_device *pdev,
+ struct mvpp2 *priv,
+ struct mvpp2_bm_pool *bm_pool, int size)
+{
+ u32 val;
+
+ /* Number of buffer pointers must be a multiple of 16, as per
+ * hardware constraints
+ */
+ if (!IS_ALIGNED(size, 16))
+ return -EINVAL;
+
+ /* PPv2.1 needs 8 bytes per buffer pointer, PPv2.2 needs 16
+ * bytes per buffer pointer
+ */
+ if (priv->hw_version == MVPP21)
+ bm_pool->size_bytes = 2 * sizeof(u32) * size;
+ else
+ bm_pool->size_bytes = 2 * sizeof(u64) * size;
+
+ bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, bm_pool->size_bytes,
+ &bm_pool->dma_addr,
+ GFP_KERNEL);
+ if (!bm_pool->virt_addr)
+ return -ENOMEM;
+
+ if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
+ MVPP2_BM_POOL_PTR_ALIGN)) {
+ dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
+ bm_pool->virt_addr, bm_pool->dma_addr);
+ dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
+ bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
+ return -ENOMEM;
+ }
+
+ mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
+ lower_32_bits(bm_pool->dma_addr));
+ mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);
+
+ val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
+ val |= MVPP2_BM_START_MASK;
+ mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
+
+ bm_pool->size = size;
+ bm_pool->pkt_size = 0;
+ bm_pool->buf_num = 0;
+
+ return 0;
+}
+
+/* Set pool buffer size */
+static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
+ struct mvpp2_bm_pool *bm_pool,
+ int buf_size)
+{
+ u32 val;
+
+ bm_pool->buf_size = buf_size;
+
+ val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
+ mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
+}
+
+static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,
+ struct mvpp2_bm_pool *bm_pool,
+ dma_addr_t *dma_addr,
+ phys_addr_t *phys_addr)
+{
+ int cpu = get_cpu();
+
+ *dma_addr = mvpp2_percpu_read(priv, cpu,
+ MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
+ *phys_addr = mvpp2_percpu_read(priv, cpu, MVPP2_BM_VIRT_ALLOC_REG);
+
+ if (priv->hw_version == MVPP22) {
+ u32 val;
+ u32 dma_addr_highbits, phys_addr_highbits;
+
+ val = mvpp2_percpu_read(priv, cpu, MVPP22_BM_ADDR_HIGH_ALLOC);
+ dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK);
+ phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >>
+ MVPP22_BM_ADDR_HIGH_VIRT_SHIFT;
+
+ if (sizeof(dma_addr_t) == 8)
+ *dma_addr |= (u64)dma_addr_highbits << 32;
+
+ if (sizeof(phys_addr_t) == 8)
+ *phys_addr |= (u64)phys_addr_highbits << 32;
+ }
+
+ put_cpu();
+}
+
+/* Free all buffers from the pool */
+static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
+ struct mvpp2_bm_pool *bm_pool, int buf_num)
+{
+ int i;
+
+ if (buf_num > bm_pool->buf_num) {
+ WARN(1, "Pool does not have so many bufs pool(%d) bufs(%d)\n",
+ bm_pool->id, buf_num);
+ buf_num = bm_pool->buf_num;
+ }
+
+ for (i = 0; i < buf_num; i++) {
+ dma_addr_t buf_dma_addr;
+ phys_addr_t buf_phys_addr;
+ void *data;
+
+ mvpp2_bm_bufs_get_addrs(dev, priv, bm_pool,
+ &buf_dma_addr, &buf_phys_addr);
+
+ dma_unmap_single(dev, buf_dma_addr,
+ bm_pool->buf_size, DMA_FROM_DEVICE);
+
+ data = (void *)phys_to_virt(buf_phys_addr);
+ if (!data)
+ break;
+
+ mvpp2_frag_free(bm_pool, data);
+ }
+
+ /* Update BM driver with number of buffers removed from pool */
+ bm_pool->buf_num -= i;
+}
+
+/* Check number of buffers in BM pool */
+static int mvpp2_check_hw_buf_num(struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool)
+{
+ int buf_num = 0;
+
+ buf_num += mvpp2_read(priv, MVPP2_BM_POOL_PTRS_NUM_REG(bm_pool->id)) &
+ MVPP22_BM_POOL_PTRS_NUM_MASK;
+ buf_num += mvpp2_read(priv, MVPP2_BM_BPPI_PTRS_NUM_REG(bm_pool->id)) &
+ MVPP2_BM_BPPI_PTR_NUM_MASK;
+
+ /* HW has one buffer ready which is not reflected in the counters */
+ if (buf_num)
+ buf_num += 1;
+
+ return buf_num;
+}
+
+/* Cleanup pool */
+static int mvpp2_bm_pool_destroy(struct platform_device *pdev,
+ struct mvpp2 *priv,
+ struct mvpp2_bm_pool *bm_pool)
+{
+ int buf_num;
+ u32 val;
+
+ buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
+ mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool, buf_num);
+
+ /* Check buffer counters after free */
+ buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
+ if (buf_num) {
+ WARN(1, "cannot free all buffers in pool %d, buf_num left %d\n",
+ bm_pool->id, bm_pool->buf_num);
+ return 0;
+ }
+
+ val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
+ val |= MVPP2_BM_STOP_MASK;
+ mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
+
+ dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
+ bm_pool->virt_addr,
+ bm_pool->dma_addr);
+ return 0;
+}
+
+static int mvpp2_bm_pools_init(struct platform_device *pdev,
+ struct mvpp2 *priv)
+{
+ int i, err, size;
+ struct mvpp2_bm_pool *bm_pool;
+
+ /* Create all pools with maximum size */
+ size = MVPP2_BM_POOL_SIZE_MAX;
+ for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
+ bm_pool = &priv->bm_pools[i];
+ bm_pool->id = i;
+ err = mvpp2_bm_pool_create(pdev, priv, bm_pool, size);
+ if (err)
+ goto err_unroll_pools;
+ mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
+ }
+ return 0;
+
+err_unroll_pools:
+ dev_err(&pdev->dev, "failed to create BM pool %d, size %d\n", i, size);
+ for (i = i - 1; i >= 0; i--)
+ mvpp2_bm_pool_destroy(pdev, priv, &priv->bm_pools[i]);
+ return err;
+}
+
+static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv)
+{
+ int i, err;
+
+ for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
+ /* Mask BM all interrupts */
+ mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
+ /* Clear BM cause register */
+ mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
+ }
+
+ /* Allocate and initialize BM pools */
+ priv->bm_pools = devm_kcalloc(&pdev->dev, MVPP2_BM_POOLS_NUM,
+ sizeof(*priv->bm_pools), GFP_KERNEL);
+ if (!priv->bm_pools)
+ return -ENOMEM;
+
+ err = mvpp2_bm_pools_init(pdev, priv);
+ if (err < 0)
+ return err;
+ return 0;
+}
+
+static void mvpp2_setup_bm_pool(void)
+{
+ /* Short pool */
+ mvpp2_pools[MVPP2_BM_SHORT].buf_num = MVPP2_BM_SHORT_BUF_NUM;
+ mvpp2_pools[MVPP2_BM_SHORT].pkt_size = MVPP2_BM_SHORT_PKT_SIZE;
+
+ /* Long pool */
+ mvpp2_pools[MVPP2_BM_LONG].buf_num = MVPP2_BM_LONG_BUF_NUM;
+ mvpp2_pools[MVPP2_BM_LONG].pkt_size = MVPP2_BM_LONG_PKT_SIZE;
+
+ /* Jumbo pool */
+ mvpp2_pools[MVPP2_BM_JUMBO].buf_num = MVPP2_BM_JUMBO_BUF_NUM;
+ mvpp2_pools[MVPP2_BM_JUMBO].pkt_size = MVPP2_BM_JUMBO_PKT_SIZE;
+}
+
+/* Attach long pool to rxq */
+static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
+ int lrxq, int long_pool)
+{
+ u32 val, mask;
+ int prxq;
+
+ /* Get queue physical ID */
+ prxq = port->rxqs[lrxq]->id;
+
+ if (port->priv->hw_version == MVPP21)
+ mask = MVPP21_RXQ_POOL_LONG_MASK;
+ else
+ mask = MVPP22_RXQ_POOL_LONG_MASK;
+
+ val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
+ val &= ~mask;
+ val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask;
+ mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
+}
+
+/* Attach short pool to rxq */
+static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
+ int lrxq, int short_pool)
+{
+ u32 val, mask;
+ int prxq;
+
+ /* Get queue physical ID */
+ prxq = port->rxqs[lrxq]->id;
+
+ if (port->priv->hw_version == MVPP21)
+ mask = MVPP21_RXQ_POOL_SHORT_MASK;
+ else
+ mask = MVPP22_RXQ_POOL_SHORT_MASK;
+
+ val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
+ val &= ~mask;
+ val |= (short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) & mask;
+ mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
+}
+
+static void *mvpp2_buf_alloc(struct mvpp2_port *port,
+ struct mvpp2_bm_pool *bm_pool,
+ dma_addr_t *buf_dma_addr,
+ phys_addr_t *buf_phys_addr,
+ gfp_t gfp_mask)
+{
+ dma_addr_t dma_addr;
+ void *data;
+
+ data = mvpp2_frag_alloc(bm_pool);
+ if (!data)
+ return NULL;
+
+ dma_addr = dma_map_single(port->dev->dev.parent, data,
+ MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
+ mvpp2_frag_free(bm_pool, data);
+ return NULL;
+ }
+ *buf_dma_addr = dma_addr;
+ *buf_phys_addr = virt_to_phys(data);
+
+ return data;
+}
+
+/* Release buffer to BM */
+static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
+ dma_addr_t buf_dma_addr,
+ phys_addr_t buf_phys_addr)
+{
+ int cpu = get_cpu();
+
+ if (port->priv->hw_version == MVPP22) {
+ u32 val = 0;
+
+ if (sizeof(dma_addr_t) == 8)
+ val |= upper_32_bits(buf_dma_addr) &
+ MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;
+
+ if (sizeof(phys_addr_t) == 8)
+ val |= (upper_32_bits(buf_phys_addr)
+ << MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
+ MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;
+
+ mvpp2_percpu_write_relaxed(port->priv, cpu,
+ MVPP22_BM_ADDR_HIGH_RLS_REG, val);
+ }
+
+	/* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and is simply
+	 * returned in the "cookie" field of the RX descriptor. Instead
+	 * of storing the virtual address, we store the physical address.
+	 */
+ mvpp2_percpu_write_relaxed(port->priv, cpu,
+ MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
+ mvpp2_percpu_write_relaxed(port->priv, cpu,
+ MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
+
+ put_cpu();
+}
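+
+/* Illustrative note: with the 40-bit MVPP2_DESC_DMA_MASK, bits [39:32] of
+ * the buffer DMA address are released through MVPP22_BM_ADDR_HIGH_RLS_REG
+ * while the low 32 bits go through MVPP2_BM_PHY_RLS_REG(pool); the
+ * get_cpu()/put_cpu() pair above keeps these paired writes on the same
+ * per-CPU register window.
+ */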
+
+/* Allocate buffers for the pool */
+static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
+ struct mvpp2_bm_pool *bm_pool, int buf_num)
+{
+ int i, buf_size, total_size;
+ dma_addr_t dma_addr;
+ phys_addr_t phys_addr;
+ void *buf;
+
+ buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
+ total_size = MVPP2_RX_TOTAL_SIZE(buf_size);
+
+ if (buf_num < 0 ||
+ (buf_num + bm_pool->buf_num > bm_pool->size)) {
+ netdev_err(port->dev,
+ "cannot allocate %d buffers for pool %d\n",
+ buf_num, bm_pool->id);
+ return 0;
+ }
+
+ for (i = 0; i < buf_num; i++) {
+ buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr,
+ &phys_addr, GFP_KERNEL);
+ if (!buf)
+ break;
+
+ mvpp2_bm_pool_put(port, bm_pool->id, dma_addr,
+ phys_addr);
+ }
+
+ /* Update BM driver with number of buffers added to pool */
+ bm_pool->buf_num += i;
+
+ netdev_dbg(port->dev,
+ "pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
+ bm_pool->id, bm_pool->pkt_size, buf_size, total_size);
+
+ netdev_dbg(port->dev,
+ "pool %d: %d of %d buffers added\n",
+ bm_pool->id, i, buf_num);
+ return i;
+}
+
+/* Notify the driver that BM pool is being used as a specific type and
+ * return the pool pointer on success
+ */
+static struct mvpp2_bm_pool *
+mvpp2_bm_pool_use(struct mvpp2_port *port, unsigned pool, int pkt_size)
+{
+ struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
+ int num;
+
+ if (pool >= MVPP2_BM_POOLS_NUM) {
+ netdev_err(port->dev, "Invalid pool %d\n", pool);
+ return NULL;
+ }
+
+ /* Allocate buffers in case BM pool is used as long pool, but packet
+	 * size doesn't match MTU or BM pool hasn't been used yet
+ */
+ if (new_pool->pkt_size == 0) {
+ int pkts_num;
+
+ /* Set default buffer number or free all the buffers in case
+ * the pool is not empty
+ */
+ pkts_num = new_pool->buf_num;
+ if (pkts_num == 0)
+ pkts_num = mvpp2_pools[pool].buf_num;
+ else
+ mvpp2_bm_bufs_free(port->dev->dev.parent,
+ port->priv, new_pool, pkts_num);
+
+ new_pool->pkt_size = pkt_size;
+ new_pool->frag_size =
+ SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
+ MVPP2_SKB_SHINFO_SIZE;
+
+ /* Allocate buffers for this pool */
+ num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
+ if (num != pkts_num) {
+ WARN(1, "pool %d: %d of %d allocated\n",
+ new_pool->id, num, pkts_num);
+ return NULL;
+ }
+ }
+
+ mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
+ MVPP2_RX_BUF_SIZE(new_pool->pkt_size));
+
+ return new_pool;
+}
+
+/* Initialize pools for swf */
+static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
+{
+ int rxq;
+ enum mvpp2_bm_pool_log_num long_log_pool, short_log_pool;
+
+ /* If port pkt_size is higher than 1518B:
+ * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
+ * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
+ */
+ if (port->pkt_size > MVPP2_BM_LONG_PKT_SIZE) {
+ long_log_pool = MVPP2_BM_JUMBO;
+ short_log_pool = MVPP2_BM_LONG;
+ } else {
+ long_log_pool = MVPP2_BM_LONG;
+ short_log_pool = MVPP2_BM_SHORT;
+ }
+
+ if (!port->pool_long) {
+ port->pool_long =
+ mvpp2_bm_pool_use(port, long_log_pool,
+ mvpp2_pools[long_log_pool].pkt_size);
+ if (!port->pool_long)
+ return -ENOMEM;
+
+ port->pool_long->port_map |= BIT(port->id);
+
+ for (rxq = 0; rxq < port->nrxqs; rxq++)
+ mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
+ }
+
+ if (!port->pool_short) {
+ port->pool_short =
+ mvpp2_bm_pool_use(port, short_log_pool,
+ mvpp2_pools[short_log_pool].pkt_size);
+ if (!port->pool_short)
+ return -ENOMEM;
+
+ port->pool_short->port_map |= BIT(port->id);
+
+ for (rxq = 0; rxq < port->nrxqs; rxq++)
+ mvpp2_rxq_short_pool_set(port, rxq,
+ port->pool_short->id);
+ }
+
+ return 0;
+}
+
+static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+ enum mvpp2_bm_pool_log_num new_long_pool;
+ int pkt_size = MVPP2_RX_PKT_SIZE(mtu);
+
+ /* If port MTU is higher than 1518B:
+ * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
+ * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
+ */
+ if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
+ new_long_pool = MVPP2_BM_JUMBO;
+ else
+ new_long_pool = MVPP2_BM_LONG;
+
+ if (new_long_pool != port->pool_long->id) {
+ /* Remove port from old short & long pool */
+ port->pool_long = mvpp2_bm_pool_use(port, port->pool_long->id,
+ port->pool_long->pkt_size);
+ port->pool_long->port_map &= ~BIT(port->id);
+ port->pool_long = NULL;
+
+ port->pool_short = mvpp2_bm_pool_use(port, port->pool_short->id,
+ port->pool_short->pkt_size);
+ port->pool_short->port_map &= ~BIT(port->id);
+ port->pool_short = NULL;
+
+ port->pkt_size = pkt_size;
+
+ /* Add port to new short & long pool */
+ mvpp2_swf_bm_pool_init(port);
+
+		/* Update L4 checksum when jumbo is enabled/disabled on the port */
+ if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) {
+ dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+ dev->hw_features &= ~(NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM);
+ } else {
+ dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ }
+ }
+
+ dev->mtu = mtu;
+ dev->wanted_features = dev->features;
+
+ netdev_update_features(dev);
+ return 0;
+}
+
+static inline void mvpp2_interrupts_enable(struct mvpp2_port *port)
+{
+ int i, sw_thread_mask = 0;
+
+ for (i = 0; i < port->nqvecs; i++)
+ sw_thread_mask |= port->qvecs[i].sw_thread_mask;
+
+ mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
+ MVPP2_ISR_ENABLE_INTERRUPT(sw_thread_mask));
+}
+
+static inline void mvpp2_interrupts_disable(struct mvpp2_port *port)
+{
+ int i, sw_thread_mask = 0;
+
+ for (i = 0; i < port->nqvecs; i++)
+ sw_thread_mask |= port->qvecs[i].sw_thread_mask;
+
+ mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
+ MVPP2_ISR_DISABLE_INTERRUPT(sw_thread_mask));
+}
+
+static inline void mvpp2_qvec_interrupt_enable(struct mvpp2_queue_vector *qvec)
+{
+ struct mvpp2_port *port = qvec->port;
+
+ mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
+ MVPP2_ISR_ENABLE_INTERRUPT(qvec->sw_thread_mask));
+}
+
+static inline void mvpp2_qvec_interrupt_disable(struct mvpp2_queue_vector *qvec)
+{
+ struct mvpp2_port *port = qvec->port;
+
+ mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
+ MVPP2_ISR_DISABLE_INTERRUPT(qvec->sw_thread_mask));
+}
+
+/* Mask the current CPU's Rx/Tx interrupts.
+ * Called by on_each_cpu(), guaranteed to run with migration disabled,
+ * so using smp_processor_id() is OK.
+ */
+static void mvpp2_interrupts_mask(void *arg)
+{
+ struct mvpp2_port *port = arg;
+
+ mvpp2_percpu_write(port->priv, smp_processor_id(),
+ MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
+}
+
+/* Unmask the current CPU's Rx/Tx interrupts.
+ * Called by on_each_cpu(), guaranteed to run with migration disabled,
+ * so using smp_processor_id() is OK.
+ */
+static void mvpp2_interrupts_unmask(void *arg)
+{
+ struct mvpp2_port *port = arg;
+ u32 val;
+
+ val = MVPP2_CAUSE_MISC_SUM_MASK |
+ MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
+ if (port->has_tx_irqs)
+ val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
+
+ mvpp2_percpu_write(port->priv, smp_processor_id(),
+ MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
+}
+
+static void
+mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask)
+{
+ u32 val;
+ int i;
+
+ if (port->priv->hw_version != MVPP22)
+ return;
+
+ if (mask)
+ val = 0;
+ else
+ val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
+
+ for (i = 0; i < port->nqvecs; i++) {
+ struct mvpp2_queue_vector *v = port->qvecs + i;
+
+ if (v->type != MVPP2_QUEUE_VECTOR_SHARED)
+ continue;
+
+ mvpp2_percpu_write(port->priv, v->sw_thread_id,
+ MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
+ }
+}
+
+/* Port configuration routines */
+
+static void mvpp22_gop_init_rgmii(struct mvpp2_port *port)
+{
+ struct mvpp2 *priv = port->priv;
+ u32 val;
+
+ regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
+ val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT;
+ regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);
+
+ regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
+ if (port->gop_id == 2)
+ val |= GENCONF_CTRL0_PORT0_RGMII | GENCONF_CTRL0_PORT1_RGMII;
+ else if (port->gop_id == 3)
+ val |= GENCONF_CTRL0_PORT1_RGMII_MII;
+ regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
+}
+
+static void mvpp22_gop_init_sgmii(struct mvpp2_port *port)
+{
+ struct mvpp2 *priv = port->priv;
+ u32 val;
+
+ regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
+ val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT |
+ GENCONF_PORT_CTRL0_RX_DATA_SAMPLE;
+ regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);
+
+ if (port->gop_id > 1) {
+ regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
+ if (port->gop_id == 2)
+ val &= ~GENCONF_CTRL0_PORT0_RGMII;
+ else if (port->gop_id == 3)
+ val &= ~GENCONF_CTRL0_PORT1_RGMII_MII;
+ regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
+ }
+}
+
+static void mvpp22_gop_init_10gkr(struct mvpp2_port *port)
+{
+ struct mvpp2 *priv = port->priv;
+ void __iomem *mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
+ void __iomem *xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
+ u32 val;
+
+ /* XPCS */
+ val = readl(xpcs + MVPP22_XPCS_CFG0);
+ val &= ~(MVPP22_XPCS_CFG0_PCS_MODE(0x3) |
+ MVPP22_XPCS_CFG0_ACTIVE_LANE(0x3));
+ val |= MVPP22_XPCS_CFG0_ACTIVE_LANE(2);
+ writel(val, xpcs + MVPP22_XPCS_CFG0);
+
+ /* MPCS */
+ val = readl(mpcs + MVPP22_MPCS_CTRL);
+ val &= ~MVPP22_MPCS_CTRL_FWD_ERR_CONN;
+ writel(val, mpcs + MVPP22_MPCS_CTRL);
+
+ val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
+ val &= ~(MVPP22_MPCS_CLK_RESET_DIV_RATIO(0x7) | MAC_CLK_RESET_MAC |
+ MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX);
+ val |= MVPP22_MPCS_CLK_RESET_DIV_RATIO(1);
+ writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
+
+ val &= ~MVPP22_MPCS_CLK_RESET_DIV_SET;
+ val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX;
+ writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
+}
+
+static int mvpp22_gop_init(struct mvpp2_port *port)
+{
+ struct mvpp2 *priv = port->priv;
+ u32 val;
+
+ if (!priv->sysctrl_base)
+ return 0;
+
+ switch (port->phy_interface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ if (port->gop_id == 0)
+ goto invalid_conf;
+ mvpp22_gop_init_rgmii(port);
+ break;
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ mvpp22_gop_init_sgmii(port);
+ break;
+ case PHY_INTERFACE_MODE_10GKR:
+ if (port->gop_id != 0)
+ goto invalid_conf;
+ mvpp22_gop_init_10gkr(port);
+ break;
+ default:
+ goto unsupported_conf;
+ }
+
+ regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL1, &val);
+ val |= GENCONF_PORT_CTRL1_RESET(port->gop_id) |
+ GENCONF_PORT_CTRL1_EN(port->gop_id);
+ regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL1, val);
+
+ regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
+ val |= GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR;
+ regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);
+
+ regmap_read(priv->sysctrl_base, GENCONF_SOFT_RESET1, &val);
+ val |= GENCONF_SOFT_RESET1_GOP;
+ regmap_write(priv->sysctrl_base, GENCONF_SOFT_RESET1, val);
+
+unsupported_conf:
+ return 0;
+
+invalid_conf:
+ netdev_err(port->dev, "Invalid port configuration\n");
+ return -EINVAL;
+}
+
+static void mvpp22_gop_unmask_irq(struct mvpp2_port *port)
+{
+ u32 val;
+
+ if (phy_interface_mode_is_rgmii(port->phy_interface) ||
+ port->phy_interface == PHY_INTERFACE_MODE_SGMII ||
+ port->phy_interface == PHY_INTERFACE_MODE_1000BASEX ||
+ port->phy_interface == PHY_INTERFACE_MODE_2500BASEX) {
+ /* Enable the GMAC link status irq for this port */
+ val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
+ val |= MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
+ writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
+ }
+
+ if (port->gop_id == 0) {
+ /* Enable the XLG/GIG irqs for this port */
+ val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
+ if (port->phy_interface == PHY_INTERFACE_MODE_10GKR)
+ val |= MVPP22_XLG_EXT_INT_MASK_XLG;
+ else
+ val |= MVPP22_XLG_EXT_INT_MASK_GIG;
+ writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
+ }
+}
+
+static void mvpp22_gop_mask_irq(struct mvpp2_port *port)
+{
+ u32 val;
+
+ if (port->gop_id == 0) {
+ val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
+ val &= ~(MVPP22_XLG_EXT_INT_MASK_XLG |
+ MVPP22_XLG_EXT_INT_MASK_GIG);
+ writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
+ }
+
+ if (phy_interface_mode_is_rgmii(port->phy_interface) ||
+ port->phy_interface == PHY_INTERFACE_MODE_SGMII ||
+ port->phy_interface == PHY_INTERFACE_MODE_1000BASEX ||
+ port->phy_interface == PHY_INTERFACE_MODE_2500BASEX) {
+ val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
+ val &= ~MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
+ writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
+ }
+}
+
+static void mvpp22_gop_setup_irq(struct mvpp2_port *port)
+{
+ u32 val;
+
+ if (phy_interface_mode_is_rgmii(port->phy_interface) ||
+ port->phy_interface == PHY_INTERFACE_MODE_SGMII ||
+ port->phy_interface == PHY_INTERFACE_MODE_1000BASEX ||
+ port->phy_interface == PHY_INTERFACE_MODE_2500BASEX) {
+ val = readl(port->base + MVPP22_GMAC_INT_MASK);
+ val |= MVPP22_GMAC_INT_MASK_LINK_STAT;
+ writel(val, port->base + MVPP22_GMAC_INT_MASK);
+ }
+
+ if (port->gop_id == 0) {
+ val = readl(port->base + MVPP22_XLG_INT_MASK);
+ val |= MVPP22_XLG_INT_MASK_LINK;
+ writel(val, port->base + MVPP22_XLG_INT_MASK);
+ }
+
+ mvpp22_gop_unmask_irq(port);
+}
+
+/* Sets the PHY mode of the COMPHY (which configures the serdes lanes).
+ *
+ * The PHY mode used by the PPv2 driver comes from the network subsystem, while
+ * the one given to the COMPHY comes from the generic PHY subsystem. Hence they
+ * differ.
+ *
+ * The COMPHY configures the serdes lanes regardless of the actual use of the
+ * lanes by the physical layer. This is why configurations like
+ * "PPv2 (2500BaseX) - COMPHY (2500SGMII)" are valid.
+ */
+static int mvpp22_comphy_init(struct mvpp2_port *port)
+{
+ enum phy_mode mode;
+ int ret;
+
+ if (!port->comphy)
+ return 0;
+
+ switch (port->phy_interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ mode = PHY_MODE_SGMII;
+ break;
+ case PHY_INTERFACE_MODE_2500BASEX:
+ mode = PHY_MODE_2500SGMII;
+ break;
+ case PHY_INTERFACE_MODE_10GKR:
+ mode = PHY_MODE_10GKR;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = phy_set_mode(port->comphy, mode);
+ if (ret)
+ return ret;
+
+ return phy_power_on(port->comphy);
+}
+
+static void mvpp2_port_enable(struct mvpp2_port *port)
+{
+ u32 val;
+
+ /* Only GOP port 0 has an XLG MAC */
+ if (port->gop_id == 0 &&
+ (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
+ port->phy_interface == PHY_INTERFACE_MODE_10GKR)) {
+ val = readl(port->base + MVPP22_XLG_CTRL0_REG);
+ val |= MVPP22_XLG_CTRL0_PORT_EN |
+ MVPP22_XLG_CTRL0_MAC_RESET_DIS;
+ val &= ~MVPP22_XLG_CTRL0_MIB_CNT_DIS;
+ writel(val, port->base + MVPP22_XLG_CTRL0_REG);
+ } else {
+ val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
+ val |= MVPP2_GMAC_PORT_EN_MASK;
+ val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
+ writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
+ }
+}
+
+static void mvpp2_port_disable(struct mvpp2_port *port)
+{
+ u32 val;
+
+ /* Only GOP port 0 has an XLG MAC */
+ if (port->gop_id == 0 &&
+ (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
+ port->phy_interface == PHY_INTERFACE_MODE_10GKR)) {
+ val = readl(port->base + MVPP22_XLG_CTRL0_REG);
+ val &= ~MVPP22_XLG_CTRL0_PORT_EN;
+ writel(val, port->base + MVPP22_XLG_CTRL0_REG);
+
+ /* Disable & reset should be done separately */
+ val &= ~MVPP22_XLG_CTRL0_MAC_RESET_DIS;
+ writel(val, port->base + MVPP22_XLG_CTRL0_REG);
+ } else {
+ val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
+ val &= ~(MVPP2_GMAC_PORT_EN_MASK);
+ writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
+ }
+}
+
+/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
+static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
+{
+ u32 val;
+
+ val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
+ ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
+ writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
+}
+
+/* Configure loopback port */
+static void mvpp2_port_loopback_set(struct mvpp2_port *port,
+ const struct phylink_link_state *state)
+{
+ u32 val;
+
+ val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
+
+ if (state->speed == 1000)
+ val |= MVPP2_GMAC_GMII_LB_EN_MASK;
+ else
+ val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;
+
+ if (port->phy_interface == PHY_INTERFACE_MODE_SGMII ||
+ port->phy_interface == PHY_INTERFACE_MODE_1000BASEX ||
+ port->phy_interface == PHY_INTERFACE_MODE_2500BASEX)
+ val |= MVPP2_GMAC_PCS_LB_EN_MASK;
+ else
+ val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;
+
+ writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
+}
+
+struct mvpp2_ethtool_counter {
+ unsigned int offset;
+ const char string[ETH_GSTRING_LEN];
+ bool reg_is_64b;
+};
+
+static u64 mvpp2_read_count(struct mvpp2_port *port,
+ const struct mvpp2_ethtool_counter *counter)
+{
+ u64 val;
+
+ val = readl(port->stats_base + counter->offset);
+ if (counter->reg_is_64b)
+ val += (u64)readl(port->stats_base + counter->offset + 4) << 32;
+
+ return val;
+}
+
+/* Because software statistics and hardware statistics are, by design,
+ * incremented at different moments in the chain of packet processing, it
+ * is very likely that incoming packets could have been dropped after
+ * being counted by hardware but before reaching software statistics
+ * (most probably multicast packets). In the opposite direction, during
+ * transmission, FCS bytes are added in between, and TSO skbs will be
+ * split, with header bytes added. Hence, statistics gathered from
+ * userspace with ifconfig (software) and ethtool (hardware) cannot be
+ * compared.
+ */
+static const struct mvpp2_ethtool_counter mvpp2_ethtool_regs[] = {
+ { MVPP2_MIB_GOOD_OCTETS_RCVD, "good_octets_received", true },
+ { MVPP2_MIB_BAD_OCTETS_RCVD, "bad_octets_received" },
+ { MVPP2_MIB_CRC_ERRORS_SENT, "crc_errors_sent" },
+ { MVPP2_MIB_UNICAST_FRAMES_RCVD, "unicast_frames_received" },
+ { MVPP2_MIB_BROADCAST_FRAMES_RCVD, "broadcast_frames_received" },
+ { MVPP2_MIB_MULTICAST_FRAMES_RCVD, "multicast_frames_received" },
+ { MVPP2_MIB_FRAMES_64_OCTETS, "frames_64_octets" },
+ { MVPP2_MIB_FRAMES_65_TO_127_OCTETS, "frames_65_to_127_octet" },
+ { MVPP2_MIB_FRAMES_128_TO_255_OCTETS, "frames_128_to_255_octet" },
+ { MVPP2_MIB_FRAMES_256_TO_511_OCTETS, "frames_256_to_511_octet" },
+ { MVPP2_MIB_FRAMES_512_TO_1023_OCTETS, "frames_512_to_1023_octet" },
+ { MVPP2_MIB_FRAMES_1024_TO_MAX_OCTETS, "frames_1024_to_max_octet" },
+ { MVPP2_MIB_GOOD_OCTETS_SENT, "good_octets_sent", true },
+ { MVPP2_MIB_UNICAST_FRAMES_SENT, "unicast_frames_sent" },
+ { MVPP2_MIB_MULTICAST_FRAMES_SENT, "multicast_frames_sent" },
+ { MVPP2_MIB_BROADCAST_FRAMES_SENT, "broadcast_frames_sent" },
+ { MVPP2_MIB_FC_SENT, "fc_sent" },
+ { MVPP2_MIB_FC_RCVD, "fc_received" },
+ { MVPP2_MIB_RX_FIFO_OVERRUN, "rx_fifo_overrun" },
+ { MVPP2_MIB_UNDERSIZE_RCVD, "undersize_received" },
+ { MVPP2_MIB_FRAGMENTS_RCVD, "fragments_received" },
+ { MVPP2_MIB_OVERSIZE_RCVD, "oversize_received" },
+ { MVPP2_MIB_JABBER_RCVD, "jabber_received" },
+ { MVPP2_MIB_MAC_RCV_ERROR, "mac_receive_error" },
+ { MVPP2_MIB_BAD_CRC_EVENT, "bad_crc_event" },
+ { MVPP2_MIB_COLLISION, "collision" },
+ { MVPP2_MIB_LATE_COLLISION, "late_collision" },
+};
+
+static void mvpp2_ethtool_get_strings(struct net_device *netdev, u32 sset,
+ u8 *data)
+{
+ if (sset == ETH_SS_STATS) {
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++)
+ memcpy(data + i * ETH_GSTRING_LEN,
+ &mvpp2_ethtool_regs[i].string, ETH_GSTRING_LEN);
+ }
+}
+
+static void mvpp2_gather_hw_statistics(struct work_struct *work)
+{
+ struct delayed_work *del_work = to_delayed_work(work);
+ struct mvpp2_port *port = container_of(del_work, struct mvpp2_port,
+ stats_work);
+ u64 *pstats;
+ int i;
+
+ mutex_lock(&port->gather_stats_lock);
+
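+ /* The MIB counters clear on read (see mvpp2_port_reset()), so
+ * accumulate each snapshot into the software copy rather than
+ * overwriting it.
+ */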
+ pstats = port->ethtool_stats;
+ for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++)
+ *pstats++ += mvpp2_read_count(port, &mvpp2_ethtool_regs[i]);
+
+ /* The counters were just read, so if this was called synchronously
+ * (i.e. from ethtool), push the next periodic refresh back by a
+ * full period.
+ */
+ cancel_delayed_work(&port->stats_work);
+ queue_delayed_work(port->priv->stats_queue, &port->stats_work,
+ MVPP2_MIB_COUNTERS_STATS_DELAY);
+
+ mutex_unlock(&port->gather_stats_lock);
+}
+
+static void mvpp2_ethtool_get_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ /* Update statistics for the given port, then take the lock to avoid
+ * concurrent accesses to the ethtool_stats structure during its copy.
+ */
+ mvpp2_gather_hw_statistics(&port->stats_work.work);
+
+ mutex_lock(&port->gather_stats_lock);
+ memcpy(data, port->ethtool_stats,
+ sizeof(u64) * ARRAY_SIZE(mvpp2_ethtool_regs));
+ mutex_unlock(&port->gather_stats_lock);
+}
+
+static int mvpp2_ethtool_get_sset_count(struct net_device *dev, int sset)
+{
+ if (sset == ETH_SS_STATS)
+ return ARRAY_SIZE(mvpp2_ethtool_regs);
+
+ return -EOPNOTSUPP;
+}
+
+static void mvpp2_port_reset(struct mvpp2_port *port)
+{
+ u32 val;
+ unsigned int i;
+
+ /* Read the GOP statistics to reset the hardware counters */
+ for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++)
+ mvpp2_read_count(port, &mvpp2_ethtool_regs[i]);
+
+ val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
+ ~MVPP2_GMAC_PORT_RESET_MASK;
+ writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
+
+ while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
+ MVPP2_GMAC_PORT_RESET_MASK)
+ continue;
+}
+
+/* Change maximum receive size of the port */
+static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
+{
+ u32 val;
+
+ val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
+ val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
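+ /* The limit field counts 2-byte units, hence the division by two;
+ * the Marvell header is excluded from the MAC frame size.
+ */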
+ val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
+ MVPP2_GMAC_MAX_RX_SIZE_OFFS);
+ writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
+}
+
+/* Change maximum receive size of the port */
+static inline void mvpp2_xlg_max_rx_size_set(struct mvpp2_port *port)
+{
+ u32 val;
+
+ val = readl(port->base + MVPP22_XLG_CTRL1_REG);
+ val &= ~MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK;
+ val |= ((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
+ MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS;
+ writel(val, port->base + MVPP22_XLG_CTRL1_REG);
+}
+
+/* Set defaults for the MVPP2 port */
+static void mvpp2_defaults_set(struct mvpp2_port *port)
+{
+ int tx_port_num, val, queue, ptxq, lrxq;
+
+ if (port->priv->hw_version == MVPP21) {
+ /* Update TX FIFO MIN Threshold */
+ val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
+ val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
+ /* Min. TX threshold must be less than minimal packet length */
+ val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
+ writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
+ }
+
+ /* Disable Legacy WRR, Disable EJP, Release from reset */
+ tx_port_num = mvpp2_egress_port(port);
+ mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
+ tx_port_num);
+ mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);
+
+ /* Close bandwidth for all queues */
+ for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
+ ptxq = mvpp2_txq_phys(port->id, queue);
+ mvpp2_write(port->priv,
+ MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0);
+ }
+
+ /* Set refill period to 1 usec (the period register counts tclk
+ * cycles, so tclk / USEC_PER_SEC cycles elapse in one microsecond),
+ * refill tokens and bucket size to maximum
+ */
+ mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG,
+ port->priv->tclk / USEC_PER_SEC);
+ val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
+ val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
+ val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
+ val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
+ mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
+ val = MVPP2_TXP_TOKEN_SIZE_MAX;
+ mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
+
+ /* Set MaximumLowLatencyPacketSize value to 256 */
+ mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
+ MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
+ MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));
+
+ /* Enable Rx cache snoop */
+ for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
+ queue = port->rxqs[lrxq]->id;
+ val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
+ val |= MVPP2_SNOOP_PKT_SIZE_MASK |
+ MVPP2_SNOOP_BUF_HDR_MASK;
+ mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
+ }
+
+ /* By default, mask all interrupts on all present CPUs */
+ mvpp2_interrupts_disable(port);
+}
+
+/* Enable/disable receiving packets */
+static void mvpp2_ingress_enable(struct mvpp2_port *port)
+{
+ u32 val;
+ int lrxq, queue;
+
+ for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
+ queue = port->rxqs[lrxq]->id;
+ val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
+ val &= ~MVPP2_RXQ_DISABLE_MASK;
+ mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
+ }
+}
+
+static void mvpp2_ingress_disable(struct mvpp2_port *port)
+{
+ u32 val;
+ int lrxq, queue;
+
+ for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
+ queue = port->rxqs[lrxq]->id;
+ val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
+ val |= MVPP2_RXQ_DISABLE_MASK;
+ mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
+ }
+}
+
+/* Enable transmit via physical egress queue
+ * - HW starts taking descriptors from DRAM
+ */
+static void mvpp2_egress_enable(struct mvpp2_port *port)
+{
+ u32 qmap;
+ int queue;
+ int tx_port_num = mvpp2_egress_port(port);
+
+ /* Enable all initialized TXs. */
+ qmap = 0;
+ for (queue = 0; queue < port->ntxqs; queue++) {
+ struct mvpp2_tx_queue *txq = port->txqs[queue];
+
+ if (txq->descs)
+ qmap |= (1 << queue);
+ }
+
+ mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
+ mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
+}
+
+/* Disable transmit via physical egress queue
+ * - HW stops taking descriptors from DRAM
+ */
+static void mvpp2_egress_disable(struct mvpp2_port *port)
+{
+ u32 reg_data;
+ int delay;
+ int tx_port_num = mvpp2_egress_port(port);
+
+ /* Issue stop command for active channels only */
+ mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
+ reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
+ MVPP2_TXP_SCHED_ENQ_MASK;
+ if (reg_data != 0)
+ mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
+ (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));
+
+ /* Wait for all Tx activity to terminate. */
+ delay = 0;
+ do {
+ if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
+ netdev_warn(port->dev,
+ "Tx stop timed out, status=0x%08x\n",
+ reg_data);
+ break;
+ }
+ mdelay(1);
+ delay++;
+
+ /* Check the port TX Command register to verify that all
+ * Tx queues have stopped
+ */
+ reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
+ } while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
+}
+
+/* Rx descriptors helper methods */
+
+/* Get number of Rx descriptors occupied by received packets */
+static inline int
+mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
+{
+ u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));
+
+ return val & MVPP2_RXQ_OCCUPIED_MASK;
+}
+
+/* Update Rx queue status with the number of occupied and available
+ * Rx descriptor slots.
+ */
+static inline void
+mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
+ int used_count, int free_count)
+{
+ /* Decrement the number of used descriptors and increment the
+ * number of free descriptors.
+ */
+ u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);
+
+ mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
+}
+
+/* Get pointer to next RX descriptor to be processed by SW */
+static inline struct mvpp2_rx_desc *
+mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
+{
+ int rx_desc = rxq->next_desc_to_proc;
+
+ rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
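+ /* Prefetch the descriptor that will be processed next */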
+ prefetch(rxq->descs + rxq->next_desc_to_proc);
+ return rxq->descs + rx_desc;
+}
+
+/* Set rx queue offset */
+static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
+ int prxq, int offset)
+{
+ u32 val;
+
+ /* Convert offset from bytes to units of 32 bytes */
+ offset = offset >> 5;
+
+ val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
+ val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;
+
+ /* Offset is in units of 32 bytes */
+ val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
+ MVPP2_RXQ_PACKET_OFFSET_MASK);
+
+ mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
+}
+
+/* Tx descriptors helper methods */
+
+/* Get pointer to next Tx descriptor to be processed (send) by HW */
+static struct mvpp2_tx_desc *
+mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
+{
+ int tx_desc = txq->next_desc_to_proc;
+
+ txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
+ return txq->descs + tx_desc;
+}
+
+/* Update HW with number of aggregated Tx descriptors to be sent
+ *
+ * Called only from mvpp2_tx(), so migration is disabled and using
+ * smp_processor_id() is OK.
+ */
+static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
+{
+ /* Aggregated access: the relevant TXQ number is written in the Tx desc */
+ mvpp2_percpu_write(port->priv, smp_processor_id(),
+ MVPP2_AGGR_TXQ_UPDATE_REG, pending);
+}
+
+/* Check if there are enough free descriptors in aggregated txq.
+ * If not, update the number of occupied descriptors and repeat the check.
+ *
+ * Called only from mvpp2_tx(), so migration is disabled and using
+ * smp_processor_id() is OK.
+ */
+static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv,
+ struct mvpp2_tx_queue *aggr_txq, int num)
+{
+ if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) {
+ /* Update number of occupied aggregated Tx descriptors */
+ int cpu = smp_processor_id();
+ u32 val = mvpp2_read_relaxed(priv,
+ MVPP2_AGGR_TXQ_STATUS_REG(cpu));
+
+ aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;
+
+ if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE)
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+/* Reserved Tx descriptors allocation request
+ *
+ * Called only from mvpp2_txq_reserved_desc_num_proc(), itself called
+ * only by mvpp2_tx(), so migration is disabled and using
+ * smp_processor_id() is OK.
+ */
+static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv,
+ struct mvpp2_tx_queue *txq, int num)
+{
+ u32 val;
+ int cpu = smp_processor_id();
+
+ val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
+ mvpp2_percpu_write_relaxed(priv, cpu, MVPP2_TXQ_RSVD_REQ_REG, val);
+
+ val = mvpp2_percpu_read_relaxed(priv, cpu, MVPP2_TXQ_RSVD_RSLT_REG);
+
+ return val & MVPP2_TXQ_RSVD_RSLT_MASK;
+}
+
+/* Check if there are enough reserved descriptors for transmission.
+ * If not, request chunk of reserved descriptors and check again.
+ */
+static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *priv,
+ struct mvpp2_tx_queue *txq,
+ struct mvpp2_txq_pcpu *txq_pcpu,
+ int num)
+{
+ int req, cpu, desc_count;
+
+ if (txq_pcpu->reserved_num >= num)
+ return 0;
+
+ /* Not enough descriptors reserved! Update the reserved descriptor
+ * count and check again.
+ */
+
+ desc_count = 0;
+ /* Compute total of used descriptors */
+ for_each_present_cpu(cpu) {
+ struct mvpp2_txq_pcpu *txq_pcpu_aux;
+
+ txq_pcpu_aux = per_cpu_ptr(txq->pcpu, cpu);
+ desc_count += txq_pcpu_aux->count;
+ desc_count += txq_pcpu_aux->reserved_num;
+ }
+
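+ /* Request at least a full chunk so this slow path is not taken
+ * again for the very next packet.
+ */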
+ req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num);
+ desc_count += req;
+
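+ /* Refuse if granting the request could leave less than one chunk
+ * of headroom per present CPU in the queue.
+ */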
+ if (desc_count >
+ (txq->size - (num_present_cpus() * MVPP2_CPU_DESC_CHUNK)))
+ return -ENOMEM;
+
+ txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(priv, txq, req);
+
+ /* The reserved descriptor count may have been updated: check again. */
+ if (txq_pcpu->reserved_num < num)
+ return -ENOMEM;
+ return 0;
+}
+
+/* Release the last allocated Tx descriptor. Useful to handle DMA
+ * mapping failures in the Tx path.
+ */
+static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
+{
+ if (txq->next_desc_to_proc == 0)
+ txq->next_desc_to_proc = txq->last_desc - 1;
+ else
+ txq->next_desc_to_proc--;
+}
+
+/* Set Tx descriptors fields relevant for CSUM calculation */
+static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
+ int ip_hdr_len, int l4_proto)
+{
+ u32 command;
+
+ /* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
+ * G_L4_chk, L4_type required only for checksum calculation
+ */
+ command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT);
+ command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT);
+ command |= MVPP2_TXD_IP_CSUM_DISABLE;
+
+ if (l3_proto == swab16(ETH_P_IP)) {
+ command &= ~MVPP2_TXD_IP_CSUM_DISABLE; /* enable IPv4 csum */
+ command &= ~MVPP2_TXD_L3_IP6; /* enable IPv4 */
+ } else {
+ command |= MVPP2_TXD_L3_IP6; /* enable IPv6 */
+ }
+
+ if (l4_proto == IPPROTO_TCP) {
+ command &= ~MVPP2_TXD_L4_UDP; /* enable TCP */
+ command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */
+ } else if (l4_proto == IPPROTO_UDP) {
+ command |= MVPP2_TXD_L4_UDP; /* enable UDP */
+ command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */
+ } else {
+ command |= MVPP2_TXD_L4_CSUM_NOT;
+ }
+
+ return command;
+}
+
+/* Get number of sent descriptors and decrement counter.
+ * The number of sent descriptors is returned.
+ * Per-CPU access
+ *
+ * Called only from mvpp2_txq_done(), called from mvpp2_tx()
+ * (migration disabled) and from the TX completion tasklet (migration
+ * disabled), so using smp_processor_id() is OK.
+ */
+static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
+ struct mvpp2_tx_queue *txq)
+{
+ u32 val;
+
+ /* Reading status reg resets transmitted descriptor counter */
+ val = mvpp2_percpu_read_relaxed(port->priv, smp_processor_id(),
+ MVPP2_TXQ_SENT_REG(txq->id));
+
+ return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
+ MVPP2_TRANSMITTED_COUNT_OFFSET;
+}
+
+/* Called through on_each_cpu(), so runs on all CPUs, with migration
+ * disabled; therefore using smp_processor_id() is OK.
+ */
+static void mvpp2_txq_sent_counter_clear(void *arg)
+{
+ struct mvpp2_port *port = arg;
+ int queue;
+
+ for (queue = 0; queue < port->ntxqs; queue++) {
+ int id = port->txqs[queue]->id;
+
+ mvpp2_percpu_read(port->priv, smp_processor_id(),
+ MVPP2_TXQ_SENT_REG(id));
+ }
+}
+
+/* Set max sizes for Tx queues */
+static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
+{
+ u32 val, size, mtu;
+ int txq, tx_port_num;
+
+ mtu = port->pkt_size * 8;
+ if (mtu > MVPP2_TXP_MTU_MAX)
+ mtu = MVPP2_TXP_MTU_MAX;
+
+ /* Workaround for wrong token bucket update: set MTU value to 3 * the real MTU value */
+ mtu = 3 * mtu;
+
+ /* Indirect access to registers */
+ tx_port_num = mvpp2_egress_port(port);
+ mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
+
+ /* Set MTU */
+ val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
+ val &= ~MVPP2_TXP_MTU_MAX;
+ val |= mtu;
+ mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);
+
+ /* The TXP token size and all TXQ token sizes must be larger than the MTU */
+ val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
+ size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
+ if (size < mtu) {
+ size = mtu;
+ val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
+ val |= size;
+ mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
+ }
+
+ for (txq = 0; txq < port->ntxqs; txq++) {
+ val = mvpp2_read(port->priv,
+ MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
+ size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;
+
+ if (size < mtu) {
+ size = mtu;
+ val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
+ val |= size;
+ mvpp2_write(port->priv,
+ MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
+ val);
+ }
+ }
+}
+
+/* Set the number of packets that will be received before Rx interrupt
+ * will be generated by HW.
+ */
+static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
+ struct mvpp2_rx_queue *rxq)
+{
+ int cpu = get_cpu();
+
+ if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK)
+ rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK;
+
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_THRESH_REG,
+ rxq->pkts_coal);
+
+ put_cpu();
+}
+
+/* For some reason in the LSP this is done on each CPU. Why? */
+static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port,
+ struct mvpp2_tx_queue *txq)
+{
+ int cpu = get_cpu();
+ u32 val;
+
+ if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK)
+ txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK;
+
+ val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET);
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_THRESH_REG, val);
+
+ put_cpu();
+}
+
+static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
+{
+ u64 tmp = (u64)clk_hz * usec;
+
+ do_div(tmp, USEC_PER_SEC);
+
+ return tmp > U32_MAX ? U32_MAX : tmp;
+}
+
+static u32 mvpp2_cycles_to_usec(u32 cycles, unsigned long clk_hz)
+{
+ u64 tmp = (u64)cycles * USEC_PER_SEC;
+
+ do_div(tmp, clk_hz);
+
+ return tmp > U32_MAX ? U32_MAX : tmp;
+}
+
+/* Set the time delay in usec before Rx interrupt */
+static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
+ struct mvpp2_rx_queue *rxq)
+{
+ unsigned long freq = port->priv->tclk;
+ u32 val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
+
+ if (val > MVPP2_MAX_ISR_RX_THRESHOLD) {
+ rxq->time_coal =
+ mvpp2_cycles_to_usec(MVPP2_MAX_ISR_RX_THRESHOLD, freq);
+
+ /* re-evaluate to get actual register value */
+ val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
+ }
+
+ mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);
+}
+
+static void mvpp2_tx_time_coal_set(struct mvpp2_port *port)
+{
+ unsigned long freq = port->priv->tclk;
+ u32 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq);
+
+ if (val > MVPP2_MAX_ISR_TX_THRESHOLD) {
+ port->tx_time_coal =
+ mvpp2_cycles_to_usec(MVPP2_MAX_ISR_TX_THRESHOLD, freq);
+
+ /* re-evaluate to get actual register value */
+ val = mvpp2_usec_to_cycles(port->tx_time_coal, freq);
+ }
+
+ mvpp2_write(port->priv, MVPP2_ISR_TX_THRESHOLD_REG(port->id), val);
+}
+
+/* Free Tx queue skbuffs */
+static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
+ struct mvpp2_tx_queue *txq,
+ struct mvpp2_txq_pcpu *txq_pcpu, int num)
+{
+ int i;
+
+ for (i = 0; i < num; i++) {
+ struct mvpp2_txq_pcpu_buf *tx_buf =
+ txq_pcpu->buffs + txq_pcpu->txq_get_index;
+
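+ /* TSO headers come from a per-CPU coherent buffer (see
+ * mvpp2_txq_init()) and are not DMA-mapped per packet,
+ * so they must not be unmapped here.
+ */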
+ if (!IS_TSO_HEADER(txq_pcpu, tx_buf->dma))
+ dma_unmap_single(port->dev->dev.parent, tx_buf->dma,
+ tx_buf->size, DMA_TO_DEVICE);
+ if (tx_buf->skb)
+ dev_kfree_skb_any(tx_buf->skb);
+
+ mvpp2_txq_inc_get(txq_pcpu);
+ }
+}
+
+static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
+ u32 cause)
+{
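+ /* fls() returns one plus the index of the most significant set bit,
+ * so this selects the highest-numbered queue with a pending event.
+ */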
+ int queue = fls(cause) - 1;
+
+ return port->rxqs[queue];
+}
+
+static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
+ u32 cause)
+{
+ int queue = fls(cause) - 1;
+
+ return port->txqs[queue];
+}
+
+/* Handle end of transmission */
+static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
+ struct mvpp2_txq_pcpu *txq_pcpu)
+{
+ struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id);
+ int tx_done;
+
+ if (txq_pcpu->cpu != smp_processor_id())
+ netdev_err(port->dev, "wrong cpu on the end of Tx processing\n");
+
+ tx_done = mvpp2_txq_sent_desc_proc(port, txq);
+ if (!tx_done)
+ return;
+ mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done);
+
+ txq_pcpu->count -= tx_done;
+
+ if (netif_tx_queue_stopped(nq))
+ if (txq_pcpu->count <= txq_pcpu->wake_threshold)
+ netif_tx_wake_queue(nq);
+}
+
+static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause,
+ int cpu)
+{
+ struct mvpp2_tx_queue *txq;
+ struct mvpp2_txq_pcpu *txq_pcpu;
+ unsigned int tx_todo = 0;
+
+ while (cause) {
+ txq = mvpp2_get_tx_queue(port, cause);
+ if (!txq)
+ break;
+
+ txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
+
+ if (txq_pcpu->count) {
+ mvpp2_txq_done(port, txq, txq_pcpu);
+ tx_todo += txq_pcpu->count;
+ }
+
+ cause &= ~(1 << txq->log_id);
+ }
+ return tx_todo;
+}
+
+/* Rx/Tx queue initialization/cleanup methods */
+
+/* Allocate and initialize descriptors for aggr TXQ */
+static int mvpp2_aggr_txq_init(struct platform_device *pdev,
+ struct mvpp2_tx_queue *aggr_txq, int cpu,
+ struct mvpp2 *priv)
+{
+ u32 txq_dma;
+
+ /* Allocate memory for TX descriptors */
+ aggr_txq->descs = dma_zalloc_coherent(&pdev->dev,
+ MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
+ &aggr_txq->descs_dma, GFP_KERNEL);
+ if (!aggr_txq->descs)
+ return -ENOMEM;
+
+ aggr_txq->last_desc = MVPP2_AGGR_TXQ_SIZE - 1;
+
+ /* Workaround: the aggregated TXQ index is not reset by HW, so resume from its current value */
+ aggr_txq->next_desc_to_proc = mvpp2_read(priv,
+ MVPP2_AGGR_TXQ_INDEX_REG(cpu));
+
+ /* Set the Tx descriptor queue starting address (indirect access).
+ * On PPv2.2 the DMA address is written shifted right, so that
+ * addresses wider than 32 bits fit into the register.
+ */
+ if (priv->hw_version == MVPP21)
+ txq_dma = aggr_txq->descs_dma;
+ else
+ txq_dma = aggr_txq->descs_dma >>
+ MVPP22_AGGR_TXQ_DESC_ADDR_OFFS;
+
+ mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), txq_dma);
+ mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu),
+ MVPP2_AGGR_TXQ_SIZE);
+
+ return 0;
+}
+
+/* Create a specified Rx queue */
+static int mvpp2_rxq_init(struct mvpp2_port *port,
+ struct mvpp2_rx_queue *rxq)
+{
+ u32 rxq_dma;
+ int cpu;
+
+ rxq->size = port->rx_ring_size;
+
+ /* Allocate memory for RX descriptors */
+ rxq->descs = dma_alloc_coherent(port->dev->dev.parent,
+ rxq->size * MVPP2_DESC_ALIGNED_SIZE,
+ &rxq->descs_dma, GFP_KERNEL);
+ if (!rxq->descs)
+ return -ENOMEM;
+
+ rxq->last_desc = rxq->size - 1;
+
+ /* Zero occupied and non-occupied counters - direct access */
+ mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
+
+ /* Set Rx descriptors queue starting address - indirect access */
+ cpu = get_cpu();
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
+ if (port->priv->hw_version == MVPP21)
+ rxq_dma = rxq->descs_dma;
+ else
+ rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS;
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_INDEX_REG, 0);
+ put_cpu();
+
+ /* Set Offset */
+ mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);
+
+ /* Set coalescing pkts and time */
+ mvpp2_rx_pkts_coal_set(port, rxq);
+ mvpp2_rx_time_coal_set(port, rxq);
+
+ /* Add number of descriptors ready for receiving packets */
+ mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);
+
+ return 0;
+}
+
+/* Push packets received by the RXQ to BM pool */
+static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
+ struct mvpp2_rx_queue *rxq)
+{
+ int rx_received, i;
+
+ rx_received = mvpp2_rxq_received(port, rxq->id);
+ if (!rx_received)
+ return;
+
+ for (i = 0; i < rx_received; i++) {
+ struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
+ u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
+ int pool;
+
+ pool = (status & MVPP2_RXD_BM_POOL_ID_MASK) >>
+ MVPP2_RXD_BM_POOL_ID_OFFS;
+
+ mvpp2_bm_pool_put(port, pool,
+ mvpp2_rxdesc_dma_addr_get(port, rx_desc),
+ mvpp2_rxdesc_cookie_get(port, rx_desc));
+ }
+ mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
+}
+
+/* Cleanup Rx queue */
+static void mvpp2_rxq_deinit(struct mvpp2_port *port,
+ struct mvpp2_rx_queue *rxq)
+{
+ int cpu;
+
+ mvpp2_rxq_drop_pkts(port, rxq);
+
+ if (rxq->descs)
+ dma_free_coherent(port->dev->dev.parent,
+ rxq->size * MVPP2_DESC_ALIGNED_SIZE,
+ rxq->descs,
+ rxq->descs_dma);
+
+ rxq->descs = NULL;
+ rxq->last_desc = 0;
+ rxq->next_desc_to_proc = 0;
+ rxq->descs_dma = 0;
+
+ /* Clear Rx descriptors queue starting address and size;
+ * free descriptor number
+ */
+ mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
+ cpu = get_cpu();
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, 0);
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, 0);
+ put_cpu();
+}
+
+/* Create and initialize a Tx queue */
+static int mvpp2_txq_init(struct mvpp2_port *port,
+ struct mvpp2_tx_queue *txq)
+{
+ u32 val;
+ int cpu, desc, desc_per_txq, tx_port_num;
+ struct mvpp2_txq_pcpu *txq_pcpu;
+
+ txq->size = port->tx_ring_size;
+
+ /* Allocate memory for Tx descriptors */
+ txq->descs = dma_alloc_coherent(port->dev->dev.parent,
+ txq->size * MVPP2_DESC_ALIGNED_SIZE,
+ &txq->descs_dma, GFP_KERNEL);
+ if (!txq->descs)
+ return -ENOMEM;
+
+ txq->last_desc = txq->size - 1;
+
+ /* Set Tx descriptors queue starting address - indirect access */
+ cpu = get_cpu();
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG,
+ txq->descs_dma);
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG,
+ txq->size & MVPP2_TXQ_DESC_SIZE_MASK);
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_INDEX_REG, 0);
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_RSVD_CLR_REG,
+ txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
+ val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PENDING_REG);
+ val &= ~MVPP2_TXQ_PENDING_MASK;
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PENDING_REG, val);
+
+ /* Calculate the base address in the prefetch buffer. We reserve
+ * 16 descriptors for each existing TXQ.
+ * TCONTs for the PON port must be contiguous from 0 to
+ * MVPP2_MAX_TCONT; GBE ports are assumed to be contiguous from 0
+ * to MVPP2_MAX_PORTS.
+ */
+ desc_per_txq = 16;
+ desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
+ (txq->log_id * desc_per_txq);
+
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG,
+ MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
+ MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));
+ put_cpu();
+
+ /* WRR / EJP configuration - indirect access */
+ tx_port_num = mvpp2_egress_port(port);
+ mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
+
+ val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
+ val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
+ val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
+ val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
+ mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);
+
+ val = MVPP2_TXQ_TOKEN_SIZE_MAX;
+ mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
+ val);
+
+ for_each_present_cpu(cpu) {
+ txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
+ txq_pcpu->size = txq->size;
+ txq_pcpu->buffs = kmalloc_array(txq_pcpu->size,
+ sizeof(*txq_pcpu->buffs),
+ GFP_KERNEL);
+ if (!txq_pcpu->buffs)
+ return -ENOMEM;
+
+ txq_pcpu->count = 0;
+ txq_pcpu->reserved_num = 0;
+ txq_pcpu->txq_put_index = 0;
+ txq_pcpu->txq_get_index = 0;
+ txq_pcpu->tso_headers = NULL;
+
+ txq_pcpu->stop_threshold = txq->size - MVPP2_MAX_SKB_DESCS;
+ txq_pcpu->wake_threshold = txq_pcpu->stop_threshold / 2;
+
+ txq_pcpu->tso_headers =
+ dma_alloc_coherent(port->dev->dev.parent,
+ txq_pcpu->size * TSO_HEADER_SIZE,
+ &txq_pcpu->tso_headers_dma,
+ GFP_KERNEL);
+ if (!txq_pcpu->tso_headers)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/* Free allocated TXQ resources */
+static void mvpp2_txq_deinit(struct mvpp2_port *port,
+ struct mvpp2_tx_queue *txq)
+{
+ struct mvpp2_txq_pcpu *txq_pcpu;
+ int cpu;
+
+ for_each_present_cpu(cpu) {
+ txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
+ kfree(txq_pcpu->buffs);
+
+ if (txq_pcpu->tso_headers)
+ dma_free_coherent(port->dev->dev.parent,
+ txq_pcpu->size * TSO_HEADER_SIZE,
+ txq_pcpu->tso_headers,
+ txq_pcpu->tso_headers_dma);
+
+ txq_pcpu->tso_headers = NULL;
+ }
+
+ if (txq->descs)
+ dma_free_coherent(port->dev->dev.parent,
+ txq->size * MVPP2_DESC_ALIGNED_SIZE,
+ txq->descs, txq->descs_dma);
+
+ txq->descs = NULL;
+ txq->last_desc = 0;
+ txq->next_desc_to_proc = 0;
+ txq->descs_dma = 0;
+
+ /* Set minimum bandwidth for disabled TXQs */
+ mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);
+
+ /* Set Tx descriptors queue starting address and size */
+ cpu = get_cpu();
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, 0);
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, 0);
+ put_cpu();
+}
+
+/* Drain and clean up a Tx queue */
+static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
+{
+ struct mvpp2_txq_pcpu *txq_pcpu;
+ int delay, pending, cpu;
+ u32 val;
+
+ cpu = get_cpu();
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
+ val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG);
+ val |= MVPP2_TXQ_DRAIN_EN_MASK;
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val);
+
+ /* The napi queue has been stopped so wait for all packets
+ * to be transmitted.
+ */
+ delay = 0;
+ do {
+ if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
+ netdev_warn(port->dev,
+ "port %d: cleaning queue %d timed out\n",
+ port->id, txq->log_id);
+ break;
+ }
+ mdelay(1);
+ delay++;
+
+ pending = mvpp2_percpu_read(port->priv, cpu,
+ MVPP2_TXQ_PENDING_REG);
+ pending &= MVPP2_TXQ_PENDING_MASK;
+ } while (pending);
+
+ val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val);
+ put_cpu();
+
+ for_each_present_cpu(cpu) {
+ txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
+
+ /* Release all packets */
+ mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);
+
+ /* Reset queue */
+ txq_pcpu->count = 0;
+ txq_pcpu->txq_put_index = 0;
+ txq_pcpu->txq_get_index = 0;
+ }
+}
+
+/* Cleanup all Tx queues */
+static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
+{
+ struct mvpp2_tx_queue *txq;
+ int queue;
+ u32 val;
+
+ val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);
+
+ /* Reset Tx ports and delete Tx queues */
+ val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
+ mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
+
+ for (queue = 0; queue < port->ntxqs; queue++) {
+ txq = port->txqs[queue];
+ mvpp2_txq_clean(port, txq);
+ mvpp2_txq_deinit(port, txq);
+ }
+
+ on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
+
+ val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
+ mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
+}
+
+/* Cleanup all Rx queues */
+static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
+{
+ int queue;
+
+ for (queue = 0; queue < port->nrxqs; queue++)
+ mvpp2_rxq_deinit(port, port->rxqs[queue]);
+}
+
+/* Init all Rx queues for port */
+static int mvpp2_setup_rxqs(struct mvpp2_port *port)
+{
+ int queue, err;
+
+ for (queue = 0; queue < port->nrxqs; queue++) {
+ err = mvpp2_rxq_init(port, port->rxqs[queue]);
+ if (err)
+ goto err_cleanup;
+ }
+ return 0;
+
+err_cleanup:
+ mvpp2_cleanup_rxqs(port);
+ return err;
+}
+
+/* Init all tx queues for port */
+static int mvpp2_setup_txqs(struct mvpp2_port *port)
+{
+ struct mvpp2_tx_queue *txq;
+ int queue, err;
+
+ for (queue = 0; queue < port->ntxqs; queue++) {
+ txq = port->txqs[queue];
+ err = mvpp2_txq_init(port, txq);
+ if (err)
+ goto err_cleanup;
+ }
+
+ if (port->has_tx_irqs) {
+ mvpp2_tx_time_coal_set(port);
+ for (queue = 0; queue < port->ntxqs; queue++) {
+ txq = port->txqs[queue];
+ mvpp2_tx_pkts_coal_set(port, txq);
+ }
+ }
+
+ on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
+ return 0;
+
+err_cleanup:
+ mvpp2_cleanup_txqs(port);
+ return err;
+}
+
+/* The callback for per-port interrupt */
+static irqreturn_t mvpp2_isr(int irq, void *dev_id)
+{
+ struct mvpp2_queue_vector *qv = dev_id;
+
+ mvpp2_qvec_interrupt_disable(qv);
+
+ napi_schedule(&qv->napi);
+
+ return IRQ_HANDLED;
+}
+
+/* Per-port interrupt for link status changes */
+static irqreturn_t mvpp2_link_status_isr(int irq, void *dev_id)
+{
+ struct mvpp2_port *port = (struct mvpp2_port *)dev_id;
+ struct net_device *dev = port->dev;
+ bool event = false, link = false;
+ u32 val;
+
+ mvpp22_gop_mask_irq(port);
+
+ if (port->gop_id == 0 &&
+ port->phy_interface == PHY_INTERFACE_MODE_10GKR) {
+ val = readl(port->base + MVPP22_XLG_INT_STAT);
+ if (val & MVPP22_XLG_INT_STAT_LINK) {
+ event = true;
+ val = readl(port->base + MVPP22_XLG_STATUS);
+ if (val & MVPP22_XLG_STATUS_LINK_UP)
+ link = true;
+ }
+ } else if (phy_interface_mode_is_rgmii(port->phy_interface) ||
+ port->phy_interface == PHY_INTERFACE_MODE_SGMII ||
+ port->phy_interface == PHY_INTERFACE_MODE_1000BASEX ||
+ port->phy_interface == PHY_INTERFACE_MODE_2500BASEX) {
+ val = readl(port->base + MVPP22_GMAC_INT_STAT);
+ if (val & MVPP22_GMAC_INT_STAT_LINK) {
+ event = true;
+ val = readl(port->base + MVPP2_GMAC_STATUS0);
+ if (val & MVPP2_GMAC_STATUS0_LINK_UP)
+ link = true;
+ }
+ }
+
+ if (port->phylink) {
+ phylink_mac_change(port->phylink, link);
+ goto handled;
+ }
+
+ if (!netif_running(dev) || !event)
+ goto handled;
+
+ if (link) {
+ mvpp2_interrupts_enable(port);
+
+ mvpp2_egress_enable(port);
+ mvpp2_ingress_enable(port);
+ netif_carrier_on(dev);
+ netif_tx_wake_all_queues(dev);
+ } else {
+ netif_tx_stop_all_queues(dev);
+ netif_carrier_off(dev);
+ mvpp2_ingress_disable(port);
+ mvpp2_egress_disable(port);
+
+ mvpp2_interrupts_disable(port);
+ }
+
+handled:
+ mvpp22_gop_unmask_irq(port);
+ return IRQ_HANDLED;
+}
+
+static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu)
+{
+ ktime_t interval;
+
+ if (!port_pcpu->timer_scheduled) {
+ port_pcpu->timer_scheduled = true;
+ interval = MVPP2_TXDONE_HRTIMER_PERIOD_NS;
+ hrtimer_start(&port_pcpu->tx_done_timer, interval,
+ HRTIMER_MODE_REL_PINNED);
+ }
+}
+
+static void mvpp2_tx_proc_cb(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct mvpp2_port *port = netdev_priv(dev);
+ struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
+ unsigned int tx_todo, cause;
+
+ if (!netif_running(dev))
+ return;
+ port_pcpu->timer_scheduled = false;
+
+ /* Process all the Tx queues */
+ cause = (1 << port->ntxqs) - 1;
+ tx_todo = mvpp2_tx_done(port, cause, smp_processor_id());
+
+ /* Set the timer in case not all the packets were processed */
+ if (tx_todo)
+ mvpp2_timer_set(port_pcpu);
+}
+
+static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
+{
+ struct mvpp2_port_pcpu *port_pcpu = container_of(timer,
+ struct mvpp2_port_pcpu,
+ tx_done_timer);
+
+ tasklet_schedule(&port_pcpu->tx_done_tasklet);
+
+ return HRTIMER_NORESTART;
+}
+
+/* Main RX/TX processing routines */
+
+/* Display more error info */
+static void mvpp2_rx_error(struct mvpp2_port *port,
+ struct mvpp2_rx_desc *rx_desc)
+{
+ u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
+ size_t sz = mvpp2_rxdesc_size_get(port, rx_desc);
+ char *err_str = NULL;
+
+ switch (status & MVPP2_RXD_ERR_CODE_MASK) {
+ case MVPP2_RXD_ERR_CRC:
+ err_str = "crc";
+ break;
+ case MVPP2_RXD_ERR_OVERRUN:
+ err_str = "overrun";
+ break;
+ case MVPP2_RXD_ERR_RESOURCE:
+ err_str = "resource";
+ break;
+ }
+ if (err_str && net_ratelimit())
+ netdev_err(port->dev,
+ "bad rx status %08x (%s error), size=%zu\n",
+ status, err_str, sz);
+}
+
+/* Handle RX checksum offload */
+static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
+ struct sk_buff *skb)
+{
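+ /* Trust the HW checksum only for IPv4 frames without header errors
+ * or for IPv6 frames, and only when the L4 protocol is TCP or UDP
+ * and the L4 checksum is flagged OK.
+ */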
+ if (((status & MVPP2_RXD_L3_IP4) &&
+ !(status & MVPP2_RXD_IP4_HEADER_ERR)) ||
+ (status & MVPP2_RXD_L3_IP6))
+ if (((status & MVPP2_RXD_L4_UDP) ||
+ (status & MVPP2_RXD_L4_TCP)) &&
+ (status & MVPP2_RXD_L4_CSUM_OK)) {
+ skb->csum = 0;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ return;
+ }
+
+ skb->ip_summed = CHECKSUM_NONE;
+}
+
+/* Reuse skb if possible, or allocate a new skb and add it to BM pool */
+static int mvpp2_rx_refill(struct mvpp2_port *port,
+ struct mvpp2_bm_pool *bm_pool, int pool)
+{
+ dma_addr_t dma_addr;
+ phys_addr_t phys_addr;
+ void *buf;
+
+ /* No recycling, or too many buffers in use: allocate a new buffer */
+ buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr, &phys_addr,
+ GFP_ATOMIC);
+ if (!buf)
+ return -ENOMEM;
+
+ mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
+
+ return 0;
+}
+
+/* Handle tx checksum */
+static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
+{
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ int ip_hdr_len = 0;
+ u8 l4_proto;
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ struct iphdr *ip4h = ip_hdr(skb);
+
+ /* Calculate IPv4 checksum and L4 checksum */
+ ip_hdr_len = ip4h->ihl;
+ l4_proto = ip4h->protocol;
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ struct ipv6hdr *ip6h = ipv6_hdr(skb);
+
+ /* Read l4_protocol from one of IPv6 extra headers */
+ if (skb_network_header_len(skb) > 0)
+ ip_hdr_len = (skb_network_header_len(skb) >> 2);
+ l4_proto = ip6h->nexthdr;
+ } else {
+ return MVPP2_TXD_L4_CSUM_NOT;
+ }
+
+ return mvpp2_txq_desc_csum(skb_network_offset(skb),
+ skb->protocol, ip_hdr_len, l4_proto);
+ }
+
+ return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
+}
+
+/* Main rx processing */
+static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
+ int rx_todo, struct mvpp2_rx_queue *rxq)
+{
+ struct net_device *dev = port->dev;
+ int rx_received;
+ int rx_done = 0;
+ u32 rcvd_pkts = 0;
+ u32 rcvd_bytes = 0;
+
+ /* Get the number of received packets and clamp the to-do count */
+ rx_received = mvpp2_rxq_received(port, rxq->id);
+ if (rx_todo > rx_received)
+ rx_todo = rx_received;
+
+ while (rx_done < rx_todo) {
+ struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
+ struct mvpp2_bm_pool *bm_pool;
+ struct sk_buff *skb;
+ unsigned int frag_size;
+ dma_addr_t dma_addr;
+ phys_addr_t phys_addr;
+ u32 rx_status;
+ int pool, rx_bytes, err;
+ void *data;
+
+ rx_done++;
+ rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
+ rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
+ rx_bytes -= MVPP2_MH_SIZE;
+ dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
+ phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
+ data = (void *)phys_to_virt(phys_addr);
+
+ pool = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
+ MVPP2_RXD_BM_POOL_ID_OFFS;
+ bm_pool = &port->priv->bm_pools[pool];
+
+ /* In case of an error, release the requested buffer pointer
+ * back to the Buffer Manager. This request process is controlled
+ * by the hardware, and the buffer information is carried in the
+ * RX descriptor.
+ */
+ if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
+err_drop_frame:
+ dev->stats.rx_errors++;
+ mvpp2_rx_error(port, rx_desc);
+ /* Return the buffer to the pool */
+ mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
+ continue;
+ }
+
+ if (bm_pool->frag_size > PAGE_SIZE)
+ frag_size = 0;
+ else
+ frag_size = bm_pool->frag_size;
+
+ skb = build_skb(data, frag_size);
+ if (!skb) {
+ netdev_warn(port->dev, "skb build failed\n");
+ goto err_drop_frame;
+ }
+
+ err = mvpp2_rx_refill(port, bm_pool, pool);
+ if (err) {
+ netdev_err(port->dev, "failed to refill BM pools\n");
+ goto err_drop_frame;
+ }
+
+ dma_unmap_single(dev->dev.parent, dma_addr,
+ bm_pool->buf_size, DMA_FROM_DEVICE);
+
+ rcvd_pkts++;
+ rcvd_bytes += rx_bytes;
+
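+ /* Skip the headroom reserved at refill time and the Marvell
+ * header prepended by the HW.
+ */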
+ skb_reserve(skb, MVPP2_MH_SIZE + NET_SKB_PAD);
+ skb_put(skb, rx_bytes);
+ skb->protocol = eth_type_trans(skb, dev);
+ mvpp2_rx_csum(port, rx_status, skb);
+
+ napi_gro_receive(napi, skb);
+ }
+
+ if (rcvd_pkts) {
+ struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
+
+ u64_stats_update_begin(&stats->syncp);
+ stats->rx_packets += rcvd_pkts;
+ stats->rx_bytes += rcvd_bytes;
+ u64_stats_update_end(&stats->syncp);
+ }
+
+ /* Ensure prior buffer refills are visible before returning the
+ * descriptors to HW, then update the Rx queue management counters
+ */
+ wmb();
+ mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done);
+
+ return rx_todo;
+}
+
+static inline void
+tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
+ struct mvpp2_tx_desc *desc)
+{
+ struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
+
+ dma_addr_t buf_dma_addr =
+ mvpp2_txdesc_dma_addr_get(port, desc);
+ size_t buf_sz =
+ mvpp2_txdesc_size_get(port, desc);
+ if (!IS_TSO_HEADER(txq_pcpu, buf_dma_addr))
+ dma_unmap_single(port->dev->dev.parent, buf_dma_addr,
+ buf_sz, DMA_TO_DEVICE);
+ mvpp2_txq_desc_put(txq);
+}
+
+/* Handle tx fragmentation processing */
+static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
+ struct mvpp2_tx_queue *aggr_txq,
+ struct mvpp2_tx_queue *txq)
+{
+ struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
+ struct mvpp2_tx_desc *tx_desc;
+ int i;
+ dma_addr_t buf_dma_addr;
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ void *addr = page_address(frag->page.p) + frag->page_offset;
+
+ tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
+ mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
+ mvpp2_txdesc_size_set(port, tx_desc, frag->size);
+
+ buf_dma_addr = dma_map_single(port->dev->dev.parent, addr,
+ frag->size, DMA_TO_DEVICE);
+ if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) {
+ mvpp2_txq_desc_put(txq);
+ goto cleanup;
+ }
+
+ mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);
+
+ if (i == (skb_shinfo(skb)->nr_frags - 1)) {
+ /* Last descriptor */
+ mvpp2_txdesc_cmd_set(port, tx_desc,
+ MVPP2_TXD_L_DESC);
+ mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
+ } else {
+ /* Descriptor in the middle: Not First, Not Last */
+ mvpp2_txdesc_cmd_set(port, tx_desc, 0);
+ mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
+ }
+ }
+
+ return 0;
+cleanup:
+ /* Release all descriptors that were used to map fragments of
+ * this packet, as well as the corresponding DMA mappings
+ */
+ for (i = i - 1; i >= 0; i--) {
+ tx_desc = txq->descs + i;
+ tx_desc_unmap_put(port, txq, tx_desc);
+ }
+
+ return -ENOMEM;
+}
+
+static inline void mvpp2_tso_put_hdr(struct sk_buff *skb,
+ struct net_device *dev,
+ struct mvpp2_tx_queue *txq,
+ struct mvpp2_tx_queue *aggr_txq,
+ struct mvpp2_txq_pcpu *txq_pcpu,
+ int hdr_sz)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+ struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
+ dma_addr_t addr;
+
+ mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
+ mvpp2_txdesc_size_set(port, tx_desc, hdr_sz);
+
+ addr = txq_pcpu->tso_headers_dma +
+ txq_pcpu->txq_put_index * TSO_HEADER_SIZE;
+ mvpp2_txdesc_dma_addr_set(port, tx_desc, addr);
+
+ mvpp2_txdesc_cmd_set(port, tx_desc, mvpp2_skb_tx_csum(port, skb) |
+ MVPP2_TXD_F_DESC |
+ MVPP2_TXD_PADDING_DISABLE);
+ mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
+}
+
+static inline int mvpp2_tso_put_data(struct sk_buff *skb,
+ struct net_device *dev, struct tso_t *tso,
+ struct mvpp2_tx_queue *txq,
+ struct mvpp2_tx_queue *aggr_txq,
+ struct mvpp2_txq_pcpu *txq_pcpu,
+ int sz, bool left, bool last)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+ struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
+ dma_addr_t buf_dma_addr;
+
+ mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
+ mvpp2_txdesc_size_set(port, tx_desc, sz);
+
+ buf_dma_addr = dma_map_single(dev->dev.parent, tso->data, sz,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
+ mvpp2_txq_desc_put(txq);
+ return -ENOMEM;
+ }
+
+ mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);
+
+ if (!left) {
+ mvpp2_txdesc_cmd_set(port, tx_desc, MVPP2_TXD_L_DESC);
+ if (last) {
+ mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
+ return 0;
+ }
+ } else {
+ mvpp2_txdesc_cmd_set(port, tx_desc, 0);
+ }
+
+ mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
+ return 0;
+}
+
+static int mvpp2_tx_tso(struct sk_buff *skb, struct net_device *dev,
+ struct mvpp2_tx_queue *txq,
+ struct mvpp2_tx_queue *aggr_txq,
+ struct mvpp2_txq_pcpu *txq_pcpu)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+ struct tso_t tso;
+ int hdr_sz = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ int i, len, descs = 0;
+
+ /* Check the number of available descriptors; returning 0 makes
+ * the caller drop the skb, as in the non-TSO path.
+ */
+ if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq,
+ tso_count_descs(skb)) ||
+ mvpp2_txq_reserved_desc_num_proc(port->priv, txq, txq_pcpu,
+ tso_count_descs(skb)))
+ return 0;
+
+ tso_start(skb, &tso);
+ len = skb->len - hdr_sz;
+ while (len > 0) {
+ int left = min_t(int, skb_shinfo(skb)->gso_size, len);
+ char *hdr = txq_pcpu->tso_headers +
+ txq_pcpu->txq_put_index * TSO_HEADER_SIZE;
+
+ len -= left;
+ descs++;
+
+ tso_build_hdr(skb, hdr, &tso, left, len == 0);
+ mvpp2_tso_put_hdr(skb, dev, txq, aggr_txq, txq_pcpu, hdr_sz);
+
+ while (left > 0) {
+ int sz = min_t(int, tso.size, left);
+
+ left -= sz;
+ descs++;
+
+ if (mvpp2_tso_put_data(skb, dev, &tso, txq, aggr_txq,
+ txq_pcpu, sz, left, len == 0))
+ goto release;
+ tso_build_data(skb, &tso, sz);
+ }
+ }
+
+ return descs;
+
+release:
+ for (i = descs - 1; i >= 0; i--) {
+ struct mvpp2_tx_desc *tx_desc = txq->descs + i;
+
+ tx_desc_unmap_put(port, txq, tx_desc);
+ }
+ return 0;
+}
+
+/* Main tx processing */
+static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+ struct mvpp2_tx_queue *txq, *aggr_txq;
+ struct mvpp2_txq_pcpu *txq_pcpu;
+ struct mvpp2_tx_desc *tx_desc;
+ dma_addr_t buf_dma_addr;
+ int frags = 0;
+ u16 txq_id;
+ u32 tx_cmd;
+
+ txq_id = skb_get_queue_mapping(skb);
+ txq = port->txqs[txq_id];
+ txq_pcpu = this_cpu_ptr(txq->pcpu);
+ aggr_txq = &port->priv->aggr_txqs[smp_processor_id()];
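+ /* Each CPU owns a dedicated aggregated TXQ; mvpp2_tx() runs with
+ * migration disabled, so no locking is needed on it.
+ */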
+
+ if (skb_is_gso(skb)) {
+ frags = mvpp2_tx_tso(skb, dev, txq, aggr_txq, txq_pcpu);
+ goto out;
+ }
+ frags = skb_shinfo(skb)->nr_frags + 1;
+
+ /* Check number of available descriptors */
+ if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, frags) ||
+ mvpp2_txq_reserved_desc_num_proc(port->priv, txq,
+ txq_pcpu, frags)) {
+ frags = 0;
+ goto out;
+ }
+
+ /* Get a descriptor for the first part of the packet */
+ tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
+ mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
+ mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb));
+
+ buf_dma_addr = dma_map_single(dev->dev.parent, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
+ mvpp2_txq_desc_put(txq);
+ frags = 0;
+ goto out;
+ }
+
+ mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);
+
+ tx_cmd = mvpp2_skb_tx_csum(port, skb);
+
+ if (frags == 1) {
+ /* First and Last descriptor */
+ tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
+ mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
+ mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
+ } else {
+ /* First but not Last */
+ tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
+ mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
+ mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
+
+ /* Continue with other skb fragments */
+ if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
+ tx_desc_unmap_put(port, txq, tx_desc);
+ frags = 0;
+ }
+ }
+
+out:
+ if (frags > 0) {
+ struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
+ struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
+
+ txq_pcpu->reserved_num -= frags;
+ txq_pcpu->count += frags;
+ aggr_txq->count += frags;
+
+ /* Ensure descriptor updates are visible to HW, then enable transmit */
+ wmb();
+ mvpp2_aggr_txq_pend_desc_add(port, frags);
+
+ if (txq_pcpu->count >= txq_pcpu->stop_threshold)
+ netif_tx_stop_queue(nq);
+
+ u64_stats_update_begin(&stats->syncp);
+ stats->tx_packets++;
+ stats->tx_bytes += skb->len;
+ u64_stats_update_end(&stats->syncp);
+ } else {
+ dev->stats.tx_dropped++;
+ dev_kfree_skb_any(skb);
+ }
+
+ /* Finalize TX processing */
+ if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal)
+ mvpp2_txq_done(port, txq, txq_pcpu);
+
+ /* Set the timer in case not all frags were processed */
+ if (!port->has_tx_irqs && txq_pcpu->count <= frags &&
+ txq_pcpu->count > 0) {
+ struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
+
+ mvpp2_timer_set(port_pcpu);
+ }
+
+ return NETDEV_TX_OK;
+}
+
+static inline void mvpp2_cause_error(struct net_device *dev, int cause)
+{
+ if (cause & MVPP2_CAUSE_FCS_ERR_MASK)
+ netdev_err(dev, "FCS error\n");
+ if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK)
+ netdev_err(dev, "rx fifo overrun error\n");
+ if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK)
+ netdev_err(dev, "tx fifo underrun error\n");
+}
+
+static int mvpp2_poll(struct napi_struct *napi, int budget)
+{
+ u32 cause_rx_tx, cause_rx, cause_tx, cause_misc;
+ int rx_done = 0;
+ struct mvpp2_port *port = netdev_priv(napi->dev);
+ struct mvpp2_queue_vector *qv;
+ int cpu = smp_processor_id();
+
+ qv = container_of(napi, struct mvpp2_queue_vector, napi);
+
+ /* Rx/Tx cause register
+ *
+ * Bits 0-15: each bit indicates received packets on the Rx queue
+ * (bit 0 is for Rx queue 0).
+ *
+ * Bits 16-23: each bit indicates transmitted packets on the Tx queue
+ * (bit 16 is for Tx queue 0).
+ *
+ * Each CPU has its own Rx/Tx cause register
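+ *
+ * For example, a value of 0x00010003 indicates pending events on
+ * Rx queues 0 and 1 and on Tx queue 0.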
+ */
+ cause_rx_tx = mvpp2_percpu_read_relaxed(port->priv, qv->sw_thread_id,
+ MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
+
+ cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
+ if (cause_misc) {
+ mvpp2_cause_error(port->dev, cause_misc);
+
+ /* Clear the cause register */
+ mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
+ mvpp2_percpu_write(port->priv, cpu,
+ MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
+ cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
+ }
+
+ cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
+ if (cause_tx) {
+ cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET;
+ mvpp2_tx_done(port, cause_tx, qv->sw_thread_id);
+ }
+
+ /* Process RX packets */
+ cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
+ cause_rx <<= qv->first_rxq;
+ cause_rx |= qv->pending_cause_rx;
+ while (cause_rx && budget > 0) {
+ int count;
+ struct mvpp2_rx_queue *rxq;
+
+ rxq = mvpp2_get_rx_queue(port, cause_rx);
+ if (!rxq)
+ break;
+
+ count = mvpp2_rx(port, napi, budget, rxq);
+ rx_done += count;
+ budget -= count;
+ if (budget > 0) {
+ /* Clear the bit associated to this Rx queue
+ * so that next iteration will continue from
+ * the next Rx queue.
+ */
+ cause_rx &= ~(1 << rxq->logic_rxq);
+ }
+ }
+
+ if (budget > 0) {
+ cause_rx = 0;
+ napi_complete_done(napi, rx_done);
+
+ mvpp2_qvec_interrupt_enable(qv);
+ }
+ qv->pending_cause_rx = cause_rx;
+ return rx_done;
+}
+
+static void mvpp22_mode_reconfigure(struct mvpp2_port *port)
+{
+ u32 ctrl3;
+
+ /* comphy reconfiguration */
+ mvpp22_comphy_init(port);
+
+ /* gop reconfiguration */
+ mvpp22_gop_init(port);
+
+ /* Only GOP port 0 has an XLG MAC */
+ if (port->gop_id == 0) {
+ ctrl3 = readl(port->base + MVPP22_XLG_CTRL3_REG);
+ ctrl3 &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
+
+ if (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
+ port->phy_interface == PHY_INTERFACE_MODE_10GKR)
+ ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_10G;
+ else
+ ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC;
+
+ writel(ctrl3, port->base + MVPP22_XLG_CTRL3_REG);
+ }
+
+ if (port->gop_id == 0 &&
+ (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
+ port->phy_interface == PHY_INTERFACE_MODE_10GKR))
+ mvpp2_xlg_max_rx_size_set(port);
+ else
+ mvpp2_gmac_max_rx_size_set(port);
+}
+
+/* Set hw internals when starting port */
+static void mvpp2_start_dev(struct mvpp2_port *port)
+{
+ int i;
+
+ mvpp2_txp_max_tx_size_set(port);
+
+ for (i = 0; i < port->nqvecs; i++)
+ napi_enable(&port->qvecs[i].napi);
+
+ /* Enable interrupts on all CPUs */
+ mvpp2_interrupts_enable(port);
+
+ if (port->priv->hw_version == MVPP22)
+ mvpp22_mode_reconfigure(port);
+
+ if (port->phylink) {
+ phylink_start(port->phylink);
+ } else {
+ /* Phylink isn't used with ACPI for now, so the MAC has to be
+ * configured manually when the interface is started. This will
+ * be removed once phylink ACPI support lands.
+ */
+ struct phylink_link_state state = {
+ .interface = port->phy_interface,
+ .link = 1,
+ };
+ mvpp2_mac_config(port->dev, MLO_AN_INBAND, &state);
+ }
+
+ netif_tx_start_all_queues(port->dev);
+}
+
+/* Set hw internals when stopping port */
+static void mvpp2_stop_dev(struct mvpp2_port *port)
+{
+ int i;
+
+ /* Disable interrupts on all CPUs */
+ mvpp2_interrupts_disable(port);
+
+ for (i = 0; i < port->nqvecs; i++)
+ napi_disable(&port->qvecs[i].napi);
+
+ if (port->phylink)
+ phylink_stop(port->phylink);
+ phy_power_off(port->comphy);
+}
+
+static int mvpp2_check_ringparam_valid(struct net_device *dev,
+ struct ethtool_ringparam *ring)
+{
+ u16 new_rx_pending = ring->rx_pending;
+ u16 new_tx_pending = ring->tx_pending;
+
+ if (ring->rx_pending == 0 || ring->tx_pending == 0)
+ return -EINVAL;
+
+ if (ring->rx_pending > MVPP2_MAX_RXD_MAX)
+ new_rx_pending = MVPP2_MAX_RXD_MAX;
+ else if (!IS_ALIGNED(ring->rx_pending, 16))
+ new_rx_pending = ALIGN(ring->rx_pending, 16);
+
+ if (ring->tx_pending > MVPP2_MAX_TXD_MAX)
+ new_tx_pending = MVPP2_MAX_TXD_MAX;
+ else if (!IS_ALIGNED(ring->tx_pending, 32))
+ new_tx_pending = ALIGN(ring->tx_pending, 32);
+
+ /* The Tx ring size cannot be smaller than the minimum number of
+ * descriptors needed for TSO.
+ */
+ if (new_tx_pending < MVPP2_MAX_SKB_DESCS)
+ new_tx_pending = ALIGN(MVPP2_MAX_SKB_DESCS, 32);
+
+ if (ring->rx_pending != new_rx_pending) {
+ netdev_info(dev, "illegal Rx ring size value %d, round to %d\n",
+ ring->rx_pending, new_rx_pending);
+ ring->rx_pending = new_rx_pending;
+ }
+
+ if (ring->tx_pending != new_tx_pending) {
+ netdev_info(dev, "illegal Tx ring size value %d, round to %d\n",
+ ring->tx_pending, new_tx_pending);
+ ring->tx_pending = new_tx_pending;
+ }
+
+ return 0;
+}
+
+static void mvpp21_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
+{
+ u32 mac_addr_l, mac_addr_m, mac_addr_h;
+
+ mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
+ mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE);
+ mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH);
+ addr[0] = (mac_addr_h >> 24) & 0xFF;
+ addr[1] = (mac_addr_h >> 16) & 0xFF;
+ addr[2] = (mac_addr_h >> 8) & 0xFF;
+ addr[3] = mac_addr_h & 0xFF;
+ addr[4] = mac_addr_m & 0xFF;
+ addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF;
+}
+
+static int mvpp2_irqs_init(struct mvpp2_port *port)
+{
+ int err, i;
+
+ for (i = 0; i < port->nqvecs; i++) {
+ struct mvpp2_queue_vector *qv = port->qvecs + i;
+
+ if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE)
+ irq_set_status_flags(qv->irq, IRQ_NO_BALANCING);
+
+ err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv);
+ if (err)
+ goto err;
+
+ if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE)
+ irq_set_affinity_hint(qv->irq,
+ cpumask_of(qv->sw_thread_id));
+ }
+
+ return 0;
+err:
+ for (i = 0; i < port->nqvecs; i++) {
+ struct mvpp2_queue_vector *qv = port->qvecs + i;
+
+ irq_set_affinity_hint(qv->irq, NULL);
+ free_irq(qv->irq, qv);
+ }
+
+ return err;
+}
+
+static void mvpp2_irqs_deinit(struct mvpp2_port *port)
+{
+ int i;
+
+ for (i = 0; i < port->nqvecs; i++) {
+ struct mvpp2_queue_vector *qv = port->qvecs + i;
+
+ irq_set_affinity_hint(qv->irq, NULL);
+ irq_clear_status_flags(qv->irq, IRQ_NO_BALANCING);
+ free_irq(qv->irq, qv);
+ }
+}
+
+static int mvpp2_open(struct net_device *dev)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+ struct mvpp2 *priv = port->priv;
+ unsigned char mac_bcast[ETH_ALEN] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+ bool valid = false;
+ int err;
+
+ err = mvpp2_prs_mac_da_accept(port, mac_bcast, true);
+ if (err) {
+ netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
+ return err;
+ }
+ err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, true);
+ if (err) {
+ netdev_err(dev, "mvpp2_prs_mac_da_accept own addr failed\n");
+ return err;
+ }
+ err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH);
+ if (err) {
+ netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n");
+ return err;
+ }
+ err = mvpp2_prs_def_flow(port);
+ if (err) {
+ netdev_err(dev, "mvpp2_prs_def_flow failed\n");
+ return err;
+ }
+
+ /* Allocate the Rx/Tx queues */
+ err = mvpp2_setup_rxqs(port);
+ if (err) {
+ netdev_err(port->dev, "cannot allocate Rx queues\n");
+ return err;
+ }
+
+ err = mvpp2_setup_txqs(port);
+ if (err) {
+ netdev_err(port->dev, "cannot allocate Tx queues\n");
+ goto err_cleanup_rxqs;
+ }
+
+ err = mvpp2_irqs_init(port);
+ if (err) {
+ netdev_err(port->dev, "cannot init IRQs\n");
+ goto err_cleanup_txqs;
+ }
+
+ /* Phylink isn't supported yet in ACPI mode */
+ if (port->of_node) {
+ err = phylink_of_phy_connect(port->phylink, port->of_node, 0);
+ if (err) {
+ netdev_err(port->dev, "could not attach PHY (%d)\n",
+ err);
+ goto err_free_irq;
+ }
+
+ valid = true;
+ }
+
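+ /* Without phylink (ACPI mode as of this writing), fall back on
+ * the GoP link interrupt to be notified of link state changes.
+ */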
+ if (priv->hw_version == MVPP22 && port->link_irq && !port->phylink) {
+ err = request_irq(port->link_irq, mvpp2_link_status_isr, 0,
+ dev->name, port);
+ if (err) {
+ netdev_err(port->dev, "cannot request link IRQ %d\n",
+ port->link_irq);
+ goto err_free_irq;
+ }
+
+ mvpp22_gop_setup_irq(port);
+
+ /* By default, the link is down */
+ netif_carrier_off(port->dev);
+
+ valid = true;
+ } else {
+ port->link_irq = 0;
+ }
+
+ if (!valid) {
+ netdev_err(port->dev,
+ "invalid configuration: no dt or link IRQ\n");
+ err = -ENOENT;
+ goto err_free_irq;
+ }
+
+ /* Unmask interrupts on all CPUs */
+ on_each_cpu(mvpp2_interrupts_unmask, port, 1);
+ mvpp2_shared_interrupt_mask_unmask(port, false);
+
+ mvpp2_start_dev(port);
+
+ if (priv->hw_version == MVPP22)
+ mvpp22_init_rss(port);
+
+ /* Start hardware statistics gathering */
+ queue_delayed_work(priv->stats_queue, &port->stats_work,
+ MVPP2_MIB_COUNTERS_STATS_DELAY);
+
+ return 0;
+
+err_free_irq:
+ mvpp2_irqs_deinit(port);
+err_cleanup_txqs:
+ mvpp2_cleanup_txqs(port);
+err_cleanup_rxqs:
+ mvpp2_cleanup_rxqs(port);
+ return err;
+}
+
+static int mvpp2_stop(struct net_device *dev)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+ struct mvpp2_port_pcpu *port_pcpu;
+ int cpu;
+
+ mvpp2_stop_dev(port);
+
+ /* Mask interrupts on all CPUs */
+ on_each_cpu(mvpp2_interrupts_mask, port, 1);
+ mvpp2_shared_interrupt_mask_unmask(port, true);
+
+ if (port->phylink)
+ phylink_disconnect_phy(port->phylink);
+ if (port->link_irq)
+ free_irq(port->link_irq, port);
+
+ mvpp2_irqs_deinit(port);
+ if (!port->has_tx_irqs) {
+ for_each_present_cpu(cpu) {
+ port_pcpu = per_cpu_ptr(port->pcpu, cpu);
+
+ hrtimer_cancel(&port_pcpu->tx_done_timer);
+ port_pcpu->timer_scheduled = false;
+ tasklet_kill(&port_pcpu->tx_done_tasklet);
+ }
+ }
+ mvpp2_cleanup_rxqs(port);
+ mvpp2_cleanup_txqs(port);
+
+ cancel_delayed_work_sync(&port->stats_work);
+
+ return 0;
+}
+
+static int mvpp2_prs_mac_da_accept_list(struct mvpp2_port *port,
+ struct netdev_hw_addr_list *list)
+{
+ struct netdev_hw_addr *ha;
+ int ret;
+
+ netdev_hw_addr_list_for_each(ha, list) {
+ ret = mvpp2_prs_mac_da_accept(port, ha->addr, true);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void mvpp2_set_rx_promisc(struct mvpp2_port *port, bool enable)
+{
+ if (!enable && (port->dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
+ mvpp2_prs_vid_enable_filtering(port);
+ else
+ mvpp2_prs_vid_disable_filtering(port);
+
+ mvpp2_prs_mac_promisc_set(port->priv, port->id,
+ MVPP2_PRS_L2_UNI_CAST, enable);
+
+ mvpp2_prs_mac_promisc_set(port->priv, port->id,
+ MVPP2_PRS_L2_MULTI_CAST, enable);
+}
+
+static void mvpp2_set_rx_mode(struct net_device *dev)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ /* Clear the whole UC and MC list */
+ mvpp2_prs_mac_del_all(port);
+
+ if (dev->flags & IFF_PROMISC) {
+ mvpp2_set_rx_promisc(port, true);
+ return;
+ }
+
+ mvpp2_set_rx_promisc(port, false);
+
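+ /* If a filter list exceeds what the parser can hold, or if
+ * programming an entry fails, fall back to unicast or multicast
+ * promiscuous mode for that traffic class.
+ */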
+ if (netdev_uc_count(dev) > MVPP2_PRS_MAC_UC_FILT_MAX ||
+ mvpp2_prs_mac_da_accept_list(port, &dev->uc))
+ mvpp2_prs_mac_promisc_set(port->priv, port->id,
+ MVPP2_PRS_L2_UNI_CAST, true);
+
+ if (dev->flags & IFF_ALLMULTI) {
+ mvpp2_prs_mac_promisc_set(port->priv, port->id,
+ MVPP2_PRS_L2_MULTI_CAST, true);
+ return;
+ }
+
+ if (netdev_mc_count(dev) > MVPP2_PRS_MAC_MC_FILT_MAX ||
+ mvpp2_prs_mac_da_accept_list(port, &dev->mc))
+ mvpp2_prs_mac_promisc_set(port->priv, port->id,
+ MVPP2_PRS_L2_MULTI_CAST, true);
+}
+
+static int mvpp2_set_mac_address(struct net_device *dev, void *p)
+{
+ const struct sockaddr *addr = p;
+ int err;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
+ if (err) {
+ /* Reconfigure parser to accept the original MAC address */
+ mvpp2_prs_update_mac_da(dev, dev->dev_addr);
+ netdev_err(dev, "failed to change MAC address\n");
+ }
+ return err;
+}
+
+static int mvpp2_change_mtu(struct net_device *dev, int mtu)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+ int err;
+
+ if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
+ netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu,
+ ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8));
+ mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
+ }
+
+ if (!netif_running(dev)) {
+ err = mvpp2_bm_update_mtu(dev, mtu);
+ if (!err) {
+ port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
+ return 0;
+ }
+
+ /* Reconfigure BM to the original MTU */
+ err = mvpp2_bm_update_mtu(dev, dev->mtu);
+ if (err)
+ goto log_error;
+ }
+
+ mvpp2_stop_dev(port);
+
+ err = mvpp2_bm_update_mtu(dev, mtu);
+ if (!err) {
+ port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
+ goto out_start;
+ }
+
+ /* Reconfigure BM to the original MTU */
+ err = mvpp2_bm_update_mtu(dev, dev->mtu);
+ if (err)
+ goto log_error;
+
+out_start:
+ mvpp2_start_dev(port);
+ mvpp2_egress_enable(port);
+ mvpp2_ingress_enable(port);
+
+ return 0;
+log_error:
+ netdev_err(dev, "failed to change MTU\n");
+ return err;
+}
+
+static void
+mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+ unsigned int start;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ struct mvpp2_pcpu_stats *cpu_stats;
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 tx_packets;
+ u64 tx_bytes;
+
+ cpu_stats = per_cpu_ptr(port->stats, cpu);
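+ /* Snapshot the per-CPU counters under the u64_stats sequence
+ * counter, retrying if a writer updated them meanwhile; this
+ * keeps 64-bit reads consistent on 32-bit systems.
+ */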
+ do {
+ start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
+ rx_packets = cpu_stats->rx_packets;
+ rx_bytes = cpu_stats->rx_bytes;
+ tx_packets = cpu_stats->tx_packets;
+ tx_bytes = cpu_stats->tx_bytes;
+ } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
+
+ stats->rx_packets += rx_packets;
+ stats->rx_bytes += rx_bytes;
+ stats->tx_packets += tx_packets;
+ stats->tx_bytes += tx_bytes;
+ }
+
+ stats->rx_errors = dev->stats.rx_errors;
+ stats->rx_dropped = dev->stats.rx_dropped;
+ stats->tx_dropped = dev->stats.tx_dropped;
+}
+
+static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ if (!port->phylink)
+ return -ENOTSUPP;
+
+ return phylink_mii_ioctl(port->phylink, ifr, cmd);
+}
+
+static int mvpp2_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+ int ret;
+
+ ret = mvpp2_prs_vid_entry_add(port, vid);
+ if (ret)
+ netdev_err(dev, "rx-vlan-filter offloading cannot accept more than %d VIDs per port\n",
+ MVPP2_PRS_VLAN_FILT_MAX - 1);
+ return ret;
+}
+
+static int mvpp2_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ mvpp2_prs_vid_entry_remove(port, vid);
+ return 0;
+}
+
+static int mvpp2_set_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ netdev_features_t changed = dev->features ^ features;
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
+ if (features & NETIF_F_HW_VLAN_CTAG_FILTER) {
+ mvpp2_prs_vid_enable_filtering(port);
+ } else {
+ /* Invalidate all registered VID filters for this
+ * port
+ */
+ mvpp2_prs_vid_remove_all(port);
+
+ mvpp2_prs_vid_disable_filtering(port);
+ }
+ }
+
+ return 0;
+}
+
+/* Ethtool methods */
+
+static int mvpp2_ethtool_nway_reset(struct net_device *dev)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ if (!port->phylink)
+ return -ENOTSUPP;
+
+ return phylink_ethtool_nway_reset(port->phylink);
+}
+
+/* Set interrupt coalescing for ethtool */
+static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *c)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+ int queue;
+
+ for (queue = 0; queue < port->nrxqs; queue++) {
+ struct mvpp2_rx_queue *rxq = port->rxqs[queue];
+
+ rxq->time_coal = c->rx_coalesce_usecs;
+ rxq->pkts_coal = c->rx_max_coalesced_frames;
+ mvpp2_rx_pkts_coal_set(port, rxq);
+ mvpp2_rx_time_coal_set(port, rxq);
+ }
+
+ if (port->has_tx_irqs) {
+ port->tx_time_coal = c->tx_coalesce_usecs;
+ mvpp2_tx_time_coal_set(port);
+ }
+
+ for (queue = 0; queue < port->ntxqs; queue++) {
+ struct mvpp2_tx_queue *txq = port->txqs[queue];
+
+ txq->done_pkts_coal = c->tx_max_coalesced_frames;
+
+ if (port->has_tx_irqs)
+ mvpp2_tx_pkts_coal_set(port, txq);
+ }
+
+ return 0;
+}
+
+/* Get interrupt coalescing for ethtool */
+static int mvpp2_ethtool_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *c)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ c->rx_coalesce_usecs = port->rxqs[0]->time_coal;
+ c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal;
+ c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal;
+ c->tx_coalesce_usecs = port->tx_time_coal;
+ return 0;
+}
+
+static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME,
+ sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION,
+ sizeof(drvinfo->version));
+ strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
+ sizeof(drvinfo->bus_info));
+}
+
+static void mvpp2_ethtool_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ ring->rx_max_pending = MVPP2_MAX_RXD_MAX;
+ ring->tx_max_pending = MVPP2_MAX_TXD_MAX;
+ ring->rx_pending = port->rx_ring_size;
+ ring->tx_pending = port->tx_ring_size;
+}
+
+static int mvpp2_ethtool_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+ u16 prev_rx_ring_size = port->rx_ring_size;
+ u16 prev_tx_ring_size = port->tx_ring_size;
+ int err;
+
+ err = mvpp2_check_ringparam_valid(dev, ring);
+ if (err)
+ return err;
+
+ if (!netif_running(dev)) {
+ port->rx_ring_size = ring->rx_pending;
+ port->tx_ring_size = ring->tx_pending;
+ return 0;
+ }
+
+ /* The interface is running, so we have to force a
+ * reallocation of the queues
+ */
+ mvpp2_stop_dev(port);
+ mvpp2_cleanup_rxqs(port);
+ mvpp2_cleanup_txqs(port);
+
+ port->rx_ring_size = ring->rx_pending;
+ port->tx_ring_size = ring->tx_pending;
+
+ err = mvpp2_setup_rxqs(port);
+ if (err) {
+ /* Reallocate Rx queues with the original ring size */
+ port->rx_ring_size = prev_rx_ring_size;
+ ring->rx_pending = prev_rx_ring_size;
+ err = mvpp2_setup_rxqs(port);
+ if (err)
+ goto err_out;
+ }
+ err = mvpp2_setup_txqs(port);
+ if (err) {
+ /* Reallocate Tx queues with the original ring size */
+ port->tx_ring_size = prev_tx_ring_size;
+ ring->tx_pending = prev_tx_ring_size;
+ err = mvpp2_setup_txqs(port);
+ if (err)
+ goto err_clean_rxqs;
+ }
+
+ mvpp2_start_dev(port);
+ mvpp2_egress_enable(port);
+ mvpp2_ingress_enable(port);
+
+ return 0;
+
+err_clean_rxqs:
+ mvpp2_cleanup_rxqs(port);
+err_out:
+ netdev_err(dev, "failed to change ring parameters");
+ return err;
+}
+
+static void mvpp2_ethtool_get_pause_param(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ if (!port->phylink)
+ return;
+
+ phylink_ethtool_get_pauseparam(port->phylink, pause);
+}
+
+static int mvpp2_ethtool_set_pause_param(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ if (!port->phylink)
+ return -ENOTSUPP;
+
+ return phylink_ethtool_set_pauseparam(port->phylink, pause);
+}
+
+static int mvpp2_ethtool_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ if (!port->phylink)
+ return -ENOTSUPP;
+
+ return phylink_ethtool_ksettings_get(port->phylink, cmd);
+}
+
+static int mvpp2_ethtool_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ if (!port->phylink)
+ return -ENOTSUPP;
+
+ return phylink_ethtool_ksettings_set(port->phylink, cmd);
+}
+
+/* Device ops */
+
+static const struct net_device_ops mvpp2_netdev_ops = {
+ .ndo_open = mvpp2_open,
+ .ndo_stop = mvpp2_stop,
+ .ndo_start_xmit = mvpp2_tx,
+ .ndo_set_rx_mode = mvpp2_set_rx_mode,
+ .ndo_set_mac_address = mvpp2_set_mac_address,
+ .ndo_change_mtu = mvpp2_change_mtu,
+ .ndo_get_stats64 = mvpp2_get_stats64,
+ .ndo_do_ioctl = mvpp2_ioctl,
+ .ndo_vlan_rx_add_vid = mvpp2_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = mvpp2_vlan_rx_kill_vid,
+ .ndo_set_features = mvpp2_set_features,
+};
+
+static const struct ethtool_ops mvpp2_eth_tool_ops = {
+ .nway_reset = mvpp2_ethtool_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .set_coalesce = mvpp2_ethtool_set_coalesce,
+ .get_coalesce = mvpp2_ethtool_get_coalesce,
+ .get_drvinfo = mvpp2_ethtool_get_drvinfo,
+ .get_ringparam = mvpp2_ethtool_get_ringparam,
+ .set_ringparam = mvpp2_ethtool_set_ringparam,
+ .get_strings = mvpp2_ethtool_get_strings,
+ .get_ethtool_stats = mvpp2_ethtool_get_stats,
+ .get_sset_count = mvpp2_ethtool_get_sset_count,
+ .get_pauseparam = mvpp2_ethtool_get_pause_param,
+ .set_pauseparam = mvpp2_ethtool_set_pause_param,
+ .get_link_ksettings = mvpp2_ethtool_get_link_ksettings,
+ .set_link_ksettings = mvpp2_ethtool_set_link_ksettings,
+};
+
+/* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that
+ * had a single IRQ defined per-port.
+ */
+static int mvpp2_simple_queue_vectors_init(struct mvpp2_port *port,
+ struct device_node *port_node)
+{
+ struct mvpp2_queue_vector *v = &port->qvecs[0];
+
+ v->first_rxq = 0;
+ v->nrxqs = port->nrxqs;
+ v->type = MVPP2_QUEUE_VECTOR_SHARED;
+ v->sw_thread_id = 0;
+ v->sw_thread_mask = *cpumask_bits(cpu_online_mask);
+ v->port = port;
+ v->irq = irq_of_parse_and_map(port_node, 0);
+ if (v->irq <= 0)
+ return -EINVAL;
+ netif_napi_add(port->dev, &v->napi, mvpp2_poll,
+ NAPI_POLL_WEIGHT);
+
+ port->nqvecs = 1;
+
+ return 0;
+}
+
+static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port,
+ struct device_node *port_node)
+{
+ struct mvpp2_queue_vector *v;
+ int i, ret;
+
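+ /* Use one private vector per possible CPU; in single-queue mode
+ * an extra shared vector is appended to handle all Rx queues.
+ */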
+ port->nqvecs = num_possible_cpus();
+ if (queue_mode == MVPP2_QDIST_SINGLE_MODE)
+ port->nqvecs += 1;
+
+ for (i = 0; i < port->nqvecs; i++) {
+ char irqname[16];
+
+ v = port->qvecs + i;
+
+ v->port = port;
+ v->type = MVPP2_QUEUE_VECTOR_PRIVATE;
+ v->sw_thread_id = i;
+ v->sw_thread_mask = BIT(i);
+
+ snprintf(irqname, sizeof(irqname), "tx-cpu%d", i);
+
+ if (queue_mode == MVPP2_QDIST_MULTI_MODE) {
+ v->first_rxq = i * MVPP2_DEFAULT_RXQ;
+ v->nrxqs = MVPP2_DEFAULT_RXQ;
+ } else if (queue_mode == MVPP2_QDIST_SINGLE_MODE &&
+ i == (port->nqvecs - 1)) {
+ v->first_rxq = 0;
+ v->nrxqs = port->nrxqs;
+ v->type = MVPP2_QUEUE_VECTOR_SHARED;
+ strncpy(irqname, "rx-shared", sizeof(irqname));
+ }
+
+ if (port_node)
+ v->irq = of_irq_get_byname(port_node, irqname);
+ else
+ v->irq = fwnode_irq_get(port->fwnode, i);
+ if (v->irq <= 0) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ netif_napi_add(port->dev, &v->napi, mvpp2_poll,
+ NAPI_POLL_WEIGHT);
+ }
+
+ return 0;
+
+err:
+ for (i = 0; i < port->nqvecs; i++)
+ irq_dispose_mapping(port->qvecs[i].irq);
+ return ret;
+}
+
+static int mvpp2_queue_vectors_init(struct mvpp2_port *port,
+ struct device_node *port_node)
+{
+ if (port->has_tx_irqs)
+ return mvpp2_multi_queue_vectors_init(port, port_node);
+ else
+ return mvpp2_simple_queue_vectors_init(port, port_node);
+}
+
+static void mvpp2_queue_vectors_deinit(struct mvpp2_port *port)
+{
+ int i;
+
+ for (i = 0; i < port->nqvecs; i++)
+ irq_dispose_mapping(port->qvecs[i].irq);
+}
+
+/* Configure Rx queue group interrupt for this port */
+static void mvpp2_rx_irqs_setup(struct mvpp2_port *port)
+{
+ struct mvpp2 *priv = port->priv;
+ u32 val;
+ int i;
+
+ if (priv->hw_version == MVPP21) {
+ mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id),
+ port->nrxqs);
+ return;
+ }
+
+ /* Handle the more complicated PPv2.2 case */
+ for (i = 0; i < port->nqvecs; i++) {
+ struct mvpp2_queue_vector *qv = port->qvecs + i;
+
+ if (!qv->nrxqs)
+ continue;
+
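+ /* Both registers are indirect: first select the (port, sw
+ * thread) group via the index register, then program the first
+ * Rx queue and the Rx queue count of that sub-group.
+ */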
+ val = qv->sw_thread_id;
+ val |= port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET;
+ mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val);
+
+ val = qv->first_rxq;
+ val |= qv->nrxqs << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET;
+ mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val);
+ }
+}
+
+/* Initialize port HW */
+static int mvpp2_port_init(struct mvpp2_port *port)
+{
+ struct device *dev = port->dev->dev.parent;
+ struct mvpp2 *priv = port->priv;
+ struct mvpp2_txq_pcpu *txq_pcpu;
+ int queue, cpu, err;
+
+ /* Checks for hardware constraints */
+ if (port->first_rxq + port->nrxqs >
+ MVPP2_MAX_PORTS * priv->max_port_rxqs)
+ return -EINVAL;
+
+ if (port->nrxqs % 4 || (port->nrxqs > priv->max_port_rxqs) ||
+ (port->ntxqs > MVPP2_MAX_TXQ))
+ return -EINVAL;
+
+ /* Disable port */
+ mvpp2_egress_disable(port);
+ mvpp2_port_disable(port);
+
+ port->tx_time_coal = MVPP2_TXDONE_COAL_USEC;
+
+ port->txqs = devm_kcalloc(dev, port->ntxqs, sizeof(*port->txqs),
+ GFP_KERNEL);
+ if (!port->txqs)
+ return -ENOMEM;
+
+ /* Associate physical Tx queues with this port and initialize
+ * them. The mapping is predefined.
+ */
+ for (queue = 0; queue < port->ntxqs; queue++) {
+ int queue_phy_id = mvpp2_txq_phys(port->id, queue);
+ struct mvpp2_tx_queue *txq;
+
+ txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
+ if (!txq) {
+ err = -ENOMEM;
+ goto err_free_percpu;
+ }
+
+ txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu);
+ if (!txq->pcpu) {
+ err = -ENOMEM;
+ goto err_free_percpu;
+ }
+
+ txq->id = queue_phy_id;
+ txq->log_id = queue;
+ txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
+ for_each_present_cpu(cpu) {
+ txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
+ txq_pcpu->cpu = cpu;
+ }
+
+ port->txqs[queue] = txq;
+ }
+
+ port->rxqs = devm_kcalloc(dev, port->nrxqs, sizeof(*port->rxqs),
+ GFP_KERNEL);
+ if (!port->rxqs) {
+ err = -ENOMEM;
+ goto err_free_percpu;
+ }
+
+ /* Allocate and initialize Rx queues for this port */
+ for (queue = 0; queue < port->nrxqs; queue++) {
+ struct mvpp2_rx_queue *rxq;
+
+ /* Map physical Rx queue to port's logical Rx queue */
+ rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
+ if (!rxq) {
+ err = -ENOMEM;
+ goto err_free_percpu;
+ }
+ /* Map this Rx queue to a physical queue */
+ rxq->id = port->first_rxq + queue;
+ rxq->port = port->id;
+ rxq->logic_rxq = queue;
+
+ port->rxqs[queue] = rxq;
+ }
+
+ mvpp2_rx_irqs_setup(port);
+
+ /* Create Rx descriptor rings */
+ for (queue = 0; queue < port->nrxqs; queue++) {
+ struct mvpp2_rx_queue *rxq = port->rxqs[queue];
+
+ rxq->size = port->rx_ring_size;
+ rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
+ rxq->time_coal = MVPP2_RX_COAL_USEC;
+ }
+
+ mvpp2_ingress_disable(port);
+
+ /* Port default configuration */
+ mvpp2_defaults_set(port);
+
+ /* Port's classifier configuration */
+ mvpp2_cls_oversize_rxq_set(port);
+ mvpp2_cls_port_config(port);
+
+ /* Provide an initial Rx packet size */
+ port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);
+
+ /* Initialize pools for swf */
+ err = mvpp2_swf_bm_pool_init(port);
+ if (err)
+ goto err_free_percpu;
+
+ return 0;
+
+err_free_percpu:
+ for (queue = 0; queue < port->ntxqs; queue++) {
+ if (!port->txqs[queue])
+ continue;
+ free_percpu(port->txqs[queue]->pcpu);
+ }
+ return err;
+}
+
+/* Check whether the port DT description declares the Tx interrupts.
+ * On PPv2.1, there are no such interrupts. On PPv2.2, they are
+ * available, but we need to keep support for old DTs.
+ */
+static bool mvpp2_port_has_tx_irqs(struct mvpp2 *priv,
+ struct device_node *port_node)
+{
+ char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1",
+ "tx-cpu2", "tx-cpu3" };
+ int ret, i;
+
+ if (priv->hw_version == MVPP21)
+ return false;
+
+ for (i = 0; i < 5; i++) {
+ ret = of_property_match_string(port_node, "interrupt-names",
+ irqs[i]);
+ if (ret < 0)
+ return false;
+ }
+
+ return true;
+}
+
+static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv,
+ struct fwnode_handle *fwnode,
+ char **mac_from)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+ char hw_mac_addr[ETH_ALEN] = {0};
+ char fw_mac_addr[ETH_ALEN];
+
+ if (fwnode_get_mac_address(fwnode, fw_mac_addr, ETH_ALEN)) {
+ *mac_from = "firmware node";
+ ether_addr_copy(dev->dev_addr, fw_mac_addr);
+ return;
+ }
+
+ if (priv->hw_version == MVPP21) {
+ mvpp21_get_mac_address(port, hw_mac_addr);
+ if (is_valid_ether_addr(hw_mac_addr)) {
+ *mac_from = "hardware";
+ ether_addr_copy(dev->dev_addr, hw_mac_addr);
+ return;
+ }
+ }
+
+ *mac_from = "random";
+ eth_hw_addr_random(dev);
+}
+
+static void mvpp2_phylink_validate(struct net_device *dev,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+
+ phylink_set(mask, Autoneg);
+ phylink_set_port_modes(mask);
+ phylink_set(mask, Pause);
+ phylink_set(mask, Asym_Pause);
+
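+ /* The cases below fall through on purpose, so each interface
+ * mode also inherits the link modes listed after it: 10GBASE-KR
+ * additionally advertises the baseT modes, which in turn include
+ * the 1000BASE-X/2500BASE-X modes.
+ */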
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_10GKR:
+ phylink_set(mask, 10000baseCR_Full);
+ phylink_set(mask, 10000baseSR_Full);
+ phylink_set(mask, 10000baseLR_Full);
+ phylink_set(mask, 10000baseLRM_Full);
+ phylink_set(mask, 10000baseER_Full);
+ phylink_set(mask, 10000baseKR_Full);
+ /* Fall-through */
+ default:
+ phylink_set(mask, 10baseT_Half);
+ phylink_set(mask, 10baseT_Full);
+ phylink_set(mask, 100baseT_Half);
+ phylink_set(mask, 100baseT_Full);
+ phylink_set(mask, 10000baseT_Full);
+ /* Fall-through */
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseX_Full);
+ phylink_set(mask, 2500baseX_Full);
+ }
+
+ bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ bitmap_and(state->advertising, state->advertising, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static void mvpp22_xlg_link_state(struct mvpp2_port *port,
+ struct phylink_link_state *state)
+{
+ u32 val;
+
+ state->speed = SPEED_10000;
+ state->duplex = 1;
+ state->an_complete = 1;
+
+ val = readl(port->base + MVPP22_XLG_STATUS);
+ state->link = !!(val & MVPP22_XLG_STATUS_LINK_UP);
+
+ state->pause = 0;
+ val = readl(port->base + MVPP22_XLG_CTRL0_REG);
+ if (val & MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN)
+ state->pause |= MLO_PAUSE_TX;
+ if (val & MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN)
+ state->pause |= MLO_PAUSE_RX;
+}
+
+static void mvpp2_gmac_link_state(struct mvpp2_port *port,
+ struct phylink_link_state *state)
+{
+ u32 val;
+
+ val = readl(port->base + MVPP2_GMAC_STATUS0);
+
+ state->an_complete = !!(val & MVPP2_GMAC_STATUS0_AN_COMPLETE);
+ state->link = !!(val & MVPP2_GMAC_STATUS0_LINK_UP);
+ state->duplex = !!(val & MVPP2_GMAC_STATUS0_FULL_DUPLEX);
+
+ switch (port->phy_interface) {
+ case PHY_INTERFACE_MODE_1000BASEX:
+ state->speed = SPEED_1000;
+ break;
+ case PHY_INTERFACE_MODE_2500BASEX:
+ state->speed = SPEED_2500;
+ break;
+ default:
+ if (val & MVPP2_GMAC_STATUS0_GMII_SPEED)
+ state->speed = SPEED_1000;
+ else if (val & MVPP2_GMAC_STATUS0_MII_SPEED)
+ state->speed = SPEED_100;
+ else
+ state->speed = SPEED_10;
+ }
+
+ state->pause = 0;
+ if (val & MVPP2_GMAC_STATUS0_RX_PAUSE)
+ state->pause |= MLO_PAUSE_RX;
+ if (val & MVPP2_GMAC_STATUS0_TX_PAUSE)
+ state->pause |= MLO_PAUSE_TX;
+}
+
+static int mvpp2_phylink_mac_link_state(struct net_device *dev,
+ struct phylink_link_state *state)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ if (port->priv->hw_version == MVPP22 && port->gop_id == 0) {
+ u32 mode = readl(port->base + MVPP22_XLG_CTRL3_REG);
+ mode &= MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
+
+ if (mode == MVPP22_XLG_CTRL3_MACMODESELECT_10G) {
+ mvpp22_xlg_link_state(port, state);
+ return 1;
+ }
+ }
+
+ mvpp2_gmac_link_state(port, state);
+ return 1;
+}
+
+static void mvpp2_mac_an_restart(struct net_device *dev)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+ u32 val;
+
+ if (port->phy_interface != PHY_INTERFACE_MODE_SGMII)
+ return;
+
+ val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+ /* The RESTART_AN bit is cleared by the h/w after restarting the AN
+ * process.
+ */
+ val |= MVPP2_GMAC_IN_BAND_RESTART_AN | MVPP2_GMAC_IN_BAND_AUTONEG;
+ writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+}
+
+static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ u32 ctrl0, ctrl4;
+
+ ctrl0 = readl(port->base + MVPP22_XLG_CTRL0_REG);
+ ctrl4 = readl(port->base + MVPP22_XLG_CTRL4_REG);
+
+ if (state->pause & MLO_PAUSE_TX)
+ ctrl0 |= MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN;
+ if (state->pause & MLO_PAUSE_RX)
+ ctrl0 |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN;
+
+ ctrl4 &= ~MVPP22_XLG_CTRL4_MACMODSELECT_GMAC;
+ ctrl4 |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC |
+ MVPP22_XLG_CTRL4_EN_IDLE_CHECK;
+
+ writel(ctrl0, port->base + MVPP22_XLG_CTRL0_REG);
+ writel(ctrl4, port->base + MVPP22_XLG_CTRL4_REG);
+}
+
+static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ u32 an, ctrl0, ctrl2, ctrl4;
+
+ an = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+ ctrl0 = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
+ ctrl2 = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
+ ctrl4 = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
+
+ /* Force link down */
+ an &= ~MVPP2_GMAC_FORCE_LINK_PASS;
+ an |= MVPP2_GMAC_FORCE_LINK_DOWN;
+ writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+
+ /* Set the GMAC in a reset state */
+ ctrl2 |= MVPP2_GMAC_PORT_RESET_MASK;
+ writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG);
+
+ an &= ~(MVPP2_GMAC_CONFIG_MII_SPEED | MVPP2_GMAC_CONFIG_GMII_SPEED |
+ MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_FC_ADV_EN |
+ MVPP2_GMAC_FC_ADV_ASM_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG |
+ MVPP2_GMAC_CONFIG_FULL_DUPLEX | MVPP2_GMAC_AN_DUPLEX_EN |
+ MVPP2_GMAC_FORCE_LINK_DOWN);
+ ctrl0 &= ~MVPP2_GMAC_PORT_TYPE_MASK;
+ ctrl2 &= ~(MVPP2_GMAC_PORT_RESET_MASK | MVPP2_GMAC_PCS_ENABLE_MASK);
+
+ if (state->interface == PHY_INTERFACE_MODE_1000BASEX ||
+ state->interface == PHY_INTERFACE_MODE_2500BASEX) {
+ /* 1000BaseX and 2500BaseX ports cannot negotiate speed nor can
+ * they negotiate duplex: they are always operating with a fixed
+ * speed of 1000/2500Mbps in full duplex, so force 1000/2500
+ * speed and full duplex here.
+ */
+ ctrl0 |= MVPP2_GMAC_PORT_TYPE_MASK;
+ an |= MVPP2_GMAC_CONFIG_GMII_SPEED |
+ MVPP2_GMAC_CONFIG_FULL_DUPLEX;
+ } else if (!phy_interface_mode_is_rgmii(state->interface)) {
+ an |= MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG;
+ }
+
+ if (state->duplex)
+ an |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
+ if (phylink_test(state->advertising, Pause))
+ an |= MVPP2_GMAC_FC_ADV_EN;
+ if (phylink_test(state->advertising, Asym_Pause))
+ an |= MVPP2_GMAC_FC_ADV_ASM_EN;
+
+ if (state->interface == PHY_INTERFACE_MODE_SGMII ||
+ state->interface == PHY_INTERFACE_MODE_1000BASEX ||
+ state->interface == PHY_INTERFACE_MODE_2500BASEX) {
+ an |= MVPP2_GMAC_IN_BAND_AUTONEG;
+ ctrl2 |= MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK;
+
+ ctrl4 &= ~(MVPP22_CTRL4_EXT_PIN_GMII_SEL |
+ MVPP22_CTRL4_RX_FC_EN | MVPP22_CTRL4_TX_FC_EN);
+ ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS |
+ MVPP22_CTRL4_DP_CLK_SEL |
+ MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
+
+ if (state->pause & MLO_PAUSE_TX)
+ ctrl4 |= MVPP22_CTRL4_TX_FC_EN;
+ if (state->pause & MLO_PAUSE_RX)
+ ctrl4 |= MVPP22_CTRL4_RX_FC_EN;
+ } else if (phy_interface_mode_is_rgmii(state->interface)) {
+ an |= MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS;
+
+ if (state->speed == SPEED_1000)
+ an |= MVPP2_GMAC_CONFIG_GMII_SPEED;
+ else if (state->speed == SPEED_100)
+ an |= MVPP2_GMAC_CONFIG_MII_SPEED;
+
+ ctrl4 &= ~MVPP22_CTRL4_DP_CLK_SEL;
+ ctrl4 |= MVPP22_CTRL4_EXT_PIN_GMII_SEL |
+ MVPP22_CTRL4_SYNC_BYPASS_DIS |
+ MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
+ }
+
+ writel(ctrl0, port->base + MVPP2_GMAC_CTRL_0_REG);
+ writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG);
+ writel(ctrl4, port->base + MVPP22_GMAC_CTRL_4_REG);
+ writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+}
+
+static void mvpp2_mac_config(struct net_device *dev, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ /* Check for invalid configuration */
+ if (state->interface == PHY_INTERFACE_MODE_10GKR && port->gop_id != 0) {
+ netdev_err(dev, "Invalid mode on %s\n", dev->name);
+ return;
+ }
+
+ netif_tx_stop_all_queues(port->dev);
+ if (!port->has_phy)
+ netif_carrier_off(port->dev);
+
+ /* Make sure the port is disabled when reconfiguring the mode */
+ mvpp2_port_disable(port);
+
+ if (port->priv->hw_version == MVPP22 &&
+ port->phy_interface != state->interface) {
+ port->phy_interface = state->interface;
+
+ /* Reconfigure the serdes lanes */
+ phy_power_off(port->comphy);
+ mvpp22_mode_reconfigure(port);
+ }
+
+ /* mac (re)configuration */
+ if (state->interface == PHY_INTERFACE_MODE_10GKR)
+ mvpp2_xlg_config(port, mode, state);
+ else if (phy_interface_mode_is_rgmii(state->interface) ||
+ state->interface == PHY_INTERFACE_MODE_SGMII ||
+ state->interface == PHY_INTERFACE_MODE_1000BASEX ||
+ state->interface == PHY_INTERFACE_MODE_2500BASEX)
+ mvpp2_gmac_config(port, mode, state);
+
+ if (port->priv->hw_version == MVPP21 && port->flags & MVPP2_F_LOOPBACK)
+ mvpp2_port_loopback_set(port, state);
+
+ /* If the port already was up, make sure it's still in the same state */
+ if (state->link || !port->has_phy) {
+ mvpp2_port_enable(port);
+
+ mvpp2_egress_enable(port);
+ mvpp2_ingress_enable(port);
+ if (!port->has_phy)
+ netif_carrier_on(dev);
+ netif_tx_wake_all_queues(dev);
+ }
+}
+
+static void mvpp2_mac_link_up(struct net_device *dev, unsigned int mode,
+ phy_interface_t interface, struct phy_device *phy)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+ u32 val;
+
+ if (!phylink_autoneg_inband(mode) &&
+ interface != PHY_INTERFACE_MODE_10GKR) {
+ val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+ val &= ~MVPP2_GMAC_FORCE_LINK_DOWN;
+ if (phy_interface_mode_is_rgmii(interface))
+ val |= MVPP2_GMAC_FORCE_LINK_PASS;
+ writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+ }
+
+ mvpp2_port_enable(port);
+
+ mvpp2_egress_enable(port);
+ mvpp2_ingress_enable(port);
+ netif_tx_wake_all_queues(dev);
+}
+
+static void mvpp2_mac_link_down(struct net_device *dev, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+ u32 val;
+
+ if (!phylink_autoneg_inband(mode) &&
+ interface != PHY_INTERFACE_MODE_10GKR) {
+ val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+ val &= ~MVPP2_GMAC_FORCE_LINK_PASS;
+ val |= MVPP2_GMAC_FORCE_LINK_DOWN;
+ writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+ }
+
+ netif_tx_stop_all_queues(dev);
+ mvpp2_egress_disable(port);
+ mvpp2_ingress_disable(port);
+
+ /* When using link interrupts to notify phylink of a MAC state change,
+ * we do not want the port to be disabled (we want to receive further
+ * interrupts, to be notified when the link comes back up later).
+ */
+ if (!port->has_phy)
+ return;
+
+ mvpp2_port_disable(port);
+}
+
+static const struct phylink_mac_ops mvpp2_phylink_ops = {
+ .validate = mvpp2_phylink_validate,
+ .mac_link_state = mvpp2_phylink_mac_link_state,
+ .mac_an_restart = mvpp2_mac_an_restart,
+ .mac_config = mvpp2_mac_config,
+ .mac_link_up = mvpp2_mac_link_up,
+ .mac_link_down = mvpp2_mac_link_down,
+};
+
+/* Ports initialization */
+static int mvpp2_port_probe(struct platform_device *pdev,
+ struct fwnode_handle *port_fwnode,
+ struct mvpp2 *priv)
+{
+ struct phy *comphy = NULL;
+ struct mvpp2_port *port;
+ struct mvpp2_port_pcpu *port_pcpu;
+ struct device_node *port_node = to_of_node(port_fwnode);
+ struct net_device *dev;
+ struct resource *res;
+ struct phylink *phylink;
+ char *mac_from = "";
+ unsigned int ntxqs, nrxqs;
+ bool has_tx_irqs;
+ u32 id;
+ int features;
+ int phy_mode;
+ int err, i, cpu;
+
+ if (port_node) {
+ has_tx_irqs = mvpp2_port_has_tx_irqs(priv, port_node);
+ } else {
+ has_tx_irqs = true;
+ queue_mode = MVPP2_QDIST_MULTI_MODE;
+ }
+
+ if (!has_tx_irqs)
+ queue_mode = MVPP2_QDIST_SINGLE_MODE;
+
+ ntxqs = MVPP2_MAX_TXQ;
+ if (priv->hw_version == MVPP22 && queue_mode == MVPP2_QDIST_MULTI_MODE)
+ nrxqs = MVPP2_DEFAULT_RXQ * num_possible_cpus();
+ else
+ nrxqs = MVPP2_DEFAULT_RXQ;
+
+ dev = alloc_etherdev_mqs(sizeof(*port), ntxqs, nrxqs);
+ if (!dev)
+ return -ENOMEM;
+
+ phy_mode = fwnode_get_phy_mode(port_fwnode);
+ if (phy_mode < 0) {
+ dev_err(&pdev->dev, "incorrect phy mode\n");
+ err = phy_mode;
+ goto err_free_netdev;
+ }
+
+ if (port_node) {
+ comphy = devm_of_phy_get(&pdev->dev, port_node, NULL);
+ if (IS_ERR(comphy)) {
+ if (PTR_ERR(comphy) == -EPROBE_DEFER) {
+ err = -EPROBE_DEFER;
+ goto err_free_netdev;
+ }
+ comphy = NULL;
+ }
+ }
+
+ if (fwnode_property_read_u32(port_fwnode, "port-id", &id)) {
+ err = -EINVAL;
+ dev_err(&pdev->dev, "missing port-id value\n");
+ goto err_free_netdev;
+ }
+
+ dev->tx_queue_len = MVPP2_MAX_TXD_MAX;
+ dev->watchdog_timeo = 5 * HZ;
+ dev->netdev_ops = &mvpp2_netdev_ops;
+ dev->ethtool_ops = &mvpp2_eth_tool_ops;
+
+ port = netdev_priv(dev);
+ port->dev = dev;
+ port->fwnode = port_fwnode;
+ port->has_phy = !!of_find_property(port_node, "phy", NULL);
+ port->ntxqs = ntxqs;
+ port->nrxqs = nrxqs;
+ port->priv = priv;
+ port->has_tx_irqs = has_tx_irqs;
+
+ err = mvpp2_queue_vectors_init(port, port_node);
+ if (err)
+ goto err_free_netdev;
+
+ if (port_node)
+ port->link_irq = of_irq_get_byname(port_node, "link");
+ else
+ port->link_irq = fwnode_irq_get(port_fwnode, port->nqvecs + 1);
+ if (port->link_irq == -EPROBE_DEFER) {
+ err = -EPROBE_DEFER;
+ goto err_deinit_qvecs;
+ }
+ if (port->link_irq <= 0)
+ /* the link irq is optional */
+ port->link_irq = 0;
+
+ if (fwnode_property_read_bool(port_fwnode, "marvell,loopback"))
+ port->flags |= MVPP2_F_LOOPBACK;
+
+ port->id = id;
+ if (priv->hw_version == MVPP21)
+ port->first_rxq = port->id * port->nrxqs;
+ else
+ port->first_rxq = port->id * priv->max_port_rxqs;
+
+ port->of_node = port_node;
+ port->phy_interface = phy_mode;
+ port->comphy = comphy;
+
+ if (priv->hw_version == MVPP21) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 2 + id);
+ port->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(port->base)) {
+ err = PTR_ERR(port->base);
+ goto err_free_irq;
+ }
+
+ port->stats_base = port->priv->lms_base +
+ MVPP21_MIB_COUNTERS_OFFSET +
+ port->gop_id * MVPP21_MIB_COUNTERS_PORT_SZ;
+ } else {
+ if (fwnode_property_read_u32(port_fwnode, "gop-port-id",
+ &port->gop_id)) {
+ err = -EINVAL;
+ dev_err(&pdev->dev, "missing gop-port-id value\n");
+ goto err_deinit_qvecs;
+ }
+
+ port->base = priv->iface_base + MVPP22_GMAC_BASE(port->gop_id);
+ port->stats_base = port->priv->iface_base +
+ MVPP22_MIB_COUNTERS_OFFSET +
+ port->gop_id * MVPP22_MIB_COUNTERS_PORT_SZ;
+ }
+
+ /* Alloc per-cpu and ethtool stats */
+ port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats);
+ if (!port->stats) {
+ err = -ENOMEM;
+ goto err_free_irq;
+ }
+
+ port->ethtool_stats = devm_kcalloc(&pdev->dev,
+ ARRAY_SIZE(mvpp2_ethtool_regs),
+ sizeof(u64), GFP_KERNEL);
+ if (!port->ethtool_stats) {
+ err = -ENOMEM;
+ goto err_free_stats;
+ }
+
+ mutex_init(&port->gather_stats_lock);
+ INIT_DELAYED_WORK(&port->stats_work, mvpp2_gather_hw_statistics);
+
+ mvpp2_port_copy_mac_addr(dev, priv, port_fwnode, &mac_from);
+
+ port->tx_ring_size = MVPP2_MAX_TXD_DFLT;
+ port->rx_ring_size = MVPP2_MAX_RXD_DFLT;
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ err = mvpp2_port_init(port);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to init port %d\n", id);
+ goto err_free_stats;
+ }
+
+ mvpp2_port_periodic_xon_disable(port);
+
+ mvpp2_port_reset(port);
+
+ port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
+ if (!port->pcpu) {
+ err = -ENOMEM;
+ goto err_free_txq_pcpu;
+ }
+
+ if (!port->has_tx_irqs) {
+ for_each_present_cpu(cpu) {
+ port_pcpu = per_cpu_ptr(port->pcpu, cpu);
+
+ hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED);
+ port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
+ port_pcpu->timer_scheduled = false;
+
+ tasklet_init(&port_pcpu->tx_done_tasklet,
+ mvpp2_tx_proc_cb,
+ (unsigned long)dev);
+ }
+ }
+
+ features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO;
+ dev->features = features | NETIF_F_RXCSUM;
+ dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO |
+ NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ if (port->pool_long->id == MVPP2_BM_JUMBO && port->id != 0) {
+ dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+ dev->hw_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+ }
+
+ dev->vlan_features |= features;
+ dev->gso_max_segs = MVPP2_MAX_TSO_SEGS;
+ dev->priv_flags |= IFF_UNICAST_FLT;
+
+ /* MTU range: 68 - 9704 */
+ dev->min_mtu = ETH_MIN_MTU;
+ /* 9704 == 9728 - 20 and rounding to 8 */
+ dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE;
+
+ /* Phylink isn't used with ACPI as of now */
+ if (port_node) {
+ phylink = phylink_create(dev, port_fwnode, phy_mode,
+ &mvpp2_phylink_ops);
+ if (IS_ERR(phylink)) {
+ err = PTR_ERR(phylink);
+ goto err_free_port_pcpu;
+ }
+ port->phylink = phylink;
+ } else {
+ port->phylink = NULL;
+ }
+
+ err = register_netdev(dev);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to register netdev\n");
+ goto err_phylink;
+ }
+ netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);
+
+ priv->port_list[priv->port_count++] = port;
+
+ return 0;
+
+err_phylink:
+ if (port->phylink)
+ phylink_destroy(port->phylink);
+err_free_port_pcpu:
+ free_percpu(port->pcpu);
+err_free_txq_pcpu:
+ for (i = 0; i < port->ntxqs; i++)
+ free_percpu(port->txqs[i]->pcpu);
+err_free_stats:
+ free_percpu(port->stats);
+err_free_irq:
+ if (port->link_irq)
+ irq_dispose_mapping(port->link_irq);
+err_deinit_qvecs:
+ mvpp2_queue_vectors_deinit(port);
+err_free_netdev:
+ free_netdev(dev);
+ return err;
+}
+
+/* Port removal routine */
+static void mvpp2_port_remove(struct mvpp2_port *port)
+{
+ int i;
+
+ unregister_netdev(port->dev);
+ if (port->phylink)
+ phylink_destroy(port->phylink);
+ free_percpu(port->pcpu);
+ free_percpu(port->stats);
+ for (i = 0; i < port->ntxqs; i++)
+ free_percpu(port->txqs[i]->pcpu);
+ mvpp2_queue_vectors_deinit(port);
+ if (port->link_irq)
+ irq_dispose_mapping(port->link_irq);
+ free_netdev(port->dev);
+}
+
+/* Initialize decoding windows */
+static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
+ struct mvpp2 *priv)
+{
+ u32 win_enable;
+ int i;
+
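+ /* Clear all six decoding windows first (only the first four have
+ * a remap register), then program and enable one window per DRAM
+ * chip select.
+ */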
+ for (i = 0; i < 6; i++) {
+ mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
+ mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);
+
+ if (i < 4)
+ mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
+ }
+
+ win_enable = 0;
+
+ for (i = 0; i < dram->num_cs; i++) {
+ const struct mbus_dram_window *cs = dram->cs + i;
+
+ mvpp2_write(priv, MVPP2_WIN_BASE(i),
+ (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
+ dram->mbus_dram_target_id);
+
+ mvpp2_write(priv, MVPP2_WIN_SIZE(i),
+ (cs->size - 1) & 0xffff0000);
+
+ win_enable |= (1 << i);
+ }
+
+ mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
+}
+
+/* Initialize Rx FIFOs */
+static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
+{
+ int port;
+
+ for (port = 0; port < MVPP2_MAX_PORTS; port++) {
+ mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
+ MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
+ mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
+ MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB);
+ }
+
+ mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
+ MVPP2_RX_FIFO_PORT_MIN_PKT);
+ mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
+}
+
+static void mvpp22_rx_fifo_init(struct mvpp2 *priv)
+{
+ int port;
+
+ /* The FIFO size parameters are set depending on the maximum speed a
+ * given port can handle:
+ * - Port 0: 10Gbps
+ * - Port 1: 2.5Gbps
+ * - Ports 2 and 3: 1Gbps
+ */
+
+ mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(0),
+ MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB);
+ mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(0),
+ MVPP2_RX_FIFO_PORT_ATTR_SIZE_32KB);
+
+ mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(1),
+ MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB);
+ mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(1),
+ MVPP2_RX_FIFO_PORT_ATTR_SIZE_8KB);
+
+ for (port = 2; port < MVPP2_MAX_PORTS; port++) {
+ mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
+ MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
+ mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
+ MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB);
+ }
+
+ mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
+ MVPP2_RX_FIFO_PORT_MIN_PKT);
+ mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
+}
+
+/* Initialize Tx FIFOs: the total FIFO size is 19kB on PPv2.2 and 10G
+ * interfaces must have a Tx FIFO size of 10kB. As only port 0 can do 10G,
+ * configure its Tx FIFO size to 10kB and the other ports' Tx FIFO size to 3kB.
+ */
+static void mvpp22_tx_fifo_init(struct mvpp2 *priv)
+{
+ int port, size, thrs;
+
+ for (port = 0; port < MVPP2_MAX_PORTS; port++) {
+ if (port == 0) {
+ size = MVPP22_TX_FIFO_DATA_SIZE_10KB;
+ thrs = MVPP2_TX_FIFO_THRESHOLD_10KB;
+ } else {
+ size = MVPP22_TX_FIFO_DATA_SIZE_3KB;
+ thrs = MVPP2_TX_FIFO_THRESHOLD_3KB;
+ }
+ mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port), size);
+ mvpp2_write(priv, MVPP22_TX_FIFO_THRESH_REG(port), thrs);
+ }
+}
+
+static void mvpp2_axi_init(struct mvpp2 *priv)
+{
+ u32 val, rdval, wrval;
+
+ mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0);
+
+ /* AXI Bridge Configuration */
+
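+ /* Tag DMA reads and writes as cacheable in the outer shareable
+ * domain (per the MVPP22_AXI_CODE_* values), so the controller's
+ * traffic can take part in system cache coherency.
+ */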
+ rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE
+ << MVPP22_AXI_ATTR_CACHE_OFFS;
+ rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
+ << MVPP22_AXI_ATTR_DOMAIN_OFFS;
+
+ wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE
+ << MVPP22_AXI_ATTR_CACHE_OFFS;
+ wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
+ << MVPP22_AXI_ATTR_DOMAIN_OFFS;
+
+ /* BM */
+ mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval);
+ mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval);
+
+ /* Descriptors */
+ mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval);
+ mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval);
+ mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval);
+ mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval);
+
+ /* Buffer Data */
+ mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval);
+ mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval);
+
+ val = MVPP22_AXI_CODE_CACHE_NON_CACHE
+ << MVPP22_AXI_CODE_CACHE_OFFS;
+ val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM
+ << MVPP22_AXI_CODE_DOMAIN_OFFS;
+ mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val);
+ mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val);
+
+ val = MVPP22_AXI_CODE_CACHE_RD_CACHE
+ << MVPP22_AXI_CODE_CACHE_OFFS;
+ val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
+ << MVPP22_AXI_CODE_DOMAIN_OFFS;
+
+ mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val);
+
+ val = MVPP22_AXI_CODE_CACHE_WR_CACHE
+ << MVPP22_AXI_CODE_CACHE_OFFS;
+ val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
+ << MVPP22_AXI_CODE_DOMAIN_OFFS;
+
+ mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val);
+}
+
+/* Initialize network controller common part HW */
+static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
+{
+ const struct mbus_dram_target_info *dram_target_info;
+ int err, i;
+ u32 val;
+
+ /* MBUS windows configuration */
+ dram_target_info = mv_mbus_dram_info();
+ if (dram_target_info)
+ mvpp2_conf_mbus_windows(dram_target_info, priv);
+
+ if (priv->hw_version == MVPP22)
+ mvpp2_axi_init(priv);
+
+ /* Disable HW PHY polling */
+ if (priv->hw_version == MVPP21) {
+ val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
+ val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
+ writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
+ } else {
+ val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
+ val &= ~MVPP22_SMI_POLLING_EN;
+ writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
+ }
+
+ /* Allocate and initialize aggregated TXQs */
+ priv->aggr_txqs = devm_kcalloc(&pdev->dev, num_present_cpus(),
+ sizeof(*priv->aggr_txqs),
+ GFP_KERNEL);
+ if (!priv->aggr_txqs)
+ return -ENOMEM;
+
+ for_each_present_cpu(i) {
+ priv->aggr_txqs[i].id = i;
+ priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
+ err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i], i, priv);
+ if (err < 0)
+ return err;
+ }
+
+ /* FIFO init */
+ if (priv->hw_version == MVPP21) {
+ mvpp2_rx_fifo_init(priv);
+ } else {
+ mvpp22_rx_fifo_init(priv);
+ mvpp22_tx_fifo_init(priv);
+ }
+
+ if (priv->hw_version == MVPP21)
+ writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
+ priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);
+
+ /* Allow cache snoop when transmitting packets */
+ mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);
+
+ /* Buffer Manager initialization */
+ err = mvpp2_bm_init(pdev, priv);
+ if (err < 0)
+ return err;
+
+ /* Parser default initialization */
+ err = mvpp2_prs_default_init(pdev, priv);
+ if (err < 0)
+ return err;
+
+ /* Classifier default initialization */
+ mvpp2_cls_init(priv);
+
+ return 0;
+}
+
+static int mvpp2_probe(struct platform_device *pdev)
+{
+ const struct acpi_device_id *acpi_id;
+ struct fwnode_handle *fwnode = pdev->dev.fwnode;
+ struct fwnode_handle *port_fwnode;
+ struct mvpp2 *priv;
+ struct resource *res;
+ void __iomem *base;
+ int i;
+ int err;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ if (has_acpi_companion(&pdev->dev)) {
+ acpi_id = acpi_match_device(pdev->dev.driver->acpi_match_table,
+ &pdev->dev);
+ priv->hw_version = (unsigned long)acpi_id->driver_data;
+ } else {
+ priv->hw_version =
+ (unsigned long)of_device_get_match_data(&pdev->dev);
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ if (priv->hw_version == MVPP21) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ priv->lms_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->lms_base))
+ return PTR_ERR(priv->lms_base);
+ } else {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (has_acpi_companion(&pdev->dev)) {
+ /* In case the MDIO memory region is declared in
+ * the ACPI tables, it may already appear as 'in-use'
+ * in the OS. Because it is overlapped by the second
+ * region of the network controller, make sure it is
+ * released before requesting it again. The mvpp2
+ * driver takes care to avoid concurrent access to
+ * this memory region.
+ */
+ release_resource(res);
+ }
+ priv->iface_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->iface_base))
+ return PTR_ERR(priv->iface_base);
+ }
+
+ if (priv->hw_version == MVPP22 && dev_of_node(&pdev->dev)) {
+ priv->sysctrl_base =
+ syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "marvell,system-controller");
+ if (IS_ERR(priv->sysctrl_base))
+ /* The system controller regmap is optional for dt
+ * compatibility reasons. When not provided, the
+ * configuration of the GoP relies on the
+ * firmware/bootloader.
+ */
+ priv->sysctrl_base = NULL;
+ }
+
+ mvpp2_setup_bm_pool();
+
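+ /* Each software thread gets its own register window, so per-CPU
+ * register accesses target disjoint address spaces; the window
+ * size differs between PPv2.1 and PPv2.2.
+ */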
+ for (i = 0; i < MVPP2_MAX_THREADS; i++) {
+ u32 addr_space_sz;
+
+ addr_space_sz = (priv->hw_version == MVPP21 ?
+ MVPP21_ADDR_SPACE_SZ : MVPP22_ADDR_SPACE_SZ);
+ priv->swth_base[i] = base + i * addr_space_sz;
+ }
+
+ if (priv->hw_version == MVPP21)
+ priv->max_port_rxqs = 8;
+ else
+ priv->max_port_rxqs = 32;
+
+ if (dev_of_node(&pdev->dev)) {
+ priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
+ if (IS_ERR(priv->pp_clk))
+ return PTR_ERR(priv->pp_clk);
+ err = clk_prepare_enable(priv->pp_clk);
+ if (err < 0)
+ return err;
+
+ priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
+ if (IS_ERR(priv->gop_clk)) {
+ err = PTR_ERR(priv->gop_clk);
+ goto err_pp_clk;
+ }
+ err = clk_prepare_enable(priv->gop_clk);
+ if (err < 0)
+ goto err_pp_clk;
+
+ if (priv->hw_version == MVPP22) {
+ priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk");
+ if (IS_ERR(priv->mg_clk)) {
+ err = PTR_ERR(priv->mg_clk);
+ goto err_gop_clk;
+ }
+
+ err = clk_prepare_enable(priv->mg_clk);
+ if (err < 0)
+ goto err_gop_clk;
+
+ priv->mg_core_clk = devm_clk_get(&pdev->dev, "mg_core_clk");
+ if (IS_ERR(priv->mg_core_clk)) {
+ priv->mg_core_clk = NULL;
+ } else {
+ err = clk_prepare_enable(priv->mg_core_clk);
+ if (err < 0)
+ goto err_mg_clk;
+ }
+ }
+
+ priv->axi_clk = devm_clk_get(&pdev->dev, "axi_clk");
+ if (IS_ERR(priv->axi_clk)) {
+ err = PTR_ERR(priv->axi_clk);
+ if (err == -EPROBE_DEFER)
+ goto err_mg_core_clk;
+ priv->axi_clk = NULL;
+ } else {
+ err = clk_prepare_enable(priv->axi_clk);
+ if (err < 0)
+ goto err_mg_core_clk;
+ }
+
+ /* Get system's tclk rate */
+ priv->tclk = clk_get_rate(priv->pp_clk);
+ } else if (device_property_read_u32(&pdev->dev, "clock-frequency",
+ &priv->tclk)) {
+ dev_err(&pdev->dev, "missing clock-frequency value\n");
+ return -EINVAL;
+ }
+
+ if (priv->hw_version == MVPP22) {
+ err = dma_set_mask(&pdev->dev, MVPP2_DESC_DMA_MASK);
+ if (err)
+ goto err_axi_clk;
+ /* Sadly, the BM pools all share the same register to
+ * store the high 32 bits of their address. So they
+ * must all have the same high 32 bits, which forces
+ * us to restrict coherent memory to DMA_BIT_MASK(32).
+ */
+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (err)
+ goto err_axi_clk;
+ }
+
+ /* Initialize network controller */
+ err = mvpp2_init(pdev, priv);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to initialize controller\n");
+ goto err_axi_clk;
+ }
+
+ /* Initialize ports */
+ fwnode_for_each_available_child_node(fwnode, port_fwnode) {
+ err = mvpp2_port_probe(pdev, port_fwnode, priv);
+ if (err < 0)
+ goto err_port_probe;
+ }
+
+ if (priv->port_count == 0) {
+ dev_err(&pdev->dev, "no ports enabled\n");
+ err = -ENODEV;
+ goto err_axi_clk;
+ }
+
+ /* Statistics must be gathered regularly because some of them (like
+ * packets counters) are 32-bit registers and could overflow quite
+ * quickly. For instance, a 10Gb link used at full bandwidth with the
+ * smallest packets (64B) will overflow a 32-bit counter in less than
+ * 30 seconds. Hence, use a workqueue to fill 64-bit counters.
+ */
+ snprintf(priv->queue_name, sizeof(priv->queue_name),
+ "stats-wq-%s%s", netdev_name(priv->port_list[0]->dev),
+ priv->port_count > 1 ? "+" : "");
+ priv->stats_queue = create_singlethread_workqueue(priv->queue_name);
+ if (!priv->stats_queue) {
+ err = -ENOMEM;
+ goto err_port_probe;
+ }
+
+ platform_set_drvdata(pdev, priv);
+ return 0;
+
+err_port_probe:
+ i = 0;
+ fwnode_for_each_available_child_node(fwnode, port_fwnode) {
+ if (priv->port_list[i])
+ mvpp2_port_remove(priv->port_list[i]);
+ i++;
+ }
+err_axi_clk:
+ clk_disable_unprepare(priv->axi_clk);
+
+err_mg_core_clk:
+ if (priv->hw_version == MVPP22)
+ clk_disable_unprepare(priv->mg_core_clk);
+err_mg_clk:
+ if (priv->hw_version == MVPP22)
+ clk_disable_unprepare(priv->mg_clk);
+err_gop_clk:
+ clk_disable_unprepare(priv->gop_clk);
+err_pp_clk:
+ clk_disable_unprepare(priv->pp_clk);
+ return err;
+}
+
+static int mvpp2_remove(struct platform_device *pdev)
+{
+ struct mvpp2 *priv = platform_get_drvdata(pdev);
+ struct fwnode_handle *fwnode = pdev->dev.fwnode;
+ struct fwnode_handle *port_fwnode;
+ int i = 0;
+
+ flush_workqueue(priv->stats_queue);
+ destroy_workqueue(priv->stats_queue);
+
+ fwnode_for_each_available_child_node(fwnode, port_fwnode) {
+ if (priv->port_list[i]) {
+ mutex_destroy(&priv->port_list[i]->gather_stats_lock);
+ mvpp2_port_remove(priv->port_list[i]);
+ }
+ i++;
+ }
+
+ for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
+ struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];
+
+ mvpp2_bm_pool_destroy(pdev, priv, bm_pool);
+ }
+
+ for_each_present_cpu(i) {
+ struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i];
+
+ dma_free_coherent(&pdev->dev,
+ MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
+ aggr_txq->descs,
+ aggr_txq->descs_dma);
+ }
+
+ if (is_acpi_node(port_fwnode))
+ return 0;
+
+ clk_disable_unprepare(priv->axi_clk);
+ clk_disable_unprepare(priv->mg_core_clk);
+ clk_disable_unprepare(priv->mg_clk);
+ clk_disable_unprepare(priv->pp_clk);
+ clk_disable_unprepare(priv->gop_clk);
+
+ return 0;
+}
+
+static const struct of_device_id mvpp2_match[] = {
+ {
+ .compatible = "marvell,armada-375-pp2",
+ .data = (void *)MVPP21,
+ },
+ {
+ .compatible = "marvell,armada-7k-pp22",
+ .data = (void *)MVPP22,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mvpp2_match);
+
+static const struct acpi_device_id mvpp2_acpi_match[] = {
+ { "MRVL0110", MVPP22 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, mvpp2_acpi_match);
+
+static struct platform_driver mvpp2_driver = {
+ .probe = mvpp2_probe,
+ .remove = mvpp2_remove,
+ .driver = {
+ .name = MVPP2_DRIVER_NAME,
+ .of_match_table = mvpp2_match,
+ .acpi_match_table = ACPI_PTR(mvpp2_acpi_match),
+ },
+};
+
+module_platform_driver(mvpp2_driver);
+
+MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
+MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
new file mode 100644
index 000000000000..6bb69f086794
--- /dev/null
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
@@ -0,0 +1,2467 @@
+/*
+ * Header Parser helpers for Marvell PPv2 Network Controller
+ *
+ * Copyright (C) 2014 Marvell
+ *
+ * Marcin Wojtas <mw@semihalf.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <uapi/linux/ppp_defs.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+
+#include "mvpp2.h"
+#include "mvpp2_prs.h"
+
+/* Update parser tcam and sram hw entries */
+static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
+{
+ int i;
+
+ if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
+ return -EINVAL;
+
+ /* Clear entry invalidation bit */
+ pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;
+
+ /* Write tcam index - indirect access */
+ mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
+ for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
+ mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);
+
+ /* Write sram index - indirect access */
+ mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
+ for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
+ mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);
+
+ return 0;
+}
+
+/* Initialize tcam entry from hw */
+static int mvpp2_prs_init_from_hw(struct mvpp2 *priv,
+ struct mvpp2_prs_entry *pe, int tid)
+{
+ int i;
+
+ if (tid > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
+ return -EINVAL;
+
+ memset(pe, 0, sizeof(*pe));
+ pe->index = tid;
+
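+ /* Read the invalidation word first; for an invalid entry there is
+ * no point in fetching the remaining TCAM and SRAM words.
+ */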
+ /* Write tcam index - indirect access */
+ mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
+
+ pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
+ MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
+ if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
+ return MVPP2_PRS_TCAM_ENTRY_INVALID;
+
+ for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
+ pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));
+
+ /* Write sram index - indirect access */
+ mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
+ for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
+ pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));
+
+ return 0;
+}
+
+/* Invalidate tcam hw entry */
+static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
+{
+ /* Write index - indirect access */
+ mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
+ mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
+ MVPP2_PRS_TCAM_INV_MASK);
+}
+
+/* Enable shadow table entry and set its lookup ID */
+static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
+{
+ priv->prs_shadow[index].valid = true;
+ priv->prs_shadow[index].lu = lu;
+}
+
+/* Update ri fields in shadow table entry */
+static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
+ unsigned int ri, unsigned int ri_mask)
+{
+ priv->prs_shadow[index].ri_mask = ri_mask;
+ priv->prs_shadow[index].ri = ri;
+}
+
+/* Update lookup field in tcam sw entry */
+static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
+{
+ int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);
+
+ pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
+ pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
+}
+
+/* Update mask for single port in tcam sw entry */
+static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
+ unsigned int port, bool add)
+{
+ int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
+
+ if (add)
+ pe->tcam.byte[enable_off] &= ~(1 << port);
+ else
+ pe->tcam.byte[enable_off] |= 1 << port;
+}
+
+/* Update port map in tcam sw entry */
+static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
+ unsigned int ports)
+{
+ unsigned char port_mask = MVPP2_PRS_PORT_MASK;
+ int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
+
+ pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
+ pe->tcam.byte[enable_off] &= ~port_mask;
+ pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK;
+}
+
+/* Obtain port map from tcam sw entry */
+static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
+{
+ int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
+
+ return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
+}
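+
+/* Note on the port filter encoding used by the three helpers above: the tcam
+ * enable byte stores the port map *inverted*, so a cleared bit means "port
+ * participates in the match". With an 8-bit port mask, setting a port map of
+ * 0x3 stores 0xfc in the enable byte, and reading it back yields
+ * ~0xfc & MVPP2_PRS_PORT_MASK == 0x3 again.
+ */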
+
+/* Set byte of data and its enable bits in tcam sw entry */
+static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
+ unsigned int offs, unsigned char byte,
+ unsigned char enable)
+{
+ pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
+ pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
+}
+
+/* Get byte of data and its enable bits from tcam sw entry */
+static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
+ unsigned int offs, unsigned char *byte,
+ unsigned char *enable)
+{
+ *byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
+ *enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
+}
+
+/* Compare tcam data bytes with a pattern */
+static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs,
+ u16 data)
+{
+ int off = MVPP2_PRS_TCAM_DATA_BYTE(offs);
+ u16 tcam_data;
+
+ tcam_data = (pe->tcam.byte[off + 1] << 8) | pe->tcam.byte[off];
+ if (tcam_data != data)
+ return false;
+ return true;
+}
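+
+/* Note: tcam data bytes hold the packet in network (big-endian) byte order,
+ * while the u16 above is assembled least-significant-byte first. Callers such
+ * as mvpp2_prs_vlan_find() therefore pass swab16() of the host-order value
+ * they want to match.
+ */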
+
+/* Update ai bits in tcam sw entry */
+static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe,
+ unsigned int bits, unsigned int enable)
+{
+ int i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE;
+
+ for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
+ if (!(enable & BIT(i)))
+ continue;
+
+ if (bits & BIT(i))
+ pe->tcam.byte[ai_idx] |= 1 << i;
+ else
+ pe->tcam.byte[ai_idx] &= ~(1 << i);
+ }
+
+ pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(ai_idx)] |= enable;
+}
+
+/* Get ai bits from tcam sw entry */
+static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
+{
+ return pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE];
+}
+
+/* Set ethertype in tcam sw entry */
+static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
+ unsigned short ethertype)
+{
+ mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
+ mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
+}
+
+/* Set vid in tcam sw entry */
+static void mvpp2_prs_match_vid(struct mvpp2_prs_entry *pe, int offset,
+ unsigned short vid)
+{
+ mvpp2_prs_tcam_data_byte_set(pe, offset + 0, (vid & 0xf00) >> 8, 0xf);
+ mvpp2_prs_tcam_data_byte_set(pe, offset + 1, vid & 0xff, 0xff);
+}
+
+/* Set bits in sram sw entry */
+static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
+ int val)
+{
+ pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
+}
+
+/* Clear bits in sram sw entry */
+static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
+ int val)
+{
+ pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
+}
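+
+/* Note: the two helpers above only touch a single sram byte, so they assume
+ * val fits between bit_num % 8 and the end of that byte. Fields that cross a
+ * byte boundary (see mvpp2_prs_sram_offset_set()) must write the spill-over
+ * bits into the following byte by hand.
+ */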
+
+/* Update ri bits in sram sw entry */
+static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
+ unsigned int bits, unsigned int mask)
+{
+ unsigned int i;
+
+ for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
+ int ri_off = MVPP2_PRS_SRAM_RI_OFFS;
+
+ if (!(mask & BIT(i)))
+ continue;
+
+ if (bits & BIT(i))
+ mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
+ else
+ mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);
+
+ mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
+ }
+}
+
+/* Obtain ri bits from sram sw entry */
+static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
+{
+ return pe->sram.word[MVPP2_PRS_SRAM_RI_WORD];
+}
+
+/* Update ai bits in sram sw entry */
+static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
+ unsigned int bits, unsigned int mask)
+{
+ unsigned int i;
+ int ai_off = MVPP2_PRS_SRAM_AI_OFFS;
+
+ for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
+ if (!(mask & BIT(i)))
+ continue;
+
+ if (bits & BIT(i))
+ mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
+ else
+ mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);
+
+ mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
+ }
+}
+
+/* Read ai bits from sram sw entry */
+static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
+{
+ u8 bits;
+ int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
+ int ai_en_off = ai_off + 1;
+ int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;
+
+ bits = (pe->sram.byte[ai_off] >> ai_shift) |
+ (pe->sram.byte[ai_en_off] << (8 - ai_shift));
+
+ return bits;
+}
+
+/* In the sram sw entry, set the lookup ID field of the tcam key to be used in
+ * the next lookup iteration
+ */
+static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
+ unsigned int lu)
+{
+ int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;
+
+ mvpp2_prs_sram_bits_clear(pe, sram_next_off,
+ MVPP2_PRS_SRAM_NEXT_LU_MASK);
+ mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
+}
+
+/* In the sram sw entry set sign and value of the next lookup offset
+ * and the offset value generated to the classifier
+ */
+static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
+ unsigned int op)
+{
+ /* Set sign */
+ if (shift < 0) {
+ mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
+ shift = 0 - shift;
+ } else {
+ mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
+ }
+
+ /* Set value */
+ pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] =
+ (unsigned char)shift;
+
+ /* Reset and set operation */
+ mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
+ mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);
+
+ /* Set base offset as current */
+ mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
+}
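+
+/* Note: the shift is stored as sign + magnitude rather than two's complement:
+ * e.g. a shift of -18 (used below to step back to the IPv6 next-header field)
+ * sets the sign bit and stores 18 as the value.
+ */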
+
+/* In the sram sw entry set sign and value of the user defined offset
+ * generated to the classifier
+ */
+static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
+ unsigned int type, int offset,
+ unsigned int op)
+{
+ /* Set sign */
+ if (offset < 0) {
+ mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
+ offset = 0 - offset;
+ } else {
+ mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
+ }
+
+ /* Set value */
+ mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
+ MVPP2_PRS_SRAM_UDF_MASK);
+ mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
+ pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
+ MVPP2_PRS_SRAM_UDF_BITS)] &=
+ ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
+ pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
+ MVPP2_PRS_SRAM_UDF_BITS)] |=
+ (offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
+
+ /* Set offset type */
+ mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
+ MVPP2_PRS_SRAM_UDF_TYPE_MASK);
+ mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);
+
+ /* Set offset operation */
+ mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
+ MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
+ mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);
+
+ pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
+ MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
+ ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
+ (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
+
+ pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
+ MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
+ (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
+
+ /* Set base offset as current */
+ mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
+}
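+
+/* Note: both the UDF offset and the UDF operation fields cross a byte
+ * boundary, which is why the function above pairs each
+ * mvpp2_prs_sram_bits_set() call with a manual mask-and-or of the high-order
+ * bits into the following byte.
+ */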
+
+/* Find parser flow entry */
+static int mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
+{
+ struct mvpp2_prs_entry pe;
+ int tid;
+
+ /* Go through all the entries with MVPP2_PRS_LU_FLOWS */
+ for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
+ u8 bits;
+
+ if (!priv->prs_shadow[tid].valid ||
+ priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
+ continue;
+
+ mvpp2_prs_init_from_hw(priv, &pe, tid);
+ bits = mvpp2_prs_sram_ai_get(&pe);
+
+ /* Sram stores the classification lookup ID in AI bits [5:0] */
+ if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
+ return tid;
+ }
+
+ return -ENOENT;
+}
+
+/* Return first free tcam index, seeking from start to end */
+static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
+ unsigned char end)
+{
+ int tid;
+
+ if (start > end)
+ swap(start, end);
+
+ if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
+ end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;
+
+ for (tid = start; tid <= end; tid++) {
+ if (!priv->prs_shadow[tid].valid)
+ return tid;
+ }
+
+ return -EINVAL;
+}
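+
+/* Note: start and end are swapped when given in reverse order, so the scan is
+ * always ascending and always returns the lowest free index in the range,
+ * even for callers such as mvpp2_prs_vlan_add() that pass (last, first).
+ */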
+
+/* Enable/disable dropping all mac da's */
+static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
+{
+ struct mvpp2_prs_entry pe;
+
+ if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
+ /* Entry exists - update port only */
+ mvpp2_prs_init_from_hw(priv, &pe, MVPP2_PE_DROP_ALL);
+ } else {
+ /* Entry doesn't exist - create new */
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
+ pe.index = MVPP2_PE_DROP_ALL;
+
+ /* Non-promiscuous mode for all ports - DROP unknown packets */
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
+ MVPP2_PRS_RI_DROP_MASK);
+
+ mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+
+ /* Update shadow table */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
+
+ /* Mask all ports */
+ mvpp2_prs_tcam_port_map_set(&pe, 0);
+ }
+
+ /* Update port mask */
+ mvpp2_prs_tcam_port_set(&pe, port, add);
+
+ mvpp2_prs_hw_write(priv, &pe);
+}
+
+/* Set port to unicast or multicast promiscuous mode */
+void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port,
+ enum mvpp2_prs_l2_cast l2_cast, bool add)
+{
+ struct mvpp2_prs_entry pe;
+ unsigned char cast_match;
+ unsigned int ri;
+ int tid;
+
+ if (l2_cast == MVPP2_PRS_L2_UNI_CAST) {
+ cast_match = MVPP2_PRS_UCAST_VAL;
+ tid = MVPP2_PE_MAC_UC_PROMISCUOUS;
+ ri = MVPP2_PRS_RI_L2_UCAST;
+ } else {
+ cast_match = MVPP2_PRS_MCAST_VAL;
+ tid = MVPP2_PE_MAC_MC_PROMISCUOUS;
+ ri = MVPP2_PRS_RI_L2_MCAST;
+ }
+
+ /* Promiscuous mode - accept unknown unicast or multicast packets */
+ if (priv->prs_shadow[tid].valid) {
+ mvpp2_prs_init_from_hw(priv, &pe, tid);
+ } else {
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
+ pe.index = tid;
+
+ /* Continue - set next lookup */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
+
+ /* Set result info bits */
+ mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK);
+
+ /* Match UC or MC addresses */
+ mvpp2_prs_tcam_data_byte_set(&pe, 0, cast_match,
+ MVPP2_PRS_CAST_MASK);
+
+ /* Shift to ethertype */
+ mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+ /* Mask all ports */
+ mvpp2_prs_tcam_port_map_set(&pe, 0);
+
+ /* Update shadow table */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
+ }
+
+ /* Update port mask */
+ mvpp2_prs_tcam_port_set(&pe, port, add);
+
+ mvpp2_prs_hw_write(priv, &pe);
+}
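+
+/* A minimal usage sketch (hypothetical caller, for illustration only): a
+ * ndo_set_rx_mode handler reacting to IFF_PROMISC could accept unknown L2
+ * unicast and multicast on a port with:
+ *
+ * mvpp2_prs_mac_promisc_set(priv, port->id, MVPP2_PRS_L2_UNI_CAST, true);
+ * mvpp2_prs_mac_promisc_set(priv, port->id, MVPP2_PRS_L2_MULTI_CAST, true);
+ */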
+
+/* Set entry for dsa packets */
+static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
+ bool tagged, bool extend)
+{
+ struct mvpp2_prs_entry pe;
+ int tid, shift;
+
+ if (extend) {
+ tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
+ shift = 8;
+ } else {
+ tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
+ shift = 4;
+ }
+
+ if (priv->prs_shadow[tid].valid) {
+ /* Entry exists - update port only */
+ mvpp2_prs_init_from_hw(priv, &pe, tid);
+ } else {
+ /* Entry doesn't exist - create new */
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
+ pe.index = tid;
+
+ /* Update shadow table */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);
+
+ if (tagged) {
+ /* Set tagged bit in DSA tag */
+ mvpp2_prs_tcam_data_byte_set(&pe, 0,
+ MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
+ MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
+
+ /* Set ai bits for next iteration */
+ if (extend)
+ mvpp2_prs_sram_ai_update(&pe, 1,
+ MVPP2_PRS_SRAM_AI_MASK);
+ else
+ mvpp2_prs_sram_ai_update(&pe, 0,
+ MVPP2_PRS_SRAM_AI_MASK);
+
+ /* Set result info bits to 'single vlan' */
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_SINGLE,
+ MVPP2_PRS_RI_VLAN_MASK);
+ /* If packet is tagged continue check vid filtering */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);
+ } else {
+ /* Shift 4 bytes for DSA tag or 8 bytes for EDSA tag */
+ mvpp2_prs_sram_shift_set(&pe, shift,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+ /* Set result info bits to 'no vlans' */
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
+ MVPP2_PRS_RI_VLAN_MASK);
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
+ }
+
+ /* Mask all ports */
+ mvpp2_prs_tcam_port_map_set(&pe, 0);
+ }
+
+ /* Update port mask */
+ mvpp2_prs_tcam_port_set(&pe, port, add);
+
+ mvpp2_prs_hw_write(priv, &pe);
+}
+
+/* Set entry for dsa ethertype */
+static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port,
+ bool add, bool tagged, bool extend)
+{
+ struct mvpp2_prs_entry pe;
+ int tid, shift, port_mask;
+
+ if (extend) {
+ tid = tagged ? MVPP2_PE_ETYPE_EDSA_TAGGED :
+ MVPP2_PE_ETYPE_EDSA_UNTAGGED;
+ port_mask = 0;
+ shift = 8;
+ } else {
+ tid = tagged ? MVPP2_PE_ETYPE_DSA_TAGGED :
+ MVPP2_PE_ETYPE_DSA_UNTAGGED;
+ port_mask = MVPP2_PRS_PORT_MASK;
+ shift = 4;
+ }
+
+ if (priv->prs_shadow[tid].valid) {
+ /* Entry exists - update port only */
+ mvpp2_prs_init_from_hw(priv, &pe, tid);
+ } else {
+ /* Entry doesn't exist - create new */
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
+ pe.index = tid;
+
+ /* Set ethertype */
+ mvpp2_prs_match_etype(&pe, 0, ETH_P_EDSA);
+ mvpp2_prs_match_etype(&pe, 2, 0);
+
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK,
+ MVPP2_PRS_RI_DSA_MASK);
+ /* Shift ethertype + 2 reserved bytes + tag */
+ mvpp2_prs_sram_shift_set(&pe, 2 + MVPP2_ETH_TYPE_LEN + shift,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+ /* Update shadow table */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);
+
+ if (tagged) {
+ /* Set tagged bit in DSA tag */
+ mvpp2_prs_tcam_data_byte_set(&pe,
+ MVPP2_ETH_TYPE_LEN + 2 + 3,
+ MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
+ MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
+ /* Clear all ai bits for next iteration */
+ mvpp2_prs_sram_ai_update(&pe, 0,
+ MVPP2_PRS_SRAM_AI_MASK);
+ /* If packet is tagged continue check vlans */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
+ } else {
+ /* Set result info bits to 'no vlans' */
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
+ MVPP2_PRS_RI_VLAN_MASK);
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
+ }
+ /* Mask/unmask all ports, depending on dsa type */
+ mvpp2_prs_tcam_port_map_set(&pe, port_mask);
+ }
+
+ /* Update port mask */
+ mvpp2_prs_tcam_port_set(&pe, port, add);
+
+ mvpp2_prs_hw_write(priv, &pe);
+}
+
+/* Search for existing single/triple vlan entry */
+static int mvpp2_prs_vlan_find(struct mvpp2 *priv, unsigned short tpid, int ai)
+{
+ struct mvpp2_prs_entry pe;
+ int tid;
+
+ /* Go through all the entries with MVPP2_PRS_LU_VLAN */
+ for (tid = MVPP2_PE_FIRST_FREE_TID;
+ tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
+ unsigned int ri_bits, ai_bits;
+ bool match;
+
+ if (!priv->prs_shadow[tid].valid ||
+ priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
+ continue;
+
+ mvpp2_prs_init_from_hw(priv, &pe, tid);
+ match = mvpp2_prs_tcam_data_cmp(&pe, 0, swab16(tpid));
+ if (!match)
+ continue;
+
+ /* Get vlan type */
+ ri_bits = mvpp2_prs_sram_ri_get(&pe);
+ ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
+
+ /* Get current ai value from tcam */
+ ai_bits = mvpp2_prs_tcam_ai_get(&pe);
+ /* Clear double vlan bit */
+ ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT;
+
+ if (ai != ai_bits)
+ continue;
+
+ if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
+ ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
+ return tid;
+ }
+
+ return -ENOENT;
+}
+
+/* Add/update single/triple vlan entry */
+static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
+ unsigned int port_map)
+{
+ struct mvpp2_prs_entry pe;
+ int tid_aux, tid;
+ int ret = 0;
+
+ memset(&pe, 0, sizeof(pe));
+
+ tid = mvpp2_prs_vlan_find(priv, tpid, ai);
+
+ if (tid < 0) {
+ /* Create new tcam entry */
+ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID,
+ MVPP2_PE_FIRST_FREE_TID);
+ if (tid < 0)
+ return tid;
+
+ /* Get last double vlan tid */
+ for (tid_aux = MVPP2_PE_LAST_FREE_TID;
+ tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) {
+ unsigned int ri_bits;
+
+ if (!priv->prs_shadow[tid_aux].valid ||
+ priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
+ continue;
+
+ mvpp2_prs_init_from_hw(priv, &pe, tid_aux);
+ ri_bits = mvpp2_prs_sram_ri_get(&pe);
+ if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) ==
+ MVPP2_PRS_RI_VLAN_DOUBLE)
+ break;
+ }
+
+ if (tid <= tid_aux)
+ return -EINVAL;
+
+ memset(&pe, 0, sizeof(pe));
+ pe.index = tid;
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
+
+ mvpp2_prs_match_etype(&pe, 0, tpid);
+
+ /* VLAN tag detected, proceed with VID filtering */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);
+
+ /* Clear all ai bits for next iteration */
+ mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
+
+ if (ai == MVPP2_PRS_SINGLE_VLAN_AI) {
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_SINGLE,
+ MVPP2_PRS_RI_VLAN_MASK);
+ } else {
+ ai |= MVPP2_PRS_DBL_VLAN_AI_BIT;
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_TRIPLE,
+ MVPP2_PRS_RI_VLAN_MASK);
+ }
+ mvpp2_prs_tcam_ai_update(&pe, ai, MVPP2_PRS_SRAM_AI_MASK);
+
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
+ } else {
+ mvpp2_prs_init_from_hw(priv, &pe, tid);
+ }
+ /* Update ports' mask */
+ mvpp2_prs_tcam_port_map_set(&pe, port_map);
+
+ mvpp2_prs_hw_write(priv, &pe);
+
+ return ret;
+}
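+
+/* Note: the tid <= tid_aux check above keeps single/triple vlan entries at
+ * higher tcam indices than every double vlan entry (and
+ * mvpp2_prs_double_vlan_add() enforces the opposite ordering), assuming the
+ * parser matches the lowest-index entry first, so a double-tagged frame is
+ * classified by its double vlan rule rather than a plain single vlan one.
+ */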
+
+/* Get first free double vlan ai number */
+static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 *priv)
+{
+ int i;
+
+ for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++) {
+ if (!priv->prs_double_vlans[i])
+ return i;
+ }
+
+ return -EINVAL;
+}
+
+/* Search for existing double vlan entry */
+static int mvpp2_prs_double_vlan_find(struct mvpp2 *priv, unsigned short tpid1,
+ unsigned short tpid2)
+{
+ struct mvpp2_prs_entry pe;
+ int tid;
+
+ /* Go through all the entries with MVPP2_PRS_LU_VLAN */
+ for (tid = MVPP2_PE_FIRST_FREE_TID;
+ tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
+ unsigned int ri_mask;
+ bool match;
+
+ if (!priv->prs_shadow[tid].valid ||
+ priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
+ continue;
+
+ mvpp2_prs_init_from_hw(priv, &pe, tid);
+
+ match = mvpp2_prs_tcam_data_cmp(&pe, 0, swab16(tpid1)) &&
+ mvpp2_prs_tcam_data_cmp(&pe, 4, swab16(tpid2));
+
+ if (!match)
+ continue;
+
+ ri_mask = mvpp2_prs_sram_ri_get(&pe) & MVPP2_PRS_RI_VLAN_MASK;
+ if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE)
+ return tid;
+ }
+
+ return -ENOENT;
+}
+
+/* Add or update double vlan entry */
+static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
+ unsigned short tpid2,
+ unsigned int port_map)
+{
+ int tid_aux, tid, ai, ret = 0;
+ struct mvpp2_prs_entry pe;
+
+ memset(&pe, 0, sizeof(pe));
+
+ tid = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2);
+
+ if (tid < 0) {
+ /* Create new tcam entry */
+ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+ MVPP2_PE_LAST_FREE_TID);
+ if (tid < 0)
+ return tid;
+
+ /* Set ai value for new double vlan entry */
+ ai = mvpp2_prs_double_vlan_ai_free_get(priv);
+ if (ai < 0)
+ return ai;
+
+ /* Get first single/triple vlan tid */
+ for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
+ tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) {
+ unsigned int ri_bits;
+
+ if (!priv->prs_shadow[tid_aux].valid ||
+ priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
+ continue;
+
+ mvpp2_prs_init_from_hw(priv, &pe, tid_aux);
+ ri_bits = mvpp2_prs_sram_ri_get(&pe);
+ ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
+ if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
+ ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
+ break;
+ }
+
+ if (tid >= tid_aux)
+ return -ERANGE;
+
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
+ pe.index = tid;
+
+ priv->prs_double_vlans[ai] = true;
+
+ mvpp2_prs_match_etype(&pe, 0, tpid1);
+ mvpp2_prs_match_etype(&pe, 4, tpid2);
+
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
+ /* Shift 4 bytes - skip outer vlan tag */
+ mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_LEN,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
+ MVPP2_PRS_RI_VLAN_MASK);
+ mvpp2_prs_sram_ai_update(&pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT,
+ MVPP2_PRS_SRAM_AI_MASK);
+
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
+ } else {
+ mvpp2_prs_init_from_hw(priv, &pe, tid);
+ }
+
+ /* Update ports' mask */
+ mvpp2_prs_tcam_port_map_set(&pe, port_map);
+ mvpp2_prs_hw_write(priv, &pe);
+
+ return ret;
+}
+
+/* IPv4 header parsing for fragmentation and L4 offset */
+static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
+ unsigned int ri, unsigned int ri_mask)
+{
+ struct mvpp2_prs_entry pe;
+ int tid;
+
+ if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
+ (proto != IPPROTO_IGMP))
+ return -EINVAL;
+
+ /* Not fragmented packet */
+ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+ MVPP2_PE_LAST_FREE_TID);
+ if (tid < 0)
+ return tid;
+
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
+ pe.index = tid;
+
+ /* Set next lu to IPv4 */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
+ mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+ /* Set L4 offset */
+ mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
+ sizeof(struct iphdr) - 4,
+ MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+ mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
+ MVPP2_PRS_IPV4_DIP_AI_BIT);
+ mvpp2_prs_sram_ri_update(&pe, ri, ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
+
+ mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00,
+ MVPP2_PRS_TCAM_PROTO_MASK_L);
+ mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00,
+ MVPP2_PRS_TCAM_PROTO_MASK);
+
+ mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
+ mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
+ /* Unmask all ports */
+ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
+ mvpp2_prs_hw_write(priv, &pe);
+
+ /* Fragmented packet */
+ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+ MVPP2_PE_LAST_FREE_TID);
+ if (tid < 0)
+ return tid;
+
+ pe.index = tid;
+ /* Clear ri before updating */
+ pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
+ pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
+ mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
+
+ mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_TRUE,
+ ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
+
+ mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, 0x0);
+ mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, 0x0);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
+ mvpp2_prs_hw_write(priv, &pe);
+
+ return 0;
+}
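+
+/* Note: two entries are written per protocol. The first also requires the
+ * fragment-offset/MF bits of the IPv4 header (tcam data bytes 2-3 at the
+ * current parse position) to be zero and reports "not fragmented"; the
+ * second relaxes those two bytes and sets MVPP2_PRS_RI_IP_FRAG_TRUE instead.
+ */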
+
+/* IPv4 L3 multicast or broadcast */
+static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast)
+{
+ struct mvpp2_prs_entry pe;
+ int mask, tid;
+
+ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+ MVPP2_PE_LAST_FREE_TID);
+ if (tid < 0)
+ return tid;
+
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
+ pe.index = tid;
+
+ switch (l3_cast) {
+ case MVPP2_PRS_L3_MULTI_CAST:
+ mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC,
+ MVPP2_PRS_IPV4_MC_MASK);
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
+ MVPP2_PRS_RI_L3_ADDR_MASK);
+ break;
+ case MVPP2_PRS_L3_BROAD_CAST:
+ mask = MVPP2_PRS_IPV4_BC_MASK;
+ mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask);
+ mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask);
+ mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask);
+ mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask);
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST,
+ MVPP2_PRS_RI_L3_ADDR_MASK);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Finished: go to flowid generation */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+ mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+
+ mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
+ MVPP2_PRS_IPV4_DIP_AI_BIT);
+ /* Unmask all ports */
+ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
+ mvpp2_prs_hw_write(priv, &pe);
+
+ return 0;
+}
+
+/* Set entries for protocols over IPv6 */
+static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto,
+ unsigned int ri, unsigned int ri_mask)
+{
+ struct mvpp2_prs_entry pe;
+ int tid;
+
+ if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
+ (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP))
+ return -EINVAL;
+
+ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+ MVPP2_PE_LAST_FREE_TID);
+ if (tid < 0)
+ return tid;
+
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
+ pe.index = tid;
+
+ /* Finished: go to flowid generation */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+ mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
+ mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
+ sizeof(struct ipv6hdr) - 6,
+ MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+ mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK);
+ mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
+ MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
+ /* Unmask all ports */
+ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+ /* Write HW */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
+ mvpp2_prs_hw_write(priv, &pe);
+
+ return 0;
+}
+
+/* IPv6 L3 multicast entry */
+static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast)
+{
+ struct mvpp2_prs_entry pe;
+ int tid;
+
+ if (l3_cast != MVPP2_PRS_L3_MULTI_CAST)
+ return -EINVAL;
+
+ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+ MVPP2_PE_LAST_FREE_TID);
+ if (tid < 0)
+ return tid;
+
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
+ pe.index = tid;
+
+ /* Finished: go to flowid generation */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
+ MVPP2_PRS_RI_L3_ADDR_MASK);
+ mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
+ MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
+ /* Shift back to IPv6 NH */
+ mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+ mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC,
+ MVPP2_PRS_IPV6_MC_MASK);
+ mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
+ /* Unmask all ports */
+ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
+ mvpp2_prs_hw_write(priv, &pe);
+
+ return 0;
+}
+
+/* Parser per-port initialization */
+static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
+ int lu_max, int offset)
+{
+ u32 val;
+
+ /* Set lookup ID */
+ val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
+ val &= ~MVPP2_PRS_PORT_LU_MASK(port);
+ val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first);
+ mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);
+
+ /* Set maximum number of loops for packet received from port */
+ val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
+ val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
+ val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
+ mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);
+
+ /* Set initial offset for packet header extraction for the first
+ * searching loop
+ */
+ val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
+ val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
+ val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
+ mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
+}
+
+/* Default flow entries initialization for all ports */
+static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
+{
+ struct mvpp2_prs_entry pe;
+ int port;
+
+ for (port = 0; port < MVPP2_MAX_PORTS; port++) {
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+ pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;
+
+ /* Mask all ports */
+ mvpp2_prs_tcam_port_map_set(&pe, 0);
+
+ /* Set flow ID */
+ mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
+ mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
+ mvpp2_prs_hw_write(priv, &pe);
+ }
+}
+
+/* Set default entry for Marvell Header field */
+static void mvpp2_prs_mh_init(struct mvpp2 *priv)
+{
+ struct mvpp2_prs_entry pe;
+
+ memset(&pe, 0, sizeof(pe));
+
+ pe.index = MVPP2_PE_MH_DEFAULT;
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
+ mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);
+
+ /* Unmask all ports */
+ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
+ mvpp2_prs_hw_write(priv, &pe);
+}
+
+/* Set default entries (placeholders) for promiscuous, non-promiscuous and
+ * multicast MAC addresses
+ */
+static void mvpp2_prs_mac_init(struct mvpp2 *priv)
+{
+ struct mvpp2_prs_entry pe;
+
+ memset(&pe, 0, sizeof(pe));
+
+ /* Non-promiscuous mode for all ports - DROP unknown packets */
+ pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
+
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
+ MVPP2_PRS_RI_DROP_MASK);
+ mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+
+ /* Unmask all ports */
+ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
+ mvpp2_prs_hw_write(priv, &pe);
+
+ /* Create dummy entries for drop all and promiscuous modes */
+ mvpp2_prs_mac_drop_all_set(priv, 0, false);
+ mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_UNI_CAST, false);
+ mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_MULTI_CAST, false);
+}
+
+/* Set default entries for various types of dsa packets */
+static void mvpp2_prs_dsa_init(struct mvpp2 *priv)
+{
+ struct mvpp2_prs_entry pe;
+
+ /* Untagged EDSA entry - placeholder */
+ mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
+ MVPP2_PRS_EDSA);
+
+ /* Tagged EDSA entry - placeholder */
+ mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
+
+ /* Untagged DSA entry - placeholder */
+ mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
+ MVPP2_PRS_DSA);
+
+ /* Tagged DSA entry - placeholder */
+ mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
+
+ /* Untagged EDSA ethertype entry - placeholder */
+ mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
+ MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
+
+ /* Tagged EDSA ethertype entry - placeholder */
+ mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
+ MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
+
+ /* Untagged DSA ethertype entry */
+ mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
+ MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
+
+ /* Tagged DSA ethertype entry */
+ mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
+ MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
+
+ /* Set default entry, in case DSA or EDSA tag not found */
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
+ pe.index = MVPP2_PE_DSA_DEFAULT;
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
+
+ /* Shift 0 bytes */
+ mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
+
+ /* Clear all sram ai bits for next iteration */
+ mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
+
+ /* Unmask all ports */
+ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+ mvpp2_prs_hw_write(priv, &pe);
+}
+
+/* Initialize parser entries for VID filtering */
+static void mvpp2_prs_vid_init(struct mvpp2 *priv)
+{
+ struct mvpp2_prs_entry pe;
+
+ memset(&pe, 0, sizeof(pe));
+
+ /* Set default vid entry */
+ pe.index = MVPP2_PE_VID_FLTR_DEFAULT;
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
+
+ mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_EDSA_VID_AI_BIT);
+
+ /* Skip VLAN header - Set offset to 4 bytes */
+ mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_LEN,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+ /* Clear all ai bits for next iteration */
+ mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
+
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
+
+ /* Unmask all ports */
+ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
+ mvpp2_prs_hw_write(priv, &pe);
+
+ /* Set default vid entry for extended DSA */
+ memset(&pe, 0, sizeof(pe));
+
+ /* Set default vid entry */
+ pe.index = MVPP2_PE_VID_EDSA_FLTR_DEFAULT;
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
+
+ mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_EDSA_VID_AI_BIT,
+ MVPP2_PRS_EDSA_VID_AI_BIT);
+
+ /* Skip VLAN header - Set offset to 8 bytes */
+ mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_EDSA_LEN,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+ /* Clear all ai bits for next iteration */
+ mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
+
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
+
+ /* Unmask all ports */
+ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
+ mvpp2_prs_hw_write(priv, &pe);
+}
+
+/* Match basic ethertypes */
+static int mvpp2_prs_etype_init(struct mvpp2 *priv)
+{
+ struct mvpp2_prs_entry pe;
+ int tid;
+
+ /* Ethertype: PPPoE */
+ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+ MVPP2_PE_LAST_FREE_TID);
+ if (tid < 0)
+ return tid;
+
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
+ pe.index = tid;
+
+ mvpp2_prs_match_etype(&pe, 0, ETH_P_PPP_SES);
+
+ mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
+ MVPP2_PRS_RI_PPPOE_MASK);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
+ priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
+ priv->prs_shadow[pe.index].finish = false;
+ mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
+ MVPP2_PRS_RI_PPPOE_MASK);
+ mvpp2_prs_hw_write(priv, &pe);
+
+ /* Ethertype: ARP */
+ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+ MVPP2_PE_LAST_FREE_TID);
+ if (tid < 0)
+ return tid;
+
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
+ pe.index = tid;
+
+ mvpp2_prs_match_etype(&pe, 0, ETH_P_ARP);
+
+ /* Generate flow in the next iteration */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+ mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
+ MVPP2_PRS_RI_L3_PROTO_MASK);
+ /* Set L3 offset */
+ mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+ MVPP2_ETH_TYPE_LEN,
+ MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
+ priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
+ priv->prs_shadow[pe.index].finish = true;
+ mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
+ MVPP2_PRS_RI_L3_PROTO_MASK);
+ mvpp2_prs_hw_write(priv, &pe);
+
+ /* Ethertype: LBTD */
+ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+ MVPP2_PE_LAST_FREE_TID);
+ if (tid < 0)
+ return tid;
+
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
+ pe.index = tid;
+
+ mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);
+
+ /* Generate flow in the next iteration */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+ mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
+ MVPP2_PRS_RI_UDF3_RX_SPECIAL,
+ MVPP2_PRS_RI_CPU_CODE_MASK |
+ MVPP2_PRS_RI_UDF3_MASK);
+ /* Set L3 offset */
+ mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+ MVPP2_ETH_TYPE_LEN,
+ MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
+ priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
+ priv->prs_shadow[pe.index].finish = true;
+ mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
+ MVPP2_PRS_RI_UDF3_RX_SPECIAL,
+ MVPP2_PRS_RI_CPU_CODE_MASK |
+ MVPP2_PRS_RI_UDF3_MASK);
+ mvpp2_prs_hw_write(priv, &pe);
+
+ /* Ethertype: IPv4 without options */
+ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+ MVPP2_PE_LAST_FREE_TID);
+ if (tid < 0)
+ return tid;
+
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
+ pe.index = tid;
+
+ mvpp2_prs_match_etype(&pe, 0, ETH_P_IP);
+ mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
+ MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
+ MVPP2_PRS_IPV4_HEAD_MASK |
+ MVPP2_PRS_IPV4_IHL_MASK);
+
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
+ MVPP2_PRS_RI_L3_PROTO_MASK);
+ /* Skip eth_type + 4 bytes of IP header */
+ mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+ /* Set L3 offset */
+ mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+ MVPP2_ETH_TYPE_LEN,
+ MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
+ priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
+ priv->prs_shadow[pe.index].finish = false;
+ mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
+ MVPP2_PRS_RI_L3_PROTO_MASK);
+ mvpp2_prs_hw_write(priv, &pe);
+
+ /* Ethertype: IPv4 with options */
+ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+ MVPP2_PE_LAST_FREE_TID);
+ if (tid < 0)
+ return tid;
+
+ pe.index = tid;
+
+ /* Clear tcam data before updating */
+ pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
+ pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;
+
+ mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
+ MVPP2_PRS_IPV4_HEAD,
+ MVPP2_PRS_IPV4_HEAD_MASK);
+
+ /* Clear ri before updating */
+ pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
+ pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
+ MVPP2_PRS_RI_L3_PROTO_MASK);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
+ priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
+ priv->prs_shadow[pe.index].finish = false;
+ mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
+ MVPP2_PRS_RI_L3_PROTO_MASK);
+ mvpp2_prs_hw_write(priv, &pe);
+
+ /* Ethertype: IPv6 without options */
+ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+ MVPP2_PE_LAST_FREE_TID);
+ if (tid < 0)
+ return tid;
+
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
+ pe.index = tid;
+
+ mvpp2_prs_match_etype(&pe, 0, ETH_P_IPV6);
+
+ /* Skip DIP of IPV6 header */
+ mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
+ MVPP2_MAX_L3_ADDR_SIZE,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
+ MVPP2_PRS_RI_L3_PROTO_MASK);
+ /* Set L3 offset */
+ mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+ MVPP2_ETH_TYPE_LEN,
+ MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
+ priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
+ priv->prs_shadow[pe.index].finish = false;
+ mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
+ MVPP2_PRS_RI_L3_PROTO_MASK);
+ mvpp2_prs_hw_write(priv, &pe);
+
+ /* Default entry for MVPP2_PRS_LU_L2 - unknown ethertype */
+ memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
+ pe.index = MVPP2_PE_ETH_TYPE_UN;
+
+ /* Unmask all ports */
+ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+ /* Generate flow in the next iteration */
+ mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
+ MVPP2_PRS_RI_L3_PROTO_MASK);
+ /* Set L3 offset even if it's an unknown L3 */
+ mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+ MVPP2_ETH_TYPE_LEN,
+ MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
+ priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
+ priv->prs_shadow[pe.index].finish = true;
+ mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
+ MVPP2_PRS_RI_L3_PROTO_MASK);
+ mvpp2_prs_hw_write(priv, &pe);
+
+ return 0;
+}
+
+/* Configure vlan entries and detect up to 2 successive VLAN tags.
+ * Possible options:
+ * 0x8100, 0x88A8
+ * 0x8100, 0x8100
+ * 0x8100
+ * 0x88A8
+ */
+static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
+{
+ struct mvpp2_prs_entry pe;
+ int err;
+
+ priv->prs_double_vlans = devm_kcalloc(&pdev->dev, MVPP2_PRS_DBL_VLANS_MAX,
+ sizeof(bool), GFP_KERNEL);
+ if (!priv->prs_double_vlans)
+ return -ENOMEM;
+
+ /* Double VLAN: 0x8100, 0x88A8 */
+ err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD,
+ MVPP2_PRS_PORT_MASK);
+ if (err)
+ return err;
+
+ /* Double VLAN: 0x8100, 0x8100 */
+ err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021Q,
+ MVPP2_PRS_PORT_MASK);
+ if (err)
+ return err;
+
+ /* Single VLAN: 0x88a8 */
+ err = mvpp2_prs_vlan_add(priv, ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI,
+ MVPP2_PRS_PORT_MASK);
+ if (err)
+ return err;
+
+ /* Single VLAN: 0x8100 */
+ err = mvpp2_prs_vlan_add(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI,
+ MVPP2_PRS_PORT_MASK);
+ if (err)
+ return err;
+
+ /* Set default double vlan entry */
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
+ pe.index = MVPP2_PE_VLAN_DBL;
+
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);
+
+ /* Clear ai for next iterations */
+ mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
+ MVPP2_PRS_RI_VLAN_MASK);
+
+ mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT,
+ MVPP2_PRS_DBL_VLAN_AI_BIT);
+ /* Unmask all ports */
+ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
+ mvpp2_prs_hw_write(priv, &pe);
+
+ /* Set default vlan none entry */
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
+ pe.index = MVPP2_PE_VLAN_NONE;
+
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
+ MVPP2_PRS_RI_VLAN_MASK);
+
+ /* Unmask all ports */
+ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
+ mvpp2_prs_hw_write(priv, &pe);
+
+ return 0;
+}
+
+/* Set entries for PPPoE ethertype */
+static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
+{
+ struct mvpp2_prs_entry pe;
+ int tid;
+
+ /* IPv4 over PPPoE with options */
+ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+ MVPP2_PE_LAST_FREE_TID);
+ if (tid < 0)
+ return tid;
+
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
+ pe.index = tid;
+
+ mvpp2_prs_match_etype(&pe, 0, PPP_IP);
+
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
+ MVPP2_PRS_RI_L3_PROTO_MASK);
+ /* Skip eth_type + 4 bytes of IP header */
+ mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+ /* Set L3 offset */
+ mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+ MVPP2_ETH_TYPE_LEN,
+ MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
+ mvpp2_prs_hw_write(priv, &pe);
+
+ /* IPv4 over PPPoE without options */
+ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+ MVPP2_PE_LAST_FREE_TID);
+ if (tid < 0)
+ return tid;
+
+ pe.index = tid;
+
+ mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
+ MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
+ MVPP2_PRS_IPV4_HEAD_MASK |
+ MVPP2_PRS_IPV4_IHL_MASK);
+
+ /* Clear ri before updating */
+ pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
+ pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
+ MVPP2_PRS_RI_L3_PROTO_MASK);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
+ mvpp2_prs_hw_write(priv, &pe);
+
+ /* IPv6 over PPPoE */
+ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+ MVPP2_PE_LAST_FREE_TID);
+ if (tid < 0)
+ return tid;
+
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
+ pe.index = tid;
+
+ mvpp2_prs_match_etype(&pe, 0, PPP_IPV6);
+
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
+ MVPP2_PRS_RI_L3_PROTO_MASK);
+ /* Skip eth_type + 4 bytes of IPv6 header */
+ mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+ /* Set L3 offset */
+ mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+ MVPP2_ETH_TYPE_LEN,
+ MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
+ mvpp2_prs_hw_write(priv, &pe);
+
+ /* Non-IP over PPPoE */
+ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+ MVPP2_PE_LAST_FREE_TID);
+ if (tid < 0)
+ return tid;
+
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
+ pe.index = tid;
+
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
+ MVPP2_PRS_RI_L3_PROTO_MASK);
+
+ /* Finished: go to flowid generation */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+ mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ /* Set L3 offset even if it's unknown L3 */
+ mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+ MVPP2_ETH_TYPE_LEN,
+ MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
+ mvpp2_prs_hw_write(priv, &pe);
+
+ return 0;
+}
+
+/* Initialize entries for IPv4 */
+static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
+{
+ struct mvpp2_prs_entry pe;
+ int err;
+
+ /* Set entries for TCP, UDP and IGMP over IPv4 */
+ err = mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_RI_L4_PROTO_MASK);
+ if (err)
+ return err;
+
+ err = mvpp2_prs_ip4_proto(priv, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_RI_L4_PROTO_MASK);
+ if (err)
+ return err;
+
+ err = mvpp2_prs_ip4_proto(priv, IPPROTO_IGMP,
+ MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
+ MVPP2_PRS_RI_UDF3_RX_SPECIAL,
+ MVPP2_PRS_RI_CPU_CODE_MASK |
+ MVPP2_PRS_RI_UDF3_MASK);
+ if (err)
+ return err;
+
+ /* IPv4 Broadcast */
+ err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_BROAD_CAST);
+ if (err)
+ return err;
+
+ /* IPv4 Multicast */
+ err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
+ if (err)
+ return err;
+
+ /* Default IPv4 entry for unknown protocols */
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
+ pe.index = MVPP2_PE_IP4_PROTO_UN;
+
+ /* Set next lu to IPv4 */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
+ mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+ /* Set L4 offset */
+ mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
+ sizeof(struct iphdr) - 4,
+ MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+ mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
+ MVPP2_PRS_IPV4_DIP_AI_BIT);
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
+ MVPP2_PRS_RI_L4_PROTO_MASK);
+
+ mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
+ /* Unmask all ports */
+ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
+ mvpp2_prs_hw_write(priv, &pe);
+
+ /* Default IPv4 entry for unicast address */
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
+ pe.index = MVPP2_PE_IP4_ADDR_UN;
+
+ /* Finished: go to flowid generation */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+ mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
+ MVPP2_PRS_RI_L3_ADDR_MASK);
+
+ mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
+ MVPP2_PRS_IPV4_DIP_AI_BIT);
+ /* Unmask all ports */
+ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
+ mvpp2_prs_hw_write(priv, &pe);
+
+ return 0;
+}
+
+/* Initialize entries for IPv6 */
+static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
+{
+ struct mvpp2_prs_entry pe;
+ int tid, err;
+
+ /* Set entries for TCP, UDP and ICMP over IPv6 */
+ err = mvpp2_prs_ip6_proto(priv, IPPROTO_TCP,
+ MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_RI_L4_PROTO_MASK);
+ if (err)
+ return err;
+
+ err = mvpp2_prs_ip6_proto(priv, IPPROTO_UDP,
+ MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_RI_L4_PROTO_MASK);
+ if (err)
+ return err;
+
+ err = mvpp2_prs_ip6_proto(priv, IPPROTO_ICMPV6,
+ MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
+ MVPP2_PRS_RI_UDF3_RX_SPECIAL,
+ MVPP2_PRS_RI_CPU_CODE_MASK |
+ MVPP2_PRS_RI_UDF3_MASK);
+ if (err)
+ return err;
+
+ /* IPv4 is the last header, a case similar to protocol 6 (TCP) or
+ * 17 (UDP). Result Info: UDF7=1, DS-Lite.
+ */
+ err = mvpp2_prs_ip6_proto(priv, IPPROTO_IPIP,
+ MVPP2_PRS_RI_UDF7_IP6_LITE,
+ MVPP2_PRS_RI_UDF7_MASK);
+ if (err)
+ return err;
+
+ /* IPv6 multicast */
+ err = mvpp2_prs_ip6_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
+ if (err)
+ return err;
+
+ /* Entry for checking hop limit */
+ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+ MVPP2_PE_LAST_FREE_TID);
+ if (tid < 0)
+ return tid;
+
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
+ pe.index = tid;
+
+ /* Finished: go to flowid generation */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+ mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN |
+ MVPP2_PRS_RI_DROP_MASK,
+ MVPP2_PRS_RI_L3_PROTO_MASK |
+ MVPP2_PRS_RI_DROP_MASK);
+
+ mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK);
+ mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
+ MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
+ mvpp2_prs_hw_write(priv, &pe);
+
+ /* Default IPv6 entry for unknown protocols */
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
+ pe.index = MVPP2_PE_IP6_PROTO_UN;
+
+ /* Finished: go to flowid generation */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+ mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
+ MVPP2_PRS_RI_L4_PROTO_MASK);
+ /* Set L4 offset relative to the current position */
+ mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
+ sizeof(struct ipv6hdr) - 4,
+ MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+ mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
+ MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
+ /* Unmask all ports */
+ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
+ mvpp2_prs_hw_write(priv, &pe);
+
+ /* Default IPv6 entry for unknown ext protocols */
+ memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
+ pe.index = MVPP2_PE_IP6_EXT_PROTO_UN;
+
+ /* Finished: go to flowid generation */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+ mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
+ MVPP2_PRS_RI_L4_PROTO_MASK);
+
+ mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT,
+ MVPP2_PRS_IPV6_EXT_AI_BIT);
+ /* Unmask all ports */
+ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
+ mvpp2_prs_hw_write(priv, &pe);
+
+ /* Default IPv6 entry for unicast address */
+ memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
+ pe.index = MVPP2_PE_IP6_ADDR_UN;
+
+ /* Finished: go to IPv6 again */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
+ MVPP2_PRS_RI_L3_ADDR_MASK);
+ mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
+ MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
+ /* Shift back to IPv6 NH */
+ mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+ mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
+ /* Unmask all ports */
+ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
+ mvpp2_prs_hw_write(priv, &pe);
+
+ return 0;
+}
+
+/* Find tcam entry with matched pair <vid,port> */
+static int mvpp2_prs_vid_range_find(struct mvpp2 *priv, int pmap, u16 vid,
+ u16 mask)
+{
+ unsigned char byte[2], enable[2];
+ struct mvpp2_prs_entry pe;
+ u16 rvid, rmask;
+ int tid;
+
+ /* Go through all the entries with MVPP2_PRS_LU_VID */
+ for (tid = MVPP2_PE_VID_FILT_RANGE_START;
+ tid <= MVPP2_PE_VID_FILT_RANGE_END; tid++) {
+ if (!priv->prs_shadow[tid].valid ||
+ priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VID)
+ continue;
+
+ mvpp2_prs_init_from_hw(priv, &pe, tid);
+
+ mvpp2_prs_tcam_data_byte_get(&pe, 2, &byte[0], &enable[0]);
+ mvpp2_prs_tcam_data_byte_get(&pe, 3, &byte[1], &enable[1]);
+
+ rvid = ((byte[0] & 0xf) << 8) + byte[1];
+ rmask = ((enable[0] & 0xf) << 8) + enable[1];
+
+ if (rvid != vid || rmask != mask)
+ continue;
+
+ return tid;
+ }
+
+ return -ENOENT;
+}
+
+/* Write parser entry for VID filtering */
+int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid)
+{
+ unsigned int vid_start = MVPP2_PE_VID_FILT_RANGE_START +
+ port->id * MVPP2_PRS_VLAN_FILT_MAX;
+ unsigned int mask = 0xfff, reg_val, shift;
+ struct mvpp2 *priv = port->priv;
+ struct mvpp2_prs_entry pe;
+ int tid;
+
+ memset(&pe, 0, sizeof(pe));
+
+ /* Scan TCAM and see if an entry with this <vid,port> already exists */
+ tid = mvpp2_prs_vid_range_find(priv, (1 << port->id), vid, mask);
+
+ reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id));
+ if (reg_val & MVPP2_DSA_EXTENDED)
+ shift = MVPP2_VLAN_TAG_EDSA_LEN;
+ else
+ shift = MVPP2_VLAN_TAG_LEN;
+
+ /* No such entry */
+ if (tid < 0) {
+ /* Go through all entries from first to last in vlan range */
+ tid = mvpp2_prs_tcam_first_free(priv, vid_start,
+ vid_start +
+ MVPP2_PRS_VLAN_FILT_MAX_ENTRY);
+
+ /* There isn't room for a new VID filter */
+ if (tid < 0)
+ return tid;
+
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
+ pe.index = tid;
+
+ /* Mask all ports */
+ mvpp2_prs_tcam_port_map_set(&pe, 0);
+ } else {
+ mvpp2_prs_init_from_hw(priv, &pe, tid);
+ }
+
+ /* Enable the current port */
+ mvpp2_prs_tcam_port_set(&pe, port->id, true);
+
+ /* Continue - set next lookup */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
+
+ /* Skip VLAN header - Set offset to 4 or 8 bytes */
+ mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+ /* Set match on VID */
+ mvpp2_prs_match_vid(&pe, MVPP2_PRS_VID_TCAM_BYTE, vid);
+
+ /* Clear all ai bits for next iteration */
+ mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
+
+ /* Update shadow table */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
+ mvpp2_prs_hw_write(priv, &pe);
+
+ return 0;
+}
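+
+/* Usage sketch (hypothetical caller, for illustration only): a
+ * .ndo_vlan_rx_add_vid handler could enable the per-port guard entry once and
+ * then install each VID:
+ *
+ * mvpp2_prs_vid_enable_filtering(port);
+ * err = mvpp2_prs_vid_entry_add(port, vid);
+ */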
+
+/* Remove parser entry for VID filtering */
+void mvpp2_prs_vid_entry_remove(struct mvpp2_port *port, u16 vid)
+{
+ struct mvpp2 *priv = port->priv;
+ int tid;
+
+ /* Scan TCAM and see if an entry with this <vid,port> already exists */
+ tid = mvpp2_prs_vid_range_find(priv, (1 << port->id), vid, 0xfff);
+
+ /* No such entry */
+ if (tid < 0)
+ return;
+
+ mvpp2_prs_hw_inv(priv, tid);
+ priv->prs_shadow[tid].valid = false;
+}
+
+/* Remove all existing VID filters on this port */
+void mvpp2_prs_vid_remove_all(struct mvpp2_port *port)
+{
+ struct mvpp2 *priv = port->priv;
+ int tid;
+
+ for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id);
+ tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) {
+		if (priv->prs_shadow[tid].valid) {
+			/* tid is a TCAM index, not a VID, so invalidate
+			 * the entry directly rather than doing a VID lookup
+			 */
+			mvpp2_prs_hw_inv(priv, tid);
+			priv->prs_shadow[tid].valid = false;
+		}
+	}
+}
+
+/* Remove the VID filtering guard entry for this port, disabling filtering */
+void mvpp2_prs_vid_disable_filtering(struct mvpp2_port *port)
+{
+ unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id);
+ struct mvpp2 *priv = port->priv;
+
+ /* Invalidate the guard entry */
+ mvpp2_prs_hw_inv(priv, tid);
+
+ priv->prs_shadow[tid].valid = false;
+}
+
+/* Add guard entry that drops packets when no VID is matched on this port */
+void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port)
+{
+ unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id);
+ struct mvpp2 *priv = port->priv;
+ unsigned int reg_val, shift;
+ struct mvpp2_prs_entry pe;
+
+ if (priv->prs_shadow[tid].valid)
+ return;
+
+ memset(&pe, 0, sizeof(pe));
+
+ pe.index = tid;
+
+ reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id));
+ if (reg_val & MVPP2_DSA_EXTENDED)
+ shift = MVPP2_VLAN_TAG_EDSA_LEN;
+ else
+ shift = MVPP2_VLAN_TAG_LEN;
+
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
+
+ /* Mask all ports */
+ mvpp2_prs_tcam_port_map_set(&pe, 0);
+
+ /* Update port mask */
+ mvpp2_prs_tcam_port_set(&pe, port->id, true);
+
+ /* Continue - set next lookup */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
+
+ /* Skip VLAN header - Set offset to 4 or 8 bytes */
+ mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+ /* Drop VLAN packets that don't belong to any VIDs on this port */
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
+ MVPP2_PRS_RI_DROP_MASK);
+
+ /* Clear all ai bits for next iteration */
+ mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
+
+ /* Update shadow table */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
+ mvpp2_prs_hw_write(priv, &pe);
+}
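+
+/* Editor's note: the guard entry sits at MVPP2_PRS_VID_PORT_DFLT, i.e. at
+ * the end of the port's VID range, after the per-VID filters. Assuming
+ * lower TCAM indices take lookup precedence (which this layout suggests),
+ * a matching VID entry is always hit before this catch-all drop entry.
+ */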
+
+/* Parser default initialization */
+int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv)
+{
+ int err, index, i;
+
+ /* Enable tcam table */
+ mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);
+
+ /* Clear all tcam and sram entries */
+ for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
+ mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
+ for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
+ mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);
+
+ mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
+ for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
+ mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
+ }
+
+ /* Invalidate all tcam entries */
+ for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
+ mvpp2_prs_hw_inv(priv, index);
+
+ priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE,
+ sizeof(*priv->prs_shadow),
+ GFP_KERNEL);
+ if (!priv->prs_shadow)
+ return -ENOMEM;
+
+ /* Always start from lookup = 0 */
+ for (index = 0; index < MVPP2_MAX_PORTS; index++)
+ mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
+ MVPP2_PRS_PORT_LU_MAX, 0);
+
+ mvpp2_prs_def_flow_init(priv);
+
+ mvpp2_prs_mh_init(priv);
+
+ mvpp2_prs_mac_init(priv);
+
+ mvpp2_prs_dsa_init(priv);
+
+ mvpp2_prs_vid_init(priv);
+
+ err = mvpp2_prs_etype_init(priv);
+ if (err)
+ return err;
+
+ err = mvpp2_prs_vlan_init(pdev, priv);
+ if (err)
+ return err;
+
+ err = mvpp2_prs_pppoe_init(priv);
+ if (err)
+ return err;
+
+ err = mvpp2_prs_ip6_init(priv);
+ if (err)
+ return err;
+
+ err = mvpp2_prs_ip4_init(priv);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/* Compare MAC DA with tcam entry data */
+static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
+ const u8 *da, unsigned char *mask)
+{
+ unsigned char tcam_byte, tcam_mask;
+ int index;
+
+ for (index = 0; index < ETH_ALEN; index++) {
+ mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
+ if (tcam_mask != mask[index])
+ return false;
+
+ if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
+ return false;
+ }
+
+ return true;
+}
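+
+/* Editor's example: with da = 01:00:5e:00:00:01 and an all-0xff mask,
+ * every address byte must match exactly; a 0x00 mask byte makes the
+ * corresponding byte "don't care". The first check rejects entries whose
+ * per-byte enable mask differs from the requested one, so only
+ * like-for-like entries are ever compared.
+ */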
+
+/* Find tcam entry with matched pair <MAC DA, port> */
+static int
+mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
+ unsigned char *mask, int udf_type)
+{
+ struct mvpp2_prs_entry pe;
+ int tid;
+
+	/* Go through all entries with MVPP2_PRS_LU_MAC */
+ for (tid = MVPP2_PE_MAC_RANGE_START;
+ tid <= MVPP2_PE_MAC_RANGE_END; tid++) {
+ unsigned int entry_pmap;
+
+ if (!priv->prs_shadow[tid].valid ||
+ (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
+ (priv->prs_shadow[tid].udf != udf_type))
+ continue;
+
+ mvpp2_prs_init_from_hw(priv, &pe, tid);
+ entry_pmap = mvpp2_prs_tcam_port_map_get(&pe);
+
+ if (mvpp2_prs_mac_range_equals(&pe, da, mask) &&
+ entry_pmap == pmap)
+ return tid;
+ }
+
+ return -ENOENT;
+}
+
+/* Update parser's mac da entry */
+int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add)
+{
+ unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+ struct mvpp2 *priv = port->priv;
+ unsigned int pmap, len, ri;
+ struct mvpp2_prs_entry pe;
+ int tid;
+
+ memset(&pe, 0, sizeof(pe));
+
+	/* Scan TCAM and see if an entry with this <MAC DA, port> pair already exists */
+ tid = mvpp2_prs_mac_da_range_find(priv, BIT(port->id), da, mask,
+ MVPP2_PRS_UDF_MAC_DEF);
+
+ /* No such entry */
+ if (tid < 0) {
+ if (!add)
+ return 0;
+
+ /* Create new TCAM entry */
+		/* Go through all entries from first to last */
+ tid = mvpp2_prs_tcam_first_free(priv,
+ MVPP2_PE_MAC_RANGE_START,
+ MVPP2_PE_MAC_RANGE_END);
+ if (tid < 0)
+ return tid;
+
+ pe.index = tid;
+
+ /* Mask all ports */
+ mvpp2_prs_tcam_port_map_set(&pe, 0);
+ } else {
+ mvpp2_prs_init_from_hw(priv, &pe, tid);
+ }
+
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
+
+ /* Update port mask */
+ mvpp2_prs_tcam_port_set(&pe, port->id, add);
+
+ /* Invalidate the entry if no ports are left enabled */
+ pmap = mvpp2_prs_tcam_port_map_get(&pe);
+ if (pmap == 0) {
+ if (add)
+ return -EINVAL;
+
+ mvpp2_prs_hw_inv(priv, pe.index);
+ priv->prs_shadow[pe.index].valid = false;
+ return 0;
+ }
+
+ /* Continue - set next lookup */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
+
+ /* Set match on DA */
+ len = ETH_ALEN;
+ while (len--)
+ mvpp2_prs_tcam_data_byte_set(&pe, len, da[len], 0xff);
+
+ /* Set result info bits */
+ if (is_broadcast_ether_addr(da)) {
+ ri = MVPP2_PRS_RI_L2_BCAST;
+ } else if (is_multicast_ether_addr(da)) {
+ ri = MVPP2_PRS_RI_L2_MCAST;
+ } else {
+ ri = MVPP2_PRS_RI_L2_UCAST;
+
+ if (ether_addr_equal(da, port->dev->dev_addr))
+ ri |= MVPP2_PRS_RI_MAC_ME_MASK;
+ }
+
+ mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
+ MVPP2_PRS_RI_MAC_ME_MASK);
+ mvpp2_prs_shadow_ri_set(priv, pe.index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
+ MVPP2_PRS_RI_MAC_ME_MASK);
+
+ /* Shift to ethertype */
+ mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+ /* Update shadow table and hw entry */
+ priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_MAC_DEF;
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
+ mvpp2_prs_hw_write(priv, &pe);
+
+ return 0;
+}
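+
+/* Editor's usage sketch (hypothetical, modeled on a set_rx_mode path):
+ * secondary addresses are accepted one by one, and a failure (no free
+ * TCAM slot) is the caller's cue to fall back to promiscuous mode:
+ *
+ *	struct netdev_hw_addr *ha;
+ *
+ *	netdev_for_each_mc_addr(ha, dev)
+ *		if (mvpp2_prs_mac_da_accept(port, ha->addr, true))
+ *			break;
+ */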
+
+int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+ int err;
+
+ /* Remove old parser entry */
+ err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, false);
+ if (err)
+ return err;
+
+ /* Add new parser entry */
+ err = mvpp2_prs_mac_da_accept(port, da, true);
+ if (err)
+ return err;
+
+ /* Set addr in the device */
+ ether_addr_copy(dev->dev_addr, da);
+
+ return 0;
+}
+
+void mvpp2_prs_mac_del_all(struct mvpp2_port *port)
+{
+ struct mvpp2 *priv = port->priv;
+ struct mvpp2_prs_entry pe;
+ unsigned long pmap;
+ int index, tid;
+
+ for (tid = MVPP2_PE_MAC_RANGE_START;
+ tid <= MVPP2_PE_MAC_RANGE_END; tid++) {
+ unsigned char da[ETH_ALEN], da_mask[ETH_ALEN];
+
+ if (!priv->prs_shadow[tid].valid ||
+ (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
+ (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF))
+ continue;
+
+ mvpp2_prs_init_from_hw(priv, &pe, tid);
+
+ pmap = mvpp2_prs_tcam_port_map_get(&pe);
+
+ /* We only want entries active on this port */
+ if (!test_bit(port->id, &pmap))
+ continue;
+
+ /* Read mac addr from entry */
+ for (index = 0; index < ETH_ALEN; index++)
+ mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index],
+ &da_mask[index]);
+
+		/* Special cases: don't remove the broadcast address or the
+		 * port's own address
+		 */
+ if (is_broadcast_ether_addr(da) ||
+ ether_addr_equal(da, port->dev->dev_addr))
+ continue;
+
+ /* Remove entry from TCAM */
+ mvpp2_prs_mac_da_accept(port, da, false);
+ }
+}
+
+int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
+{
+ switch (type) {
+ case MVPP2_TAG_TYPE_EDSA:
+ /* Add port to EDSA entries */
+ mvpp2_prs_dsa_tag_set(priv, port, true,
+ MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
+ mvpp2_prs_dsa_tag_set(priv, port, true,
+ MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
+ /* Remove port from DSA entries */
+ mvpp2_prs_dsa_tag_set(priv, port, false,
+ MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
+ mvpp2_prs_dsa_tag_set(priv, port, false,
+ MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
+ break;
+
+ case MVPP2_TAG_TYPE_DSA:
+ /* Add port to DSA entries */
+ mvpp2_prs_dsa_tag_set(priv, port, true,
+ MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
+ mvpp2_prs_dsa_tag_set(priv, port, true,
+ MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
+ /* Remove port from EDSA entries */
+ mvpp2_prs_dsa_tag_set(priv, port, false,
+ MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
+ mvpp2_prs_dsa_tag_set(priv, port, false,
+ MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
+ break;
+
+ case MVPP2_TAG_TYPE_MH:
+ case MVPP2_TAG_TYPE_NONE:
+		/* Remove port from EDSA and DSA entries */
+ mvpp2_prs_dsa_tag_set(priv, port, false,
+ MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
+ mvpp2_prs_dsa_tag_set(priv, port, false,
+ MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
+ mvpp2_prs_dsa_tag_set(priv, port, false,
+ MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
+ mvpp2_prs_dsa_tag_set(priv, port, false,
+ MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
+ break;
+
+ default:
+ if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Set prs flow for the port */
+int mvpp2_prs_def_flow(struct mvpp2_port *port)
+{
+ struct mvpp2_prs_entry pe;
+ int tid;
+
+ memset(&pe, 0, sizeof(pe));
+
+ tid = mvpp2_prs_flow_find(port->priv, port->id);
+
+	/* No such entry exists */
+ if (tid < 0) {
+		/* Go through all entries from last to first */
+ tid = mvpp2_prs_tcam_first_free(port->priv,
+ MVPP2_PE_LAST_FREE_TID,
+ MVPP2_PE_FIRST_FREE_TID);
+ if (tid < 0)
+ return tid;
+
+ pe.index = tid;
+
+		/* Set flow ID */
+ mvpp2_prs_sram_ai_update(&pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
+ mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
+
+ /* Update shadow table */
+ mvpp2_prs_shadow_set(port->priv, pe.index, MVPP2_PRS_LU_FLOWS);
+ } else {
+ mvpp2_prs_init_from_hw(port->priv, &pe, tid);
+ }
+
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+ mvpp2_prs_tcam_port_map_set(&pe, (1 << port->id));
+ mvpp2_prs_hw_write(port->priv, &pe);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h
new file mode 100644
index 000000000000..22fbbc4c8b28
--- /dev/null
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h
@@ -0,0 +1,314 @@
+/*
+ * Header Parser definitions for Marvell PPv2 Network Controller
+ *
+ * Copyright (C) 2014 Marvell
+ *
+ * Marcin Wojtas <mw@semihalf.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+#ifndef _MVPP2_PRS_H_
+#define _MVPP2_PRS_H_
+
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+
+#include "mvpp2.h"
+
+/* Parser constants */
+#define MVPP2_PRS_TCAM_SRAM_SIZE 256
+#define MVPP2_PRS_TCAM_WORDS 6
+#define MVPP2_PRS_SRAM_WORDS 4
+#define MVPP2_PRS_FLOW_ID_SIZE 64
+#define MVPP2_PRS_FLOW_ID_MASK 0x3f
+#define MVPP2_PRS_TCAM_ENTRY_INVALID 1
+#define MVPP2_PRS_TCAM_DSA_TAGGED_BIT BIT(5)
+#define MVPP2_PRS_IPV4_HEAD 0x40
+#define MVPP2_PRS_IPV4_HEAD_MASK 0xf0
+#define MVPP2_PRS_IPV4_MC 0xe0
+#define MVPP2_PRS_IPV4_MC_MASK 0xf0
+#define MVPP2_PRS_IPV4_BC_MASK 0xff
+#define MVPP2_PRS_IPV4_IHL 0x5
+#define MVPP2_PRS_IPV4_IHL_MASK 0xf
+#define MVPP2_PRS_IPV6_MC 0xff
+#define MVPP2_PRS_IPV6_MC_MASK 0xff
+#define MVPP2_PRS_IPV6_HOP_MASK 0xff
+#define MVPP2_PRS_TCAM_PROTO_MASK 0xff
+#define MVPP2_PRS_TCAM_PROTO_MASK_L 0x3f
+#define MVPP2_PRS_DBL_VLANS_MAX 100
+#define MVPP2_PRS_CAST_MASK BIT(0)
+#define MVPP2_PRS_MCAST_VAL BIT(0)
+#define MVPP2_PRS_UCAST_VAL 0x0
+
+/* Tcam structure:
+ * - lookup ID - 4 bits
+ * - port ID - 1 byte
+ * - additional information - 1 byte
+ * - header data - 8 bytes
+ * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0).
+ */
+#define MVPP2_PRS_AI_BITS 8
+#define MVPP2_PRS_PORT_MASK 0xff
+#define MVPP2_PRS_LU_MASK 0xf
+#define MVPP2_PRS_TCAM_DATA_BYTE(offs) \
+ (((offs) - ((offs) % 2)) * 2 + ((offs) % 2))
+#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs) \
+ (((offs) * 2) - ((offs) % 2) + 2)
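+/* Editor's note: each 32-bit TCAM word carries two header-data bytes
+ * followed by their two enable (mask) bytes, hence the interleaving:
+ * MVPP2_PRS_TCAM_DATA_BYTE(0..5) -> bytes 0, 1, 4, 5, 8, 9, while
+ * MVPP2_PRS_TCAM_DATA_BYTE_EN(0..5) -> bytes 2, 3, 6, 7, 10, 11.
+ */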
+#define MVPP2_PRS_TCAM_AI_BYTE 16
+#define MVPP2_PRS_TCAM_PORT_BYTE 17
+#define MVPP2_PRS_TCAM_LU_BYTE 20
+#define MVPP2_PRS_TCAM_EN_OFFS(offs) ((offs) + 2)
+#define MVPP2_PRS_TCAM_INV_WORD 5
+
+#define MVPP2_PRS_VID_TCAM_BYTE 2
+
+/* TCAM range for unicast and multicast filtering. We have 25 entries per port,
+ * with 4 dedicated to UC filtering and the rest to multicast filtering.
+ * Additionally we reserve one entry for the broadcast address, and one for
+ * each port's own address.
+ */
+#define MVPP2_PRS_MAC_UC_MC_FILT_MAX 25
+#define MVPP2_PRS_MAC_RANGE_SIZE 80
+
+/* Number of entries per port dedicated to UC and MC filtering */
+#define MVPP2_PRS_MAC_UC_FILT_MAX 4
+#define MVPP2_PRS_MAC_MC_FILT_MAX (MVPP2_PRS_MAC_UC_MC_FILT_MAX - \
+ MVPP2_PRS_MAC_UC_FILT_MAX)
+
+/* There is a TCAM range reserved for VLAN filtering entries; its size is 33:
+ * 10 VLAN ID filter entries per port
+ * 1 default VLAN filter entry per port
+ * It is assumed that there are 3 ports to filter (not counting the loopback
+ * port), hence 3 * 11 = 33 entries.
+ */
+#define MVPP2_PRS_VLAN_FILT_MAX 11
+#define MVPP2_PRS_VLAN_FILT_RANGE_SIZE 33
+
+#define MVPP2_PRS_VLAN_FILT_MAX_ENTRY (MVPP2_PRS_VLAN_FILT_MAX - 2)
+#define MVPP2_PRS_VLAN_FILT_DFLT_ENTRY (MVPP2_PRS_VLAN_FILT_MAX - 1)
+
+/* Tcam entries ID */
+#define MVPP2_PE_DROP_ALL 0
+#define MVPP2_PE_FIRST_FREE_TID 1
+
+/* MAC filtering range */
+#define MVPP2_PE_MAC_RANGE_END (MVPP2_PE_VID_FILT_RANGE_START - 1)
+#define MVPP2_PE_MAC_RANGE_START (MVPP2_PE_MAC_RANGE_END - \
+ MVPP2_PRS_MAC_RANGE_SIZE + 1)
+/* VLAN filtering range */
+#define MVPP2_PE_VID_FILT_RANGE_END (MVPP2_PRS_TCAM_SRAM_SIZE - 31)
+#define MVPP2_PE_VID_FILT_RANGE_START (MVPP2_PE_VID_FILT_RANGE_END - \
+ MVPP2_PRS_VLAN_FILT_RANGE_SIZE + 1)
+#define MVPP2_PE_LAST_FREE_TID (MVPP2_PE_MAC_RANGE_START - 1)
+#define MVPP2_PE_IP6_EXT_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 30)
+#define MVPP2_PE_IP6_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 29)
+#define MVPP2_PE_IP4_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 28)
+#define MVPP2_PE_LAST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 27)
+#define MVPP2_PE_FIRST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 22)
+#define MVPP2_PE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 21)
+#define MVPP2_PE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 20)
+#define MVPP2_PE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 19)
+#define MVPP2_PE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 18)
+#define MVPP2_PE_ETYPE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 17)
+#define MVPP2_PE_ETYPE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 16)
+#define MVPP2_PE_ETYPE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 15)
+#define MVPP2_PE_ETYPE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 14)
+#define MVPP2_PE_MH_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 13)
+#define MVPP2_PE_DSA_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 12)
+#define MVPP2_PE_IP6_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 11)
+#define MVPP2_PE_IP4_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 10)
+#define MVPP2_PE_ETH_TYPE_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 9)
+#define MVPP2_PE_VID_FLTR_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 8)
+#define MVPP2_PE_VID_EDSA_FLTR_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 7)
+#define MVPP2_PE_VLAN_DBL (MVPP2_PRS_TCAM_SRAM_SIZE - 6)
+#define MVPP2_PE_VLAN_NONE (MVPP2_PRS_TCAM_SRAM_SIZE - 5)
+/* reserved */
+#define MVPP2_PE_MAC_MC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 3)
+#define MVPP2_PE_MAC_UC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 2)
+#define MVPP2_PE_MAC_NON_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 1)
+
+#define MVPP2_PRS_VID_PORT_FIRST(port) (MVPP2_PE_VID_FILT_RANGE_START + \
+ ((port) * MVPP2_PRS_VLAN_FILT_MAX))
+#define MVPP2_PRS_VID_PORT_LAST(port) (MVPP2_PRS_VID_PORT_FIRST(port) \
+ + MVPP2_PRS_VLAN_FILT_MAX_ENTRY)
+/* Index of default vid filter for given port */
+#define MVPP2_PRS_VID_PORT_DFLT(port) (MVPP2_PRS_VID_PORT_FIRST(port) \
+ + MVPP2_PRS_VLAN_FILT_DFLT_ENTRY)
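+/* Editor's worked example: with MVPP2_PRS_TCAM_SRAM_SIZE = 256 the VID
+ * filter range spans TCAM indices 193..225. Port 1 thus owns indices
+ * 204..213 for its ten VID filters and index 214 for its default (guard)
+ * entry.
+ */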
+
+/* Sram structure
+ * The fields are represented by MVPP2_PRS_SRAM_DATA_REG(3)->(0).
+ */
+#define MVPP2_PRS_SRAM_RI_OFFS 0
+#define MVPP2_PRS_SRAM_RI_WORD 0
+#define MVPP2_PRS_SRAM_RI_CTRL_OFFS 32
+#define MVPP2_PRS_SRAM_RI_CTRL_WORD 1
+#define MVPP2_PRS_SRAM_RI_CTRL_BITS 32
+#define MVPP2_PRS_SRAM_SHIFT_OFFS 64
+#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT 72
+#define MVPP2_PRS_SRAM_UDF_OFFS 73
+#define MVPP2_PRS_SRAM_UDF_BITS 8
+#define MVPP2_PRS_SRAM_UDF_MASK 0xff
+#define MVPP2_PRS_SRAM_UDF_SIGN_BIT 81
+#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS 82
+#define MVPP2_PRS_SRAM_UDF_TYPE_MASK 0x7
+#define MVPP2_PRS_SRAM_UDF_TYPE_L3 1
+#define MVPP2_PRS_SRAM_UDF_TYPE_L4 4
+#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS 85
+#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK 0x3
+#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD 1
+#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD 2
+#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD 3
+#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS 87
+#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS 2
+#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK 0x3
+#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD 0
+#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD 2
+#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD 3
+#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS 89
+#define MVPP2_PRS_SRAM_AI_OFFS 90
+#define MVPP2_PRS_SRAM_AI_CTRL_OFFS 98
+#define MVPP2_PRS_SRAM_AI_CTRL_BITS 8
+#define MVPP2_PRS_SRAM_AI_MASK 0xff
+#define MVPP2_PRS_SRAM_NEXT_LU_OFFS 106
+#define MVPP2_PRS_SRAM_NEXT_LU_MASK 0xf
+#define MVPP2_PRS_SRAM_LU_DONE_BIT 110
+#define MVPP2_PRS_SRAM_LU_GEN_BIT 111
+
+/* Sram result info bits assignment */
+#define MVPP2_PRS_RI_MAC_ME_MASK 0x1
+#define MVPP2_PRS_RI_DSA_MASK 0x2
+#define MVPP2_PRS_RI_VLAN_MASK (BIT(2) | BIT(3))
+#define MVPP2_PRS_RI_VLAN_NONE 0x0
+#define MVPP2_PRS_RI_VLAN_SINGLE BIT(2)
+#define MVPP2_PRS_RI_VLAN_DOUBLE BIT(3)
+#define MVPP2_PRS_RI_VLAN_TRIPLE (BIT(2) | BIT(3))
+#define MVPP2_PRS_RI_CPU_CODE_MASK 0x70
+#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC BIT(4)
+#define MVPP2_PRS_RI_L2_CAST_MASK (BIT(9) | BIT(10))
+#define MVPP2_PRS_RI_L2_UCAST 0x0
+#define MVPP2_PRS_RI_L2_MCAST BIT(9)
+#define MVPP2_PRS_RI_L2_BCAST BIT(10)
+#define MVPP2_PRS_RI_PPPOE_MASK 0x800
+#define MVPP2_PRS_RI_L3_PROTO_MASK (BIT(12) | BIT(13) | BIT(14))
+#define MVPP2_PRS_RI_L3_UN 0x0
+#define MVPP2_PRS_RI_L3_IP4 BIT(12)
+#define MVPP2_PRS_RI_L3_IP4_OPT BIT(13)
+#define MVPP2_PRS_RI_L3_IP4_OTHER (BIT(12) | BIT(13))
+#define MVPP2_PRS_RI_L3_IP6 BIT(14)
+#define MVPP2_PRS_RI_L3_IP6_EXT (BIT(12) | BIT(14))
+#define MVPP2_PRS_RI_L3_ARP (BIT(13) | BIT(14))
+#define MVPP2_PRS_RI_L3_ADDR_MASK (BIT(15) | BIT(16))
+#define MVPP2_PRS_RI_L3_UCAST 0x0
+#define MVPP2_PRS_RI_L3_MCAST BIT(15)
+#define MVPP2_PRS_RI_L3_BCAST (BIT(15) | BIT(16))
+#define MVPP2_PRS_RI_IP_FRAG_MASK 0x20000
+#define MVPP2_PRS_RI_IP_FRAG_TRUE BIT(17)
+#define MVPP2_PRS_RI_UDF3_MASK 0x300000
+#define MVPP2_PRS_RI_UDF3_RX_SPECIAL BIT(21)
+#define MVPP2_PRS_RI_L4_PROTO_MASK 0x1c00000
+#define MVPP2_PRS_RI_L4_TCP BIT(22)
+#define MVPP2_PRS_RI_L4_UDP BIT(23)
+#define MVPP2_PRS_RI_L4_OTHER (BIT(22) | BIT(23))
+#define MVPP2_PRS_RI_UDF7_MASK 0x60000000
+#define MVPP2_PRS_RI_UDF7_IP6_LITE BIT(29)
+#define MVPP2_PRS_RI_DROP_MASK 0x80000000
+
+/* Sram additional info bits assignment */
+#define MVPP2_PRS_IPV4_DIP_AI_BIT BIT(0)
+#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT BIT(0)
+#define MVPP2_PRS_IPV6_EXT_AI_BIT BIT(1)
+#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT BIT(2)
+#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT BIT(3)
+#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT BIT(4)
+#define MVPP2_PRS_SINGLE_VLAN_AI 0
+#define MVPP2_PRS_DBL_VLAN_AI_BIT BIT(7)
+#define MVPP2_PRS_EDSA_VID_AI_BIT BIT(0)
+
+/* DSA/EDSA type */
+#define MVPP2_PRS_TAGGED true
+#define MVPP2_PRS_UNTAGGED false
+#define MVPP2_PRS_EDSA true
+#define MVPP2_PRS_DSA false
+
+/* MAC entries, shadow udf */
+enum mvpp2_prs_udf {
+ MVPP2_PRS_UDF_MAC_DEF,
+ MVPP2_PRS_UDF_MAC_RANGE,
+ MVPP2_PRS_UDF_L2_DEF,
+ MVPP2_PRS_UDF_L2_DEF_COPY,
+ MVPP2_PRS_UDF_L2_USER,
+};
+
+/* Lookup ID */
+enum mvpp2_prs_lookup {
+ MVPP2_PRS_LU_MH,
+ MVPP2_PRS_LU_MAC,
+ MVPP2_PRS_LU_DSA,
+ MVPP2_PRS_LU_VLAN,
+ MVPP2_PRS_LU_VID,
+ MVPP2_PRS_LU_L2,
+ MVPP2_PRS_LU_PPPOE,
+ MVPP2_PRS_LU_IP4,
+ MVPP2_PRS_LU_IP6,
+ MVPP2_PRS_LU_FLOWS,
+ MVPP2_PRS_LU_LAST,
+};
+
+union mvpp2_prs_tcam_entry {
+ u32 word[MVPP2_PRS_TCAM_WORDS];
+ u8 byte[MVPP2_PRS_TCAM_WORDS * 4];
+};
+
+union mvpp2_prs_sram_entry {
+ u32 word[MVPP2_PRS_SRAM_WORDS];
+ u8 byte[MVPP2_PRS_SRAM_WORDS * 4];
+};
+
+struct mvpp2_prs_entry {
+ u32 index;
+ union mvpp2_prs_tcam_entry tcam;
+ union mvpp2_prs_sram_entry sram;
+};
+
+struct mvpp2_prs_shadow {
+ bool valid;
+ bool finish;
+
+ /* Lookup ID */
+ int lu;
+
+ /* User defined offset */
+ int udf;
+
+ /* Result info */
+ u32 ri;
+ u32 ri_mask;
+};
+
+int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv);
+
+int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add);
+
+int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type);
+
+int mvpp2_prs_def_flow(struct mvpp2_port *port);
+
+void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port);
+
+void mvpp2_prs_vid_disable_filtering(struct mvpp2_port *port);
+
+int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid);
+
+void mvpp2_prs_vid_entry_remove(struct mvpp2_port *port, u16 vid);
+
+void mvpp2_prs_vid_remove_all(struct mvpp2_port *port);
+
+void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port,
+ enum mvpp2_prs_l2_cast l2_cast, bool add);
+
+void mvpp2_prs_mac_del_all(struct mvpp2_port *port);
+
+int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da);
+
+#endif
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index e0b72bf428f7..d8ebf0a05e0c 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -2503,7 +2503,6 @@ static int mtk_probe(struct platform_device *pdev)
{
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
struct device_node *mac_np;
- const struct of_device_id *match;
struct mtk_eth *eth;
int err;
int i;
@@ -2512,8 +2511,7 @@ static int mtk_probe(struct platform_device *pdev)
if (!eth)
return -ENOMEM;
- match = of_match_device(of_mtk_match, &pdev->dev);
- eth->soc = (struct mtk_soc_data *)match->data;
+ eth->soc = of_device_get_match_data(&pdev->dev);
eth->dev = &pdev->dev;
eth->base = devm_ioremap_resource(&pdev->dev, res);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 5c613c6663da..9f54ccbddea7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -593,30 +593,25 @@ static int get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb,
}
#if IS_ENABLED(CONFIG_IPV6)
-/* In IPv6 packets, besides subtracting the pseudo header checksum,
- * we also compute/add the IP header checksum which
- * is not added by the HW.
+/* In IPv6 packets, hw_checksum lacks 6 bytes from the IPv6 header:
+ * the first 4 bytes (priority, version, flow_lbl)
+ * and 2 additional bytes (nexthdr, hop_limit).
*/
static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
struct ipv6hdr *ipv6h)
{
__u8 nexthdr = ipv6h->nexthdr;
- __wsum csum_pseudo_hdr = 0;
+ __wsum temp;
if (unlikely(nexthdr == IPPROTO_FRAGMENT ||
nexthdr == IPPROTO_HOPOPTS ||
nexthdr == IPPROTO_SCTP))
return -1;
- hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(nexthdr));
- csum_pseudo_hdr = csum_partial(&ipv6h->saddr,
- sizeof(ipv6h->saddr) + sizeof(ipv6h->daddr), 0);
- csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ipv6h->payload_len);
- csum_pseudo_hdr = csum_add(csum_pseudo_hdr,
- (__force __wsum)htons(nexthdr));
-
- skb->csum = csum_sub(hw_checksum, csum_pseudo_hdr);
- skb->csum = csum_add(skb->csum, csum_partial(ipv6h, sizeof(struct ipv6hdr), 0));
+ /* priority, version, flow_lbl */
+ temp = csum_add(hw_checksum, *(__wsum *)ipv6h);
+ /* nexthdr and hop_limit */
+ skb->csum = csum_add(temp, (__force __wsum)*(__be16 *)&ipv6h->nexthdr);
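+	/* Editor's note: the 32-bit read covers priority/version/flow_lbl;
+	 * the 16-bit read at &ipv6h->nexthdr folds in nexthdr + hop_limit,
+	 * completing the 6 bytes the hardware left out.
+	 */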
return 0;
}
#endif
@@ -775,8 +770,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
act = bpf_prog_run_xdp(xdp_prog, &xdp);
+ length = xdp.data_end - xdp.data;
if (xdp.data != orig_data) {
- length = xdp.data_end - xdp.data;
frags[0].page_offset = xdp.data -
xdp.data_hard_start;
va = xdp.data;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 6b6853773848..0227786308af 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -694,7 +694,7 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
u16 rings_p_up = priv->num_tx_rings_p_up;
if (netdev_get_num_tc(dev))
- return skb_tx_hash(dev, skb);
+ return fallback(dev, skb);
return fallback(dev, skb) % rings_p_up;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index de6b3d416148..46dcbfbe4c5e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -165,6 +165,7 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
[36] = "QinQ VST mode support",
[37] = "sl to vl mapping table change event support",
[38] = "user MAC support",
+ [39] = "Report driver version to FW support",
};
int i;
@@ -1038,6 +1039,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP;
if (field32 & (1 << 7))
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT;
+ if (field32 & (1 << 8))
+ dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DRIVER_VERSION_TO_FW;
MLX4_GET(field32, outbox, QUERY_DEV_CAP_DIAG_RPRT_PER_PORT);
if (field32 & (1 << 17))
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT;
@@ -1860,6 +1863,8 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
#define INIT_HCA_UC_STEERING_OFFSET (INIT_HCA_MCAST_OFFSET + 0x18)
#define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b)
#define INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN 0x6
+#define INIT_HCA_DRIVER_VERSION_OFFSET 0x140
+#define INIT_HCA_DRIVER_VERSION_SZ 0x40
#define INIT_HCA_FS_PARAM_OFFSET 0x1d0
#define INIT_HCA_FS_BASE_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x00)
#define INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x12)
@@ -1950,6 +1955,13 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT)
*(inbox + INIT_HCA_RECOVERABLE_ERROR_EVENT_OFFSET / 4) |= cpu_to_be32(1 << 31);
+ if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DRIVER_VERSION_TO_FW) {
+ u8 *dst = (u8 *)(inbox + INIT_HCA_DRIVER_VERSION_OFFSET / 4);
+
+ strncpy(dst, DRV_NAME_FOR_FW, INIT_HCA_DRIVER_VERSION_SZ - 1);
+ mlx4_dbg(dev, "Reporting Driver Version to FW: %s\n", dst);
+ }
+
/* QPC/EEC/CQC/EQC/RDMARC attributes */
MLX4_PUT(inbox, param->qpc_base, INIT_HCA_QPC_BASE_OFFSET);
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c
index 685337d58276..5342bd8a3d0b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/icm.c
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.c
@@ -43,12 +43,13 @@
#include "fw.h"
/*
- * We allocate in page size (default 4KB on many archs) chunks to avoid high
- * order memory allocations in fragmented/high usage memory situation.
+ * We allocate in chunks as large as possible, up to a maximum of 256 KB
+ * per chunk. Note that the chunks are not necessarily in contiguous
+ * physical memory.
*/
enum {
- MLX4_ICM_ALLOC_SIZE = PAGE_SIZE,
- MLX4_TABLE_CHUNK_SIZE = PAGE_SIZE,
+ MLX4_ICM_ALLOC_SIZE = 1 << 18,
+ MLX4_TABLE_CHUNK_SIZE = 1 << 18,
};
static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
@@ -135,6 +136,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
struct mlx4_icm *icm;
struct mlx4_icm_chunk *chunk = NULL;
int cur_order;
+ gfp_t mask;
int ret;
/* We use sg_set_buf for coherent allocs, which assumes low memory */
@@ -178,13 +180,17 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
while (1 << cur_order > npages)
--cur_order;
+ mask = gfp_mask;
+ if (cur_order)
+ mask &= ~__GFP_DIRECT_RECLAIM;
+
if (coherent)
ret = mlx4_alloc_icm_coherent(&dev->persist->pdev->dev,
&chunk->mem[chunk->npages],
- cur_order, gfp_mask);
+ cur_order, mask);
else
ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages],
- cur_order, gfp_mask,
+ cur_order, mask,
dev->numa_node);
if (ret) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 60172a38c4a4..0a30d81aab3b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -73,7 +73,7 @@ MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
static int msi_x = 1;
module_param(msi_x, int, 0444);
-MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");
+MODULE_PARM_DESC(msi_x, "0 - don't use MSI-X, 1 - use MSI-X, >1 - limit number of MSI-X irqs to msi_x");
#else /* CONFIG_PCI_MSI */
@@ -2815,6 +2815,9 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
dev->caps.num_eqs - dev->caps.reserved_eqs,
MAX_MSIX);
+ if (msi_x > 1)
+ nreq = min_t(int, nreq, msi_x);
+
entries = kcalloc(nreq, sizeof(*entries), GFP_KERNEL);
if (!entries)
goto no_msi;
@@ -4127,17 +4130,68 @@ static const struct pci_error_handlers mlx4_err_handler = {
.resume = mlx4_pci_resume,
};
+static int mlx4_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
+ struct mlx4_dev *dev = persist->dev;
+
+ mlx4_err(dev, "suspend was called\n");
+ mutex_lock(&persist->interface_state_mutex);
+ if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
+ mlx4_unload_one(pdev);
+ mutex_unlock(&persist->interface_state_mutex);
+
+ return 0;
+}
+
+static int mlx4_resume(struct pci_dev *pdev)
+{
+ struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
+ struct mlx4_dev *dev = persist->dev;
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
+ int total_vfs;
+ int ret = 0;
+
+ mlx4_err(dev, "resume was called\n");
+ total_vfs = dev->persist->num_vfs;
+ memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));
+
+ mutex_lock(&persist->interface_state_mutex);
+ if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) {
+ ret = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs,
+ nvfs, priv, 1);
+ if (!ret) {
+ ret = restore_current_port_types(dev,
+ dev->persist->curr_port_type,
+ dev->persist->curr_port_poss_type);
+ if (ret)
+ mlx4_err(dev, "resume: could not restore original port types (%d)\n", ret);
+ }
+ }
+ mutex_unlock(&persist->interface_state_mutex);
+
+ return ret;
+}
+
static struct pci_driver mlx4_driver = {
.name = DRV_NAME,
.id_table = mlx4_pci_table,
.probe = mlx4_init_one,
.shutdown = mlx4_shutdown,
.remove = mlx4_remove_one,
+ .suspend = mlx4_suspend,
+ .resume = mlx4_resume,
.err_handler = &mlx4_err_handler,
};
static int __init mlx4_verify_params(void)
{
+ if (msi_x < 0) {
+ pr_warn("mlx4_core: bad msi_x: %d\n", msi_x);
+ return -1;
+ }
+
if ((log_num_mac < 0) || (log_num_mac > 7)) {
pr_warn("mlx4_core: bad num_mac: %d\n", log_num_mac);
return -1;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index c68da1986e51..cb9e923e8399 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -55,8 +55,8 @@
#include "fw_qos.h"
#define DRV_NAME "mlx4_core"
-#define PFX DRV_NAME ": "
#define DRV_VERSION "4.0-0"
+#define DRV_NAME_FOR_FW "Linux," DRV_NAME "," DRV_VERSION
#define MLX4_FS_UDP_UC_EN (1 << 1)
#define MLX4_FS_TCP_UC_EN (1 << 2)
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 3ef3406ff4cb..10fcc22f4590 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -614,9 +614,9 @@ int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan,
int index_at_dup_port = -1;
for (i = MLX4_VLAN_REGULAR; i < MLX4_MAX_VLAN_NUM; i++) {
- if ((vlan == (MLX4_VLAN_MASK & be32_to_cpu(table->entries[i]))))
+ if (vlan == (MLX4_VLAN_MASK & be32_to_cpu(table->entries[i])))
index_at_port = i;
- if ((vlan == (MLX4_VLAN_MASK & be32_to_cpu(dup_table->entries[i]))))
+ if (vlan == (MLX4_VLAN_MASK & be32_to_cpu(dup_table->entries[i])))
index_at_dup_port = i;
}
/* check that same vlan is not in the tables at different indices */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index c032319f1cb9..2545296a0c08 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -30,6 +30,7 @@ config MLX5_CORE_EN
bool "Mellanox Technologies ConnectX-4 Ethernet support"
depends on NETDEVICES && ETHERNET && INET && PCI && MLX5_CORE
depends on IPV6=y || IPV6=n || MLX5_CORE=m
+ select PAGE_POOL
default n
---help---
Ethernet support in Mellanox Technologies ConnectX-4 NIC.
@@ -85,3 +86,15 @@ config MLX5_EN_IPSEC
Build support for IPsec cryptography-offload acceleration in the NIC.
Note: Support for hardware with this capability needs to be selected
for this option to become available.
+
+config MLX5_EN_TLS
+ bool "TLS cryptography-offload accelaration"
+ depends on MLX5_CORE_EN
+ depends on TLS_DEVICE
+ depends on TLS=y || MLX5_CORE=m
+ depends on MLX5_ACCEL
+ default n
+ ---help---
+	Build support for TLS cryptography-offload acceleration in the NIC.
+ Note: Support for hardware with this capability needs to be selected
+ for this option to become available.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index c805769d92a9..9efbf193ad5a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -8,24 +8,26 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
fs_counters.o rl.o lag.o dev.o wq.o lib/gid.o lib/clock.o \
diag/fs_tracepoint.o
-mlx5_core-$(CONFIG_MLX5_ACCEL) += accel/ipsec.o
+mlx5_core-$(CONFIG_MLX5_ACCEL) += accel/ipsec.o accel/tls.o
mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o \
- fpga/ipsec.o
+ fpga/ipsec.o fpga/tls.o
mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \
en_tx.o en_rx.o en_dim.o en_txrx.o en_stats.o vxlan.o \
- en_arfs.o en_fs_ethtool.o en_selftest.o
+ en_arfs.o en_fs_ethtool.o en_selftest.o en/port.o
mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o
mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o en_rep.o en_tc.o
-mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o
+mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o en/port_buffer.o
mlx5_core-$(CONFIG_MLX5_CORE_IPOIB) += ipoib/ipoib.o ipoib/ethtool.o ipoib/ipoib_vlan.o
mlx5_core-$(CONFIG_MLX5_EN_IPSEC) += en_accel/ipsec.o en_accel/ipsec_rxtx.o \
en_accel/ipsec_stats.o
+mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/tls.o en_accel/tls_rxtx.o en_accel/tls_stats.o
+
CFLAGS_tracepoint.o := -I$(src)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c
new file mode 100644
index 000000000000..77ac19f38cbe
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/mlx5/device.h>
+
+#include "accel/tls.h"
+#include "mlx5_core.h"
+#include "fpga/tls.h"
+
+int mlx5_accel_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow,
+ struct tls_crypto_info *crypto_info,
+ u32 start_offload_tcp_sn, u32 *p_swid)
+{
+ return mlx5_fpga_tls_add_tx_flow(mdev, flow, crypto_info,
+ start_offload_tcp_sn, p_swid);
+}
+
+void mlx5_accel_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid)
+{
+ mlx5_fpga_tls_del_tx_flow(mdev, swid, GFP_KERNEL);
+}
+
+bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev)
+{
+ return mlx5_fpga_is_tls_device(mdev);
+}
+
+u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev)
+{
+ return mlx5_fpga_tls_device_caps(mdev);
+}
+
+int mlx5_accel_tls_init(struct mlx5_core_dev *mdev)
+{
+ return mlx5_fpga_tls_init(mdev);
+}
+
+void mlx5_accel_tls_cleanup(struct mlx5_core_dev *mdev)
+{
+ mlx5_fpga_tls_cleanup(mdev);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h
new file mode 100644
index 000000000000..6f9c9f446ecc
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef __MLX5_ACCEL_TLS_H__
+#define __MLX5_ACCEL_TLS_H__
+
+#include <linux/mlx5/driver.h>
+#include <linux/tls.h>
+
+#ifdef CONFIG_MLX5_ACCEL
+
+enum {
+ MLX5_ACCEL_TLS_TX = BIT(0),
+ MLX5_ACCEL_TLS_RX = BIT(1),
+ MLX5_ACCEL_TLS_V12 = BIT(2),
+ MLX5_ACCEL_TLS_V13 = BIT(3),
+ MLX5_ACCEL_TLS_LRO = BIT(4),
+ MLX5_ACCEL_TLS_IPV6 = BIT(5),
+ MLX5_ACCEL_TLS_AES_GCM128 = BIT(30),
+ MLX5_ACCEL_TLS_AES_GCM256 = BIT(31),
+};
+
+struct mlx5_ifc_tls_flow_bits {
+ u8 src_port[0x10];
+ u8 dst_port[0x10];
+ union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits src_ipv4_src_ipv6;
+ union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits dst_ipv4_dst_ipv6;
+ u8 ipv6[0x1];
+ u8 direction_sx[0x1];
+ u8 reserved_at_2[0x1e];
+};
+
+int mlx5_accel_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow,
+ struct tls_crypto_info *crypto_info,
+ u32 start_offload_tcp_sn, u32 *p_swid);
+void mlx5_accel_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid);
+bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev);
+u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev);
+int mlx5_accel_tls_init(struct mlx5_core_dev *mdev);
+void mlx5_accel_tls_cleanup(struct mlx5_core_dev *mdev);
+
+#else
+
+static inline int
+mlx5_accel_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow,
+ struct tls_crypto_info *crypto_info,
+ u32 start_offload_tcp_sn, u32 *p_swid) { return 0; }
+static inline void mlx5_accel_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid) { }
+static inline bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev) { return false; }
+static inline u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev) { return 0; }
+static inline int mlx5_accel_tls_init(struct mlx5_core_dev *mdev) { return 0; }
+static inline void mlx5_accel_tls_cleanup(struct mlx5_core_dev *mdev) { }
+
+#endif
+
+#endif /* __MLX5_ACCEL_TLS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 21cd1703a862..487388aed98f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -135,6 +135,14 @@ static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
return cmd->cmd_buf + (idx << cmd->log_stride);
}
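+
+/* Editor's note: sizeof(msg->first.data) bytes (16 in the layout assumed
+ * here) travel inline in the command entry itself; only the remainder is
+ * carved into MLX5_CMD_DATA_BLOCK_SIZE mailbox blocks. Assuming 512-byte
+ * blocks, a 16-byte message needs 0 blocks and a 1040-byte one needs
+ * DIV_ROUND_UP(1024, 512) = 2.
+ */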
+static int mlx5_calc_cmd_blocks(struct mlx5_cmd_msg *msg)
+{
+ int size = msg->len;
+ int blen = size - min_t(int, sizeof(msg->first.data), size);
+
+ return DIV_ROUND_UP(blen, MLX5_CMD_DATA_BLOCK_SIZE);
+}
+
static u8 xor8_buf(void *buf, size_t offset, int len)
{
u8 *ptr = buf;
@@ -174,10 +182,7 @@ static void calc_block_sig(struct mlx5_cmd_prot_block *block)
static void calc_chain_sig(struct mlx5_cmd_msg *msg)
{
struct mlx5_cmd_mailbox *next = msg->next;
- int size = msg->len;
- int blen = size - min_t(int, sizeof(msg->first.data), size);
- int n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1)
- / MLX5_CMD_DATA_BLOCK_SIZE;
+ int n = mlx5_calc_cmd_blocks(msg);
int i = 0;
for (i = 0; i < n && next; i++) {
@@ -220,12 +225,9 @@ static void free_cmd(struct mlx5_cmd_work_ent *ent)
static int verify_signature(struct mlx5_cmd_work_ent *ent)
{
struct mlx5_cmd_mailbox *next = ent->out->next;
+ int n = mlx5_calc_cmd_blocks(ent->out);
int err;
u8 sig;
- int size = ent->out->len;
- int blen = size - min_t(int, sizeof(ent->out->first.data), size);
- int n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1)
- / MLX5_CMD_DATA_BLOCK_SIZE;
int i = 0;
sig = xor8_buf(ent->lay, 0, sizeof(*ent->lay));
@@ -720,9 +722,11 @@ static void dump_command(struct mlx5_core_dev *dev,
struct mlx5_cmd_msg *msg = input ? ent->in : ent->out;
u16 op = MLX5_GET(mbox_in, ent->lay->in, opcode);
struct mlx5_cmd_mailbox *next = msg->next;
+ int n = mlx5_calc_cmd_blocks(msg);
int data_only;
u32 offset = 0;
int dump_len;
+ int i;
data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA));
@@ -749,7 +753,7 @@ static void dump_command(struct mlx5_core_dev *dev,
offset += sizeof(*ent->lay);
}
- while (next && offset < msg->len) {
+ for (i = 0; i < n && next; i++) {
if (data_only) {
dump_len = min_t(int, MLX5_CMD_DATA_BLOCK_SIZE, msg->len - offset);
dump_buf(next->buf, dump_len, 1, offset);
@@ -1137,7 +1141,6 @@ static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
struct mlx5_cmd_mailbox *tmp, *head = NULL;
struct mlx5_cmd_prot_block *block;
struct mlx5_cmd_msg *msg;
- int blen;
int err;
int n;
int i;
@@ -1146,8 +1149,8 @@ static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
if (!msg)
return ERR_PTR(-ENOMEM);
- blen = size - min_t(int, sizeof(msg->first.data), size);
- n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1) / MLX5_CMD_DATA_BLOCK_SIZE;
+ msg->len = size;
+ n = mlx5_calc_cmd_blocks(msg);
for (i = 0; i < n; i++) {
tmp = alloc_cmd_box(dev, flags);
@@ -1165,7 +1168,6 @@ static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
head = tmp;
}
msg->next = head;
- msg->len = size;
return msg;
err_alloc:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
index d93ff567b40d..b3820a34e773 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
@@ -235,7 +235,7 @@ const char *parse_fs_dst(struct trace_seq *p,
switch (dst->type) {
case MLX5_FLOW_DESTINATION_TYPE_VPORT:
- trace_seq_printf(p, "vport=%u\n", dst->vport_num);
+ trace_seq_printf(p, "vport=%u\n", dst->vport.num);
break;
case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
trace_seq_printf(p, "ft=%p\n", dst->ft);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 30cad07be2b5..eb9eb7aa953a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -53,6 +53,11 @@
#include "mlx5_core.h"
#include "en_stats.h"
+struct page_pool;
+
+#define MLX5E_METADATA_ETHER_TYPE (0x8CE4)
+#define MLX5E_METADATA_ETHER_LEN 8
+
#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)
#define MLX5E_ETH_HARD_MTU (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
@@ -60,6 +65,7 @@
#define MLX5E_HW2SW_MTU(params, hwmtu) ((hwmtu) - ((params)->hard_mtu))
#define MLX5E_SW2HW_MTU(params, swmtu) ((swmtu) + ((params)->hard_mtu))
+#define MLX5E_MAX_PRIORITY 8
#define MLX5E_MAX_DSCP 64
#define MLX5E_MAX_NUM_TC 8
@@ -95,18 +101,22 @@
(MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW + \
(MLX5_MPWRQ_LOG_WQE_SZ - MLX5E_ORDER2_MAX_PACKET_MTU))
+#define MLX5E_MIN_SKB_FRAG_SZ (MLX5_SKB_FRAG_SZ(MLX5_RX_HEADROOM))
+#define MLX5E_LOG_MAX_RX_WQE_BULK \
+ (ilog2(PAGE_SIZE / roundup_pow_of_two(MLX5E_MIN_SKB_FRAG_SZ)))
+
#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE 0x6
#define MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE 0xa
#define MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE 0xd
-#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE 0x1
+#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE (1 + MLX5E_LOG_MAX_RX_WQE_BULK)
#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE 0xa
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE min_t(u8, 0xd, \
MLX5E_LOG_MAX_RQ_NUM_PACKETS_MPW)
#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW 0x2
-#define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD (256)
+#define MLX5E_RX_MAX_HEAD (256)
#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024)
#define MLX5E_DEFAULT_LRO_TIMEOUT 32
@@ -177,11 +187,16 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
struct mlx5e_tx_wqe {
struct mlx5_wqe_ctrl_seg ctrl;
struct mlx5_wqe_eth_seg eth;
+ struct mlx5_wqe_data_seg data[0];
};
-struct mlx5e_rx_wqe {
+struct mlx5e_rx_wqe_ll {
struct mlx5_wqe_srq_next_seg next;
- struct mlx5_wqe_data_seg data;
+ struct mlx5_wqe_data_seg data[0];
+};
+
+struct mlx5e_rx_wqe_cyc {
+ struct mlx5_wqe_data_seg data[0];
};
struct mlx5e_umr_wqe {
@@ -239,6 +254,7 @@ struct mlx5e_params {
bool vlan_strip_disable;
bool scatter_fcs_en;
bool rx_dim_enabled;
+ bool tx_dim_enabled;
u32 lro_timeout;
u32 pflags;
struct bpf_prog *xdp_prog;
@@ -269,6 +285,11 @@ struct mlx5e_dcbx {
/* The only setting that cannot be read from FW */
u8 tc_tsa[IEEE_8021QAZ_MAX_TCS];
u8 cap;
+
+ /* Buffer configuration */
+ bool manual_buffer;
+ u32 cable_len;
+ u32 xoff;
};
struct mlx5e_dcbx_dp {
@@ -282,8 +303,6 @@ enum {
MLX5E_RQ_STATE_AM,
};
-#define MLX5E_TEST_BIT(state, nr) (state & BIT(nr))
-
struct mlx5e_cq {
/* data path - accessed per cqe */
struct mlx5_cqwq wq;
@@ -303,7 +322,7 @@ struct mlx5e_cq {
/* control */
struct mlx5_core_dev *mdev;
- struct mlx5_frag_wq_ctrl wq_ctrl;
+ struct mlx5_wq_ctrl wq_ctrl;
} ____cacheline_aligned_in_smp;
struct mlx5e_tx_wqe_info {
@@ -328,6 +347,8 @@ enum {
MLX5E_SQ_STATE_ENABLED,
MLX5E_SQ_STATE_RECOVERING,
MLX5E_SQ_STATE_IPSEC,
+ MLX5E_SQ_STATE_AM,
+ MLX5E_SQ_STATE_TLS,
};
struct mlx5e_sq_wqe_info {
@@ -340,11 +361,11 @@ struct mlx5e_txqsq {
/* dirtied @completion */
u16 cc;
u32 dma_fifo_cc;
+ struct net_dim dim; /* Adaptive Moderation */
/* dirtied @xmit */
u16 pc ____cacheline_aligned_in_smp;
u32 dma_fifo_pc;
- struct mlx5e_sq_stats stats;
struct mlx5e_cq cq;
@@ -357,11 +378,11 @@ struct mlx5e_txqsq {
/* read only */
struct mlx5_wq_cyc wq;
u32 dma_fifo_mask;
+ struct mlx5e_sq_stats *stats;
void __iomem *uar_map;
struct netdev_queue *txq;
u32 sqn;
u8 min_inline_mode;
- u16 edge;
struct device *pdev;
__be32 mkey_be;
unsigned long state;
@@ -392,6 +413,7 @@ struct mlx5e_xdpsq {
struct {
struct mlx5e_dma_info *di;
bool doorbell;
+ bool redirect_flush;
} db;
/* read only */
@@ -425,7 +447,6 @@ struct mlx5e_icosq {
struct mlx5_wq_cyc wq;
void __iomem *uar_map;
u32 sqn;
- u16 edge;
unsigned long state;
/* control path */
@@ -436,7 +457,7 @@ struct mlx5e_icosq {
static inline bool
mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n)
{
- return (((wq->sz_m1 & (cc - pc)) >= n) || (cc == pc));
+ return (mlx5_wq_cyc_ctr2ix(wq, cc - pc) >= n) || (cc == pc);
}
struct mlx5e_dma_info {
@@ -445,8 +466,9 @@ struct mlx5e_dma_info {
};
struct mlx5e_wqe_frag_info {
- struct mlx5e_dma_info di;
+ struct mlx5e_dma_info *di;
u32 offset;
+ bool last_in_page;
};
struct mlx5e_umr_dma_info {
@@ -459,6 +481,8 @@ struct mlx5e_mpw_info {
DECLARE_BITMAP(xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE);
};
+#define MLX5E_MAX_RX_FRAGS 4
+
/* a single cache unit is capable to serve one napi call (for non-striding rq)
* or a MPWQE (for striding rq).
*/
@@ -476,6 +500,9 @@ typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq*, struct mlx5_cqe64*);
typedef struct sk_buff *
(*mlx5e_fp_skb_from_cqe_mpwrq)(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
u16 cqe_bcnt, u32 head_offset, u32 page_idx);
+typedef struct sk_buff *
+(*mlx5e_fp_skb_from_cqe)(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
+ struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt);
typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq);
typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16);
@@ -483,19 +510,30 @@ enum mlx5e_rq_flag {
MLX5E_RQ_FLAG_XDP_XMIT = BIT(0),
};
+struct mlx5e_rq_frag_info {
+ int frag_size;
+ int frag_stride;
+};
+
+struct mlx5e_rq_frags_info {
+ struct mlx5e_rq_frag_info arr[MLX5E_MAX_RX_FRAGS];
+ u8 num_frags;
+ u8 log_num_frags;
+ u8 wqe_bulk;
+};
+
struct mlx5e_rq {
/* data path */
- struct mlx5_wq_ll wq;
-
union {
struct {
- struct mlx5e_wqe_frag_info *frag_info;
- u32 frag_sz; /* max possible skb frag_sz */
- union {
- bool page_reuse;
- };
+ struct mlx5_wq_cyc wq;
+ struct mlx5e_wqe_frag_info *frags;
+ struct mlx5e_dma_info *di;
+ struct mlx5e_rq_frags_info info;
+ mlx5e_fp_skb_from_cqe skb_from_cqe;
} wqe;
struct {
+ struct mlx5_wq_ll wq;
struct mlx5e_umr_wqe umr_wqe;
struct mlx5e_mpw_info *info;
mlx5e_fp_skb_from_cqe_mpwrq skb_from_cqe_mpwrq;
@@ -506,14 +544,13 @@ struct mlx5e_rq {
};
struct {
u16 headroom;
- u8 page_order;
u8 map_dir; /* dma map direction */
} buff;
struct mlx5e_channel *channel;
struct device *pdev;
struct net_device *netdev;
- struct mlx5e_rq_stats stats;
+ struct mlx5e_rq_stats *stats;
struct mlx5e_cq cq;
struct mlx5e_page_cache page_cache;
struct hwtstamp_config *tstamp;
@@ -533,6 +570,7 @@ struct mlx5e_rq {
unsigned int hw_mtu;
struct mlx5e_xdpsq xdpsq;
DECLARE_BITMAP(flags, 8);
+ struct page_pool *page_pool;
/* control */
struct mlx5_wq_ctrl wq_ctrl;
@@ -560,7 +598,7 @@ struct mlx5e_channel {
/* data path - accessed per napi poll */
struct irq_desc *irq_desc;
- struct mlx5e_ch_stats stats;
+ struct mlx5e_ch_stats *stats;
/* control */
struct mlx5e_priv *priv;
@@ -576,6 +614,12 @@ struct mlx5e_channels {
struct mlx5e_params params;
};
+struct mlx5e_channel_stats {
+ struct mlx5e_ch_stats ch;
+ struct mlx5e_sq_stats sq[MLX5E_MAX_NUM_TC];
+ struct mlx5e_rq_stats rq;
+} ____cacheline_aligned_in_smp;
+
enum mlx5e_traffic_types {
MLX5E_TT_IPV4_TCP,
MLX5E_TT_IPV6_TCP,
@@ -625,7 +669,6 @@ struct mlx5e_flow_table {
struct mlx5e_tc_table {
struct mlx5_flow_table *t;
- struct rhashtable_params ht_params;
struct rhashtable ht;
DECLARE_HASHTABLE(mod_hdr_tbl, 8);
@@ -778,6 +821,8 @@ struct mlx5e_priv {
struct mlx5_core_dev *mdev;
struct net_device *netdev;
struct mlx5e_stats stats;
+ struct mlx5e_channel_stats channel_stats[MLX5E_MAX_NUM_CHANNELS];
+ u8 max_opened_tc;
struct hwtstamp_config tstamp;
u16 q_counter;
u16 drop_rq_q_counter;
@@ -790,6 +835,9 @@ struct mlx5e_priv {
#ifdef CONFIG_MLX5_EN_IPSEC
struct mlx5e_ipsec *ipsec;
#endif
+#ifdef CONFIG_MLX5_EN_TLS
+ struct mlx5e_tls *tls;
+#endif
};
struct mlx5e_profile {
@@ -820,6 +868,8 @@ void mlx5e_build_ptys2ethtool_map(void);
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
void *accel_priv, select_queue_fallback_t fallback);
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
+netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ struct mlx5e_tx_wqe *wqe, u16 pi);
void mlx5e_completion_event(struct mlx5_core_cq *mcq);
void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
@@ -849,6 +899,12 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
u16 cqe_bcnt, u32 head_offset, u32 page_idx);
+struct sk_buff *
+mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
+ struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt);
+struct sk_buff *
+mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
+ struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt);
void mlx5e_update_stats(struct mlx5e_priv *priv);
@@ -919,8 +975,6 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv);
void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
int num_channels);
-int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
-
void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params,
u8 cq_period_mode);
void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params,
@@ -935,10 +989,21 @@ static inline bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev)
MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ft_field_support.inner_ip_version));
}
+static inline void mlx5e_sq_fetch_wqe(struct mlx5e_txqsq *sq,
+ struct mlx5e_tx_wqe **wqe,
+ u16 *pi)
+{
+ struct mlx5_wq_cyc *wq = &sq->wq;
+
+ *pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+ *wqe = mlx5_wq_cyc_get_wqe(wq, *pi);
+ memset(*wqe, 0, sizeof(**wqe));
+}
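+
+/* Editor's note: the expected pattern is fetch-and-zero the current WQE
+ * slot here, fill in the ctrl/eth segments, then ring the doorbell; *pi
+ * is sq->pc reduced to a slot index within the cyclic work queue.
+ */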
+
static inline
struct mlx5e_tx_wqe *mlx5e_post_nop(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
{
- u16 pi = *pc & wq->sz_m1;
+ u16 pi = mlx5_wq_cyc_ctr2ix(wq, *pc);
struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
@@ -1067,6 +1132,10 @@ void mlx5e_update_stats_work(struct work_struct *work);
int mlx5e_bits_invert(unsigned long a, int size);
+typedef int (*change_hw_mtu_cb)(struct mlx5e_priv *priv);
+int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
+ change_hw_mtu_cb set_mtu_cb);
+
/* ethtool helpers */
void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
struct ethtool_drvinfo *drvinfo);
@@ -1092,9 +1161,6 @@ int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
struct ethtool_flash *flash);
-int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
- void *cb_priv);
-
/* mlx5e generic netdev management API */
struct net_device*
mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile,
@@ -1107,4 +1173,5 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
u16 max_channels, u16 mtu);
u8 mlx5e_params_calculate_tx_min_inline(struct mlx5_core_dev *mdev);
void mlx5e_rx_dim_work(struct work_struct *work);
+void mlx5e_tx_dim_work(struct work_struct *work);
#endif /* __MLX5_EN_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/en/Makefile
new file mode 100644
index 000000000000..d8e17110f25d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/Makefile
@@ -0,0 +1 @@
+subdir-ccflags-y += -I$(src)/..
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
new file mode 100644
index 000000000000..24e3b564964f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
@@ -0,0 +1,237 @@
+/*
+ * Copyright (c) 2018, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "port.h"
+
+/* speed in units of 1Mb */
+static const u32 mlx5e_link_speed[MLX5E_LINK_MODES_NUMBER] = {
+ [MLX5E_1000BASE_CX_SGMII] = 1000,
+ [MLX5E_1000BASE_KX] = 1000,
+ [MLX5E_10GBASE_CX4] = 10000,
+ [MLX5E_10GBASE_KX4] = 10000,
+ [MLX5E_10GBASE_KR] = 10000,
+ [MLX5E_20GBASE_KR2] = 20000,
+ [MLX5E_40GBASE_CR4] = 40000,
+ [MLX5E_40GBASE_KR4] = 40000,
+ [MLX5E_56GBASE_R4] = 56000,
+ [MLX5E_10GBASE_CR] = 10000,
+ [MLX5E_10GBASE_SR] = 10000,
+ [MLX5E_10GBASE_ER] = 10000,
+ [MLX5E_40GBASE_SR4] = 40000,
+ [MLX5E_40GBASE_LR4] = 40000,
+ [MLX5E_50GBASE_SR2] = 50000,
+ [MLX5E_100GBASE_CR4] = 100000,
+ [MLX5E_100GBASE_SR4] = 100000,
+ [MLX5E_100GBASE_KR4] = 100000,
+ [MLX5E_100GBASE_LR4] = 100000,
+ [MLX5E_100BASE_TX] = 100,
+ [MLX5E_1000BASE_T] = 1000,
+ [MLX5E_10GBASE_T] = 10000,
+ [MLX5E_25GBASE_CR] = 25000,
+ [MLX5E_25GBASE_KR] = 25000,
+ [MLX5E_25GBASE_SR] = 25000,
+ [MLX5E_50GBASE_CR2] = 50000,
+ [MLX5E_50GBASE_KR2] = 50000,
+};
+
+u32 mlx5e_port_ptys2speed(u32 eth_proto_oper)
+{
+ unsigned long temp = eth_proto_oper;
+ u32 speed = 0;
+ int i;
+
+ i = find_first_bit(&temp, MLX5E_LINK_MODES_NUMBER);
+ if (i < MLX5E_LINK_MODES_NUMBER)
+ speed = mlx5e_link_speed[i];
+
+ return speed;
+}
+
+int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
+{
+ u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {};
+ u32 eth_proto_oper;
+ int err;
+
+ err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1);
+ if (err)
+ return err;
+
+ eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper);
+ *speed = mlx5e_port_ptys2speed(eth_proto_oper);
+ if (!(*speed)) {
+ mlx5_core_warn(mdev, "cannot get port speed\n");
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+int mlx5e_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
+{
+ u32 max_speed = 0;
+ u32 proto_cap;
+ int err;
+ int i;
+
+ err = mlx5_query_port_proto_cap(mdev, &proto_cap, MLX5_PTYS_EN);
+ if (err)
+ return err;
+
+ for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i)
+ if (proto_cap & MLX5E_PROT_MASK(i))
+ max_speed = max(max_speed, mlx5e_link_speed[i]);
+
+ *speed = max_speed;
+ return 0;
+}
+
+u32 mlx5e_port_speed2linkmodes(u32 speed)
+{
+ u32 link_modes = 0;
+ int i;
+
+ for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
+ if (mlx5e_link_speed[i] == speed)
+ link_modes |= MLX5E_PROT_MASK(i);
+ }
+
+ return link_modes;
+}
+
+int mlx5e_port_query_pbmc(struct mlx5_core_dev *mdev, void *out)
+{
+ int sz = MLX5_ST_SZ_BYTES(pbmc_reg);
+ void *in;
+ int err;
+
+ in = kzalloc(sz, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(pbmc_reg, in, local_port, 1);
+ err = mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PBMC, 0, 0);
+
+ kfree(in);
+ return err;
+}
+
+int mlx5e_port_set_pbmc(struct mlx5_core_dev *mdev, void *in)
+{
+ int sz = MLX5_ST_SZ_BYTES(pbmc_reg);
+ void *out;
+ int err;
+
+ out = kzalloc(sz, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+
+ MLX5_SET(pbmc_reg, in, local_port, 1);
+ err = mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PBMC, 0, 1);
+
+ kfree(out);
+ return err;
+}
+
+/* buffer[i]: buffer that priority i is mapped to */
+int mlx5e_port_query_priority2buffer(struct mlx5_core_dev *mdev, u8 *buffer)
+{
+ int sz = MLX5_ST_SZ_BYTES(pptb_reg);
+ u32 prio_x_buff;
+ void *out;
+ void *in;
+ int prio;
+ int err;
+
+ in = kzalloc(sz, GFP_KERNEL);
+ out = kzalloc(sz, GFP_KERNEL);
+ if (!in || !out) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ MLX5_SET(pptb_reg, in, local_port, 1);
+ err = mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPTB, 0, 0);
+ if (err)
+ goto out;
+
+ prio_x_buff = MLX5_GET(pptb_reg, out, prio_x_buff);
+ for (prio = 0; prio < 8; prio++) {
+ buffer[prio] = (u8)(prio_x_buff >> (4 * prio)) & 0xF;
+ mlx5_core_dbg(mdev, "prio %d, buffer %d\n", prio, buffer[prio]);
+ }
+out:
+ kfree(in);
+ kfree(out);
+ return err;
+}
+
+int mlx5e_port_set_priority2buffer(struct mlx5_core_dev *mdev, u8 *buffer)
+{
+ int sz = MLX5_ST_SZ_BYTES(pptb_reg);
+ u32 prio_x_buff;
+ void *out;
+ void *in;
+ int prio;
+ int err;
+
+ in = kzalloc(sz, GFP_KERNEL);
+ out = kzalloc(sz, GFP_KERNEL);
+ if (!in || !out) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ /* First query the pptb register */
+ MLX5_SET(pptb_reg, in, local_port, 1);
+ err = mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPTB, 0, 0);
+ if (err)
+ goto out;
+
+ memcpy(in, out, sz);
+ MLX5_SET(pptb_reg, in, local_port, 1);
+
+ /* Update the pm and prio_x_buff */
+ MLX5_SET(pptb_reg, in, pm, 0xFF);
+
+ prio_x_buff = 0;
+ for (prio = 0; prio < 8; prio++)
+ prio_x_buff |= (buffer[prio] << (4 * prio));
+ MLX5_SET(pptb_reg, in, prio_x_buff, prio_x_buff);
+
+ err = mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPTB, 0, 1);
+
+out:
+ kfree(in);
+ kfree(out);
+ return err;
+}
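Note the PPTB accessors above pack the whole priority-to-buffer map into the 32-bit prio_x_buff field, one 4-bit buffer index per priority, with priority 0 in the lowest nibble. A standalone userspace sketch of the same pack/unpack arithmetic (illustration only, not driver code):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t buffer[8] = { 0, 0, 1, 1, 2, 2, 3, 3 };	/* prio -> buffer */
	uint32_t prio_x_buff = 0;
	int prio;

	/* pack: 4 bits per priority, as in mlx5e_port_set_priority2buffer() */
	for (prio = 0; prio < 8; prio++)
		prio_x_buff |= (uint32_t)buffer[prio] << (4 * prio);

	/* unpack: mirrors mlx5e_port_query_priority2buffer() */
	for (prio = 0; prio < 8; prio++)
		printf("prio %d -> buffer %u\n", prio,
		       (unsigned)((prio_x_buff >> (4 * prio)) & 0xF));
	return 0;
}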
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.h b/drivers/net/ethernet/mellanox/mlx5/core/en/port.h
new file mode 100644
index 000000000000..f8cbd8194179
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2018, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __MLX5E_EN_PORT_H
+#define __MLX5E_EN_PORT_H
+
+#include <linux/mlx5/driver.h>
+#include "en.h"
+
+u32 mlx5e_port_ptys2speed(u32 eth_proto_oper);
+int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
+int mlx5e_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
+u32 mlx5e_port_speed2linkmodes(u32 speed);
+
+int mlx5e_port_query_pbmc(struct mlx5_core_dev *mdev, void *out);
+int mlx5e_port_set_pbmc(struct mlx5_core_dev *mdev, void *in);
+int mlx5e_port_query_priority2buffer(struct mlx5_core_dev *mdev, u8 *buffer);
+int mlx5e_port_set_priority2buffer(struct mlx5_core_dev *mdev, u8 *buffer);
+#endif
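For context: eth_proto_oper and the PTYS proto_cap mask carry one bit per MLX5E_* link mode, so the helpers declared above are table lookups keyed by bit index. A standalone sketch of the forward (bitmask to speed) and reverse (speed to link modes) mapping, using a made-up five-entry table:

#include <stdio.h>
#include <stdint.h>

static const uint32_t speed_tbl[] = { 1000, 1000, 10000, 10000, 10000 };
#define N_MODES (int)(sizeof(speed_tbl) / sizeof(speed_tbl[0]))

/* forward: the first set bit in proto_oper indexes the speed table */
static uint32_t ptys2speed(uint32_t proto_oper)
{
	int i;

	for (i = 0; i < N_MODES; i++)
		if (proto_oper & (1u << i))
			return speed_tbl[i];
	return 0;
}

/* reverse: collect every mode bit whose table entry matches the speed */
static uint32_t speed2linkmodes(uint32_t speed)
{
	uint32_t modes = 0;
	int i;

	for (i = 0; i < N_MODES; i++)
		if (speed_tbl[i] == speed)
			modes |= 1u << i;
	return modes;
}

int main(void)
{
	printf("speed=%u modes=0x%x\n", (unsigned)ptys2speed(1u << 2),
	       (unsigned)speed2linkmodes(10000));
	return 0;
}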
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
new file mode 100644
index 000000000000..c047da8752da
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
@@ -0,0 +1,327 @@
+/*
+ * Copyright (c) 2018, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "port_buffer.h"
+
+int mlx5e_port_query_buffer(struct mlx5e_priv *priv,
+ struct mlx5e_port_buffer *port_buffer)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int sz = MLX5_ST_SZ_BYTES(pbmc_reg);
+ u32 total_used = 0;
+ void *buffer;
+ void *out;
+ int err;
+ int i;
+
+ out = kzalloc(sz, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+
+ err = mlx5e_port_query_pbmc(mdev, out);
+ if (err)
+ goto out;
+
+ for (i = 0; i < MLX5E_MAX_BUFFER; i++) {
+ buffer = MLX5_ADDR_OF(pbmc_reg, out, buffer[i]);
+ port_buffer->buffer[i].lossy =
+ MLX5_GET(bufferx_reg, buffer, lossy);
+ port_buffer->buffer[i].epsb =
+ MLX5_GET(bufferx_reg, buffer, epsb);
+ port_buffer->buffer[i].size =
+ MLX5_GET(bufferx_reg, buffer, size) << MLX5E_BUFFER_CELL_SHIFT;
+ port_buffer->buffer[i].xon =
+ MLX5_GET(bufferx_reg, buffer, xon_threshold) << MLX5E_BUFFER_CELL_SHIFT;
+ port_buffer->buffer[i].xoff =
+ MLX5_GET(bufferx_reg, buffer, xoff_threshold) << MLX5E_BUFFER_CELL_SHIFT;
+ total_used += port_buffer->buffer[i].size;
+
+ mlx5e_dbg(HW, priv, "buffer %d: size=%d, xon=%d, xoff=%d, epsb=%d, lossy=%d\n", i,
+ port_buffer->buffer[i].size,
+ port_buffer->buffer[i].xon,
+ port_buffer->buffer[i].xoff,
+ port_buffer->buffer[i].epsb,
+ port_buffer->buffer[i].lossy);
+ }
+
+ port_buffer->port_buffer_size =
+ MLX5_GET(pbmc_reg, out, port_buffer_size) << MLX5E_BUFFER_CELL_SHIFT;
+ port_buffer->spare_buffer_size =
+ port_buffer->port_buffer_size - total_used;
+
+ mlx5e_dbg(HW, priv, "total buffer size=%d, spare buffer size=%d\n",
+ port_buffer->port_buffer_size,
+ port_buffer->spare_buffer_size);
+out:
+ kfree(out);
+ return err;
+}
+
+static int port_set_buffer(struct mlx5e_priv *priv,
+ struct mlx5e_port_buffer *port_buffer)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int sz = MLX5_ST_SZ_BYTES(pbmc_reg);
+ void *buffer;
+ void *in;
+ int err;
+ int i;
+
+ in = kzalloc(sz, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ err = mlx5e_port_query_pbmc(mdev, in);
+ if (err)
+ goto out;
+
+ for (i = 0; i < MLX5E_MAX_BUFFER; i++) {
+ buffer = MLX5_ADDR_OF(pbmc_reg, in, buffer[i]);
+
+ MLX5_SET(bufferx_reg, buffer, size,
+ port_buffer->buffer[i].size >> MLX5E_BUFFER_CELL_SHIFT);
+ MLX5_SET(bufferx_reg, buffer, lossy,
+ port_buffer->buffer[i].lossy);
+ MLX5_SET(bufferx_reg, buffer, xoff_threshold,
+ port_buffer->buffer[i].xoff >> MLX5E_BUFFER_CELL_SHIFT);
+ MLX5_SET(bufferx_reg, buffer, xon_threshold,
+ port_buffer->buffer[i].xon >> MLX5E_BUFFER_CELL_SHIFT);
+ }
+
+ err = mlx5e_port_set_pbmc(mdev, in);
+out:
+ kfree(in);
+ return err;
+}
+
+/* xoff = (301 + 2.16 * len[m]) * speed[Gbps] + 2.72 * MTU[B], in bytes */
+static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
+{
+ u32 speed;
+ u32 xoff;
+ int err;
+
+ err = mlx5e_port_linkspeed(priv->mdev, &speed);
+ if (err)
+ return 0;
+
+ xoff = (301 + 216 * priv->dcbx.cable_len / 100) * speed / 1000 + 272 * mtu / 100;
+
+ mlx5e_dbg(HW, priv, "%s: xoff=%d\n", __func__, xoff);
+ return xoff;
+}
+
+static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
+ u32 xoff, unsigned int mtu)
+{
+ int i;
+
+ for (i = 0; i < MLX5E_MAX_BUFFER; i++) {
+ if (port_buffer->buffer[i].lossy) {
+ port_buffer->buffer[i].xoff = 0;
+ port_buffer->buffer[i].xon = 0;
+ continue;
+ }
+
+ if (port_buffer->buffer[i].size <
+ (xoff + mtu + (1 << MLX5E_BUFFER_CELL_SHIFT)))
+ return -ENOMEM;
+
+ port_buffer->buffer[i].xoff = port_buffer->buffer[i].size - xoff;
+ port_buffer->buffer[i].xon = port_buffer->buffer[i].xoff - mtu;
+ }
+
+ return 0;
+}
+
+/**
+ * update_buffer_lossy - update the lossy bit of each port buffer
+ * @mtu: device's MTU
+ * @pfc_en: current PFC configuration
+ * @buffer: current priority to buffer mapping
+ * @xoff: xoff value
+ * @port_buffer: <output> port receive buffer configuration
+ * @change: <output> set to true if the buffer configuration was modified
+ *
+ * Update the buffer configuration based on the PFC configuration and the
+ * priority to buffer mapping. A buffer's lossy bit becomes:
+ *   lossless - if at least one PFC-enabled priority is mapped to this buffer
+ *   lossy - if all priorities mapped to this buffer are PFC-disabled
+ *
+ * Return: 0 if no error.
+ */
+static int update_buffer_lossy(unsigned int mtu,
+ u8 pfc_en, u8 *buffer, u32 xoff,
+ struct mlx5e_port_buffer *port_buffer,
+ bool *change)
+{
+ bool changed = false;
+ u8 lossy_count;
+ u8 prio_count;
+ u8 lossy;
+ int prio;
+ int err;
+ int i;
+
+ for (i = 0; i < MLX5E_MAX_BUFFER; i++) {
+ prio_count = 0;
+ lossy_count = 0;
+
+ for (prio = 0; prio < MLX5E_MAX_PRIORITY; prio++) {
+ if (buffer[prio] != i)
+ continue;
+
+ prio_count++;
+ lossy_count += !(pfc_en & (1 << prio));
+ }
+
+ if (lossy_count == prio_count)
+ lossy = 1;
+ else /* lossy_count < prio_count */
+ lossy = 0;
+
+ if (lossy != port_buffer->buffer[i].lossy) {
+ port_buffer->buffer[i].lossy = lossy;
+ changed = true;
+ }
+ }
+
+ if (changed) {
+ err = update_xoff_threshold(port_buffer, xoff, mtu);
+ if (err)
+ return err;
+
+ *change = true;
+ }
+
+ return 0;
+}
+
+int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
+ u32 change, unsigned int mtu,
+ struct ieee_pfc *pfc,
+ u32 *buffer_size,
+ u8 *prio2buffer)
+{
+ struct mlx5e_port_buffer port_buffer;
+ u32 xoff = calculate_xoff(priv, mtu);
+ bool update_prio2buffer = false;
+ u8 buffer[MLX5E_MAX_PRIORITY];
+ bool update_buffer = false;
+ u32 total_used = 0;
+ u8 curr_pfc_en;
+ int err;
+ int i;
+
+ mlx5e_dbg(HW, priv, "%s: change=%x\n", __func__, change);
+
+ err = mlx5e_port_query_buffer(priv, &port_buffer);
+ if (err)
+ return err;
+
+ if (change & MLX5E_PORT_BUFFER_CABLE_LEN) {
+ update_buffer = true;
+ err = update_xoff_threshold(&port_buffer, xoff, mtu);
+ if (err)
+ return err;
+ }
+
+ if (change & MLX5E_PORT_BUFFER_PFC) {
+ err = mlx5e_port_query_priority2buffer(priv->mdev, buffer);
+ if (err)
+ return err;
+
+ err = update_buffer_lossy(mtu, pfc->pfc_en, buffer, xoff,
+ &port_buffer, &update_buffer);
+ if (err)
+ return err;
+ }
+
+ if (change & MLX5E_PORT_BUFFER_PRIO2BUFFER) {
+ update_prio2buffer = true;
+ err = mlx5_query_port_pfc(priv->mdev, &curr_pfc_en, NULL);
+ if (err)
+ return err;
+
+ err = update_buffer_lossy(mtu, curr_pfc_en, prio2buffer, xoff,
+ &port_buffer, &update_buffer);
+ if (err)
+ return err;
+ }
+
+ if (change & MLX5E_PORT_BUFFER_SIZE) {
+ for (i = 0; i < MLX5E_MAX_BUFFER; i++) {
+ mlx5e_dbg(HW, priv, "%s: buffer[%d]=%d\n", __func__, i, buffer_size[i]);
+ if (!port_buffer.buffer[i].lossy && !buffer_size[i]) {
+ mlx5e_dbg(HW, priv, "%s: lossless buffer[%d] size cannot be zero\n",
+ __func__, i);
+ return -EINVAL;
+ }
+
+ port_buffer.buffer[i].size = buffer_size[i];
+ total_used += buffer_size[i];
+ }
+
+ mlx5e_dbg(HW, priv, "%s: total buffer requested=%d\n", __func__, total_used);
+
+ if (total_used > port_buffer.port_buffer_size)
+ return -EINVAL;
+
+ update_buffer = true;
+ err = update_xoff_threshold(&port_buffer, xoff, mtu);
+ if (err)
+ return err;
+ }
+
+ /* Need to update buffer configuration if xoff value is changed */
+ if (!update_buffer && xoff != priv->dcbx.xoff) {
+ update_buffer = true;
+ err = update_xoff_threshold(&port_buffer, xoff, mtu);
+ if (err)
+ return err;
+ }
+ priv->dcbx.xoff = xoff;
+
+ /* Apply the settings */
+ if (update_buffer) {
+ err = port_set_buffer(priv, &port_buffer);
+ if (err)
+ return err;
+ }
+
+ if (update_prio2buffer)
+ err = mlx5e_port_set_priority2buffer(priv->mdev, prio2buffer);
+
+ return err;
+}
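calculate_xoff() keeps the 2.16 and 2.72 coefficients as integers scaled by 100, and since mlx5e_port_linkspeed() reports the speed in Mb/s the division by 1000 converts it to Gb/s. A standalone sketch of the arithmetic with made-up inputs (100 m cable, 100 Gb/s link, 1500-byte MTU):

#include <stdio.h>
#include <stdint.h>

/* xoff = (301 + 2.16 * len[m]) * speed[Gbps] + 2.72 * MTU[B], in bytes */
static uint32_t calc_xoff(uint32_t cable_len, uint32_t speed_mbps,
			  uint32_t mtu)
{
	return (301 + 216 * cable_len / 100) * speed_mbps / 1000 +
	       272 * mtu / 100;
}

int main(void)
{
	/* 100 m cable at 100 Gb/s with a 1500-byte MTU:
	 * (301 + 216) * 100 + 4080 = 55780 bytes
	 */
	printf("xoff = %u bytes\n", (unsigned)calc_xoff(100, 100000, 1500));
	return 0;
}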
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h
new file mode 100644
index 000000000000..34f55b81a0de
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2018, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __MLX5_EN_PORT_BUFFER_H__
+#define __MLX5_EN_PORT_BUFFER_H__
+
+#include "en.h"
+#include "port.h"
+
+#define MLX5E_MAX_BUFFER 8
+#define MLX5E_BUFFER_CELL_SHIFT 7
+#define MLX5E_DEFAULT_CABLE_LEN 7 /* 7 meters */
+
+#define MLX5_BUFFER_SUPPORTED(mdev) (MLX5_CAP_GEN(mdev, pcam_reg) && \
+ MLX5_CAP_PCAM_REG(mdev, pbmc) && \
+ MLX5_CAP_PCAM_REG(mdev, pptb))
+
+enum {
+ MLX5E_PORT_BUFFER_CABLE_LEN = BIT(0),
+ MLX5E_PORT_BUFFER_PFC = BIT(1),
+ MLX5E_PORT_BUFFER_PRIO2BUFFER = BIT(2),
+ MLX5E_PORT_BUFFER_SIZE = BIT(3),
+};
+
+struct mlx5e_bufferx_reg {
+ u8 lossy;
+ u8 epsb;
+ u32 size;
+ u32 xoff;
+ u32 xon;
+};
+
+struct mlx5e_port_buffer {
+ u32 port_buffer_size;
+ u32 spare_buffer_size;
+ struct mlx5e_bufferx_reg buffer[MLX5E_MAX_BUFFER];
+};
+
+int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
+ u32 change, unsigned int mtu,
+ struct ieee_pfc *pfc,
+ u32 *buffer_size,
+ u8 *prio2buffer);
+
+int mlx5e_port_query_buffer(struct mlx5e_priv *priv,
+ struct mlx5e_port_buffer *port_buffer);
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
new file mode 100644
index 000000000000..f20074dbef32
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef __MLX5E_EN_ACCEL_H__
+#define __MLX5E_EN_ACCEL_H__
+
+#ifdef CONFIG_MLX5_ACCEL
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include "en_accel/ipsec_rxtx.h"
+#include "en_accel/tls_rxtx.h"
+#include "en.h"
+
+static inline struct sk_buff *mlx5e_accel_handle_tx(struct sk_buff *skb,
+ struct mlx5e_txqsq *sq,
+ struct net_device *dev,
+ struct mlx5e_tx_wqe **wqe,
+ u16 *pi)
+{
+#ifdef CONFIG_MLX5_EN_TLS
+ if (test_bit(MLX5E_SQ_STATE_TLS, &sq->state)) {
+ skb = mlx5e_tls_handle_tx_skb(dev, sq, skb, wqe, pi);
+ if (unlikely(!skb))
+ return NULL;
+ }
+#endif
+
+#ifdef CONFIG_MLX5_EN_IPSEC
+ if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state)) {
+ skb = mlx5e_ipsec_handle_tx_skb(dev, *wqe, skb);
+ if (unlikely(!skb))
+ return NULL;
+ }
+#endif
+
+ return skb;
+}
+
+#endif /* CONFIG_MLX5_ACCEL */
+
+#endif /* __MLX5E_EN_ACCEL_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
index 1198fc1eba4c..93bf10e6508c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -45,9 +45,6 @@
#define MLX5E_IPSEC_SADB_RX_BITS 10
#define MLX5E_IPSEC_ESN_SCOPE_MID 0x80000000L
-#define MLX5E_METADATA_ETHER_TYPE (0x8CE4)
-#define MLX5E_METADATA_ETHER_LEN 8
-
struct mlx5e_priv;
struct mlx5e_ipsec_sw_stats {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
new file mode 100644
index 000000000000..d167845271c3
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
@@ -0,0 +1,197 @@
+/*
+ * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/netdevice.h>
+#include <net/ipv6.h>
+#include "en_accel/tls.h"
+#include "accel/tls.h"
+
+static void mlx5e_tls_set_ipv4_flow(void *flow, struct sock *sk)
+{
+ struct inet_sock *inet = inet_sk(sk);
+
+ MLX5_SET(tls_flow, flow, ipv6, 0);
+ memcpy(MLX5_ADDR_OF(tls_flow, flow, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
+ &inet->inet_daddr, MLX5_FLD_SZ_BYTES(ipv4_layout, ipv4));
+ memcpy(MLX5_ADDR_OF(tls_flow, flow, src_ipv4_src_ipv6.ipv4_layout.ipv4),
+ &inet->inet_rcv_saddr, MLX5_FLD_SZ_BYTES(ipv4_layout, ipv4));
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static void mlx5e_tls_set_ipv6_flow(void *flow, struct sock *sk)
+{
+ struct ipv6_pinfo *np = inet6_sk(sk);
+
+ MLX5_SET(tls_flow, flow, ipv6, 1);
+ memcpy(MLX5_ADDR_OF(tls_flow, flow, dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+ &sk->sk_v6_daddr, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
+ memcpy(MLX5_ADDR_OF(tls_flow, flow, src_ipv4_src_ipv6.ipv6_layout.ipv6),
+ &np->saddr, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
+}
+#endif
+
+static void mlx5e_tls_set_flow_tcp_ports(void *flow, struct sock *sk)
+{
+ struct inet_sock *inet = inet_sk(sk);
+
+ memcpy(MLX5_ADDR_OF(tls_flow, flow, src_port), &inet->inet_sport,
+ MLX5_FLD_SZ_BYTES(tls_flow, src_port));
+ memcpy(MLX5_ADDR_OF(tls_flow, flow, dst_port), &inet->inet_dport,
+ MLX5_FLD_SZ_BYTES(tls_flow, dst_port));
+}
+
+static int mlx5e_tls_set_flow(void *flow, struct sock *sk, u32 caps)
+{
+ switch (sk->sk_family) {
+ case AF_INET:
+ mlx5e_tls_set_ipv4_flow(flow, sk);
+ break;
+#if IS_ENABLED(CONFIG_IPV6)
+ case AF_INET6:
+ if (!sk->sk_ipv6only &&
+ ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED) {
+ mlx5e_tls_set_ipv4_flow(flow, sk);
+ break;
+ }
+ if (!(caps & MLX5_ACCEL_TLS_IPV6))
+ goto error_out;
+
+ mlx5e_tls_set_ipv6_flow(flow, sk);
+ break;
+#endif
+ default:
+ goto error_out;
+ }
+
+ mlx5e_tls_set_flow_tcp_ports(flow, sk);
+ return 0;
+error_out:
+ return -EINVAL;
+}
+
+static int mlx5e_tls_add(struct net_device *netdev, struct sock *sk,
+ enum tls_offload_ctx_dir direction,
+ struct tls_crypto_info *crypto_info,
+ u32 start_offload_tcp_sn)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 caps = mlx5_accel_tls_device_caps(mdev);
+ int ret = -ENOMEM;
+ void *flow;
+
+ if (direction != TLS_OFFLOAD_CTX_DIR_TX)
+ return -EINVAL;
+
+ flow = kzalloc(MLX5_ST_SZ_BYTES(tls_flow), GFP_KERNEL);
+ if (!flow)
+ return ret;
+
+ ret = mlx5e_tls_set_flow(flow, sk, caps);
+ if (ret)
+ goto free_flow;
+
+ if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
+ struct mlx5e_tls_offload_context *tx_ctx =
+ mlx5e_get_tls_tx_context(tls_ctx);
+ u32 swid;
+
+ ret = mlx5_accel_tls_add_tx_flow(mdev, flow, crypto_info,
+ start_offload_tcp_sn, &swid);
+ if (ret < 0)
+ goto free_flow;
+
+ tx_ctx->swid = htonl(swid);
+ tx_ctx->expected_seq = start_offload_tcp_sn;
+ }
+
+ return 0;
+free_flow:
+ kfree(flow);
+ return ret;
+}
+
+static void mlx5e_tls_del(struct net_device *netdev,
+ struct tls_context *tls_ctx,
+ enum tls_offload_ctx_dir direction)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
+ u32 swid = ntohl(mlx5e_get_tls_tx_context(tls_ctx)->swid);
+
+ mlx5_accel_tls_del_tx_flow(priv->mdev, swid);
+ } else {
+ netdev_err(netdev, "unsupported direction %d\n", direction);
+ }
+}
+
+static const struct tlsdev_ops mlx5e_tls_ops = {
+ .tls_dev_add = mlx5e_tls_add,
+ .tls_dev_del = mlx5e_tls_del,
+};
+
+void mlx5e_tls_build_netdev(struct mlx5e_priv *priv)
+{
+ struct net_device *netdev = priv->netdev;
+
+ if (!mlx5_accel_is_tls_device(priv->mdev))
+ return;
+
+ netdev->features |= NETIF_F_HW_TLS_TX;
+ netdev->hw_features |= NETIF_F_HW_TLS_TX;
+ netdev->tlsdev_ops = &mlx5e_tls_ops;
+}
+
+int mlx5e_tls_init(struct mlx5e_priv *priv)
+{
+ struct mlx5e_tls *tls = kzalloc(sizeof(*tls), GFP_KERNEL);
+
+ if (!tls)
+ return -ENOMEM;
+
+ priv->tls = tls;
+ return 0;
+}
+
+void mlx5e_tls_cleanup(struct mlx5e_priv *priv)
+{
+ struct mlx5e_tls *tls = priv->tls;
+
+ if (!tls)
+ return;
+
+ kfree(tls);
+ priv->tls = NULL;
+}
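For context, .tls_dev_add is invoked by the kernel TLS stack when a socket that already has the "tls" ULP installed enables TLS_TX with its handshake keys; the stack attempts device offload first and falls back to software crypto if the driver refuses. A hedged userspace sketch based on the kernel TLS socket API (the key material below is a placeholder and must come from a real TLS handshake):

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <linux/tls.h>

#ifndef TCP_ULP
#define TCP_ULP 31
#endif
#ifndef SOL_TLS
#define SOL_TLS 282
#endif

static int enable_ktls_tx(int sock)
{
	struct tls12_crypto_info_aes_gcm_128 ci;

	memset(&ci, 0, sizeof(ci));
	ci.info.version = TLS_1_2_VERSION;
	ci.info.cipher_type = TLS_CIPHER_AES_GCM_128;
	/* ci.iv / ci.key / ci.salt / ci.rec_seq: fill from the handshake */

	if (setsockopt(sock, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")))
		return -1;
	return setsockopt(sock, SOL_TLS, TLS_TX, &ci, sizeof(ci));
}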
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h
new file mode 100644
index 000000000000..b6162178f621
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+#ifndef __MLX5E_TLS_H__
+#define __MLX5E_TLS_H__
+
+#ifdef CONFIG_MLX5_EN_TLS
+
+#include <net/tls.h>
+#include "en.h"
+
+struct mlx5e_tls_sw_stats {
+ atomic64_t tx_tls_drop_metadata;
+ atomic64_t tx_tls_drop_resync_alloc;
+ atomic64_t tx_tls_drop_no_sync_data;
+ atomic64_t tx_tls_drop_bypass_required;
+};
+
+struct mlx5e_tls {
+ struct mlx5e_tls_sw_stats sw_stats;
+};
+
+struct mlx5e_tls_offload_context {
+ struct tls_offload_context base;
+ u32 expected_seq;
+ __be32 swid;
+};
+
+static inline struct mlx5e_tls_offload_context *
+mlx5e_get_tls_tx_context(struct tls_context *tls_ctx)
+{
+ BUILD_BUG_ON(sizeof(struct mlx5e_tls_offload_context) >
+ TLS_OFFLOAD_CONTEXT_SIZE);
+ return container_of(tls_offload_ctx(tls_ctx),
+ struct mlx5e_tls_offload_context,
+ base);
+}
+
+void mlx5e_tls_build_netdev(struct mlx5e_priv *priv);
+int mlx5e_tls_init(struct mlx5e_priv *priv);
+void mlx5e_tls_cleanup(struct mlx5e_priv *priv);
+
+int mlx5e_tls_get_count(struct mlx5e_priv *priv);
+int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data);
+int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data);
+
+#else
+
+static inline void mlx5e_tls_build_netdev(struct mlx5e_priv *priv) { }
+static inline int mlx5e_tls_init(struct mlx5e_priv *priv) { return 0; }
+static inline void mlx5e_tls_cleanup(struct mlx5e_priv *priv) { }
+static inline int mlx5e_tls_get_count(struct mlx5e_priv *priv) { return 0; }
+static inline int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data) { return 0; }
+static inline int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data) { return 0; }
+
+#endif
+
+#endif /* __MLX5E_TLS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
new file mode 100644
index 000000000000..15aef71d1957
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
@@ -0,0 +1,278 @@
+/*
+ * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include "en_accel/tls.h"
+#include "en_accel/tls_rxtx.h"
+
+#define SYNDROME_OFFLOAD_REQUIRED 32
+#define SYNDROME_SYNC 33
+
+struct sync_info {
+ u64 rcd_sn;
+ s32 sync_len;
+ int nr_frags;
+ skb_frag_t frags[MAX_SKB_FRAGS];
+};
+
+struct mlx5e_tls_metadata {
+ /* One byte of syndrome followed by 3 bytes of swid */
+ __be32 syndrome_swid;
+ __be16 first_seq;
+ /* packet type ID field */
+ __be16 ethertype;
+} __packed;
+
+static int mlx5e_tls_add_metadata(struct sk_buff *skb, __be32 swid)
+{
+ struct mlx5e_tls_metadata *pet;
+ struct ethhdr *eth;
+
+ if (skb_cow_head(skb, sizeof(struct mlx5e_tls_metadata)))
+ return -ENOMEM;
+
+ eth = (struct ethhdr *)skb_push(skb, sizeof(struct mlx5e_tls_metadata));
+ skb->mac_header -= sizeof(struct mlx5e_tls_metadata);
+ pet = (struct mlx5e_tls_metadata *)(eth + 1);
+
+ memmove(skb->data, skb->data + sizeof(struct mlx5e_tls_metadata),
+ 2 * ETH_ALEN);
+
+ eth->h_proto = cpu_to_be16(MLX5E_METADATA_ETHER_TYPE);
+ pet->syndrome_swid = htonl(SYNDROME_OFFLOAD_REQUIRED << 24) | swid;
+
+ return 0;
+}
+
+static int mlx5e_tls_get_sync_data(struct mlx5e_tls_offload_context *context,
+ u32 tcp_seq, struct sync_info *info)
+{
+ int remaining, i = 0, ret = -EINVAL;
+ struct tls_record_info *record;
+ unsigned long flags;
+ s32 sync_size;
+
+ spin_lock_irqsave(&context->base.lock, flags);
+ record = tls_get_record(&context->base, tcp_seq, &info->rcd_sn);
+
+ if (unlikely(!record))
+ goto out;
+
+ sync_size = tcp_seq - tls_record_start_seq(record);
+ info->sync_len = sync_size;
+ if (unlikely(sync_size < 0)) {
+ if (tls_record_is_start_marker(record))
+ goto done;
+
+ goto out;
+ }
+
+ remaining = sync_size;
+ while (remaining > 0) {
+ info->frags[i] = record->frags[i];
+ __skb_frag_ref(&info->frags[i]);
+ remaining -= skb_frag_size(&info->frags[i]);
+
+ if (remaining < 0)
+ skb_frag_size_add(&info->frags[i], remaining);
+
+ i++;
+ }
+ info->nr_frags = i;
+done:
+ ret = 0;
+out:
+ spin_unlock_irqrestore(&context->base.lock, flags);
+ return ret;
+}
+
+static void mlx5e_tls_complete_sync_skb(struct sk_buff *skb,
+ struct sk_buff *nskb, u32 tcp_seq,
+ int headln, __be64 rcd_sn)
+{
+ struct mlx5e_tls_metadata *pet;
+ u8 syndrome = SYNDROME_SYNC;
+ struct iphdr *iph;
+ struct tcphdr *th;
+ int data_len, mss;
+
+ nskb->dev = skb->dev;
+ skb_reset_mac_header(nskb);
+ skb_set_network_header(nskb, skb_network_offset(skb));
+ skb_set_transport_header(nskb, skb_transport_offset(skb));
+ memcpy(nskb->data, skb->data, headln);
+ memcpy(nskb->data + headln, &rcd_sn, sizeof(rcd_sn));
+
+ iph = ip_hdr(nskb);
+ iph->tot_len = htons(nskb->len - skb_network_offset(nskb));
+ th = tcp_hdr(nskb);
+ data_len = nskb->len - headln;
+ tcp_seq -= data_len;
+ th->seq = htonl(tcp_seq);
+
+ mss = nskb->dev->mtu - (headln - skb_network_offset(nskb));
+ skb_shinfo(nskb)->gso_size = 0;
+ if (data_len > mss) {
+ skb_shinfo(nskb)->gso_size = mss;
+ skb_shinfo(nskb)->gso_segs = DIV_ROUND_UP(data_len, mss);
+ }
+ skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type;
+
+ pet = (struct mlx5e_tls_metadata *)(nskb->data + sizeof(struct ethhdr));
+ memcpy(pet, &syndrome, sizeof(syndrome));
+ pet->first_seq = htons(tcp_seq);
+
+ /* MLX5 devices don't care about the checksum partial start, offset
+ * and pseudo header
+ */
+ nskb->ip_summed = CHECKSUM_PARTIAL;
+
+ nskb->xmit_more = 1;
+ nskb->queue_mapping = skb->queue_mapping;
+}
+
+static struct sk_buff *
+mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context *context,
+ struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ struct mlx5e_tx_wqe **wqe,
+ u16 *pi,
+ struct mlx5e_tls *tls)
+{
+ u32 tcp_seq = ntohl(tcp_hdr(skb)->seq);
+ struct sync_info info;
+ struct sk_buff *nskb;
+ int linear_len = 0;
+ int headln;
+ int i;
+
+ sq->stats->tls_ooo++;
+
+ if (mlx5e_tls_get_sync_data(context, tcp_seq, &info)) {
+ /* We might get here if a retransmission reaches the driver
+ * after the relevant record is acked.
+ * It should be safe to drop the packet in this case
+ */
+ atomic64_inc(&tls->sw_stats.tx_tls_drop_no_sync_data);
+ goto err_out;
+ }
+
+ if (unlikely(info.sync_len < 0)) {
+ u32 payload;
+
+ headln = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ payload = skb->len - headln;
+ if (likely(payload <= -info.sync_len))
+ /* SKB payload doesn't require offload */
+ return skb;
+
+ atomic64_inc(&tls->sw_stats.tx_tls_drop_bypass_required);
+ goto err_out;
+ }
+
+ if (unlikely(mlx5e_tls_add_metadata(skb, context->swid))) {
+ atomic64_inc(&tls->sw_stats.tx_tls_drop_metadata);
+ goto err_out;
+ }
+
+ headln = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ linear_len += headln + sizeof(info.rcd_sn);
+ nskb = alloc_skb(linear_len, GFP_ATOMIC);
+ if (unlikely(!nskb)) {
+ atomic64_inc(&tls->sw_stats.tx_tls_drop_resync_alloc);
+ goto err_out;
+ }
+
+ context->expected_seq = tcp_seq + skb->len - headln;
+ skb_put(nskb, linear_len);
+ for (i = 0; i < info.nr_frags; i++)
+ skb_shinfo(nskb)->frags[i] = info.frags[i];
+
+ skb_shinfo(nskb)->nr_frags = info.nr_frags;
+ nskb->data_len = info.sync_len;
+ nskb->len += info.sync_len;
+ sq->stats->tls_resync_bytes += nskb->len;
+ mlx5e_tls_complete_sync_skb(skb, nskb, tcp_seq, headln,
+ cpu_to_be64(info.rcd_sn));
+ mlx5e_sq_xmit(sq, nskb, *wqe, *pi);
+ mlx5e_sq_fetch_wqe(sq, wqe, pi);
+ return skb;
+
+err_out:
+ dev_kfree_skb_any(skb);
+ return NULL;
+}
+
+struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev,
+ struct mlx5e_txqsq *sq,
+ struct sk_buff *skb,
+ struct mlx5e_tx_wqe **wqe,
+ u16 *pi)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5e_tls_offload_context *context;
+ struct tls_context *tls_ctx;
+ u32 expected_seq;
+ int datalen;
+ u32 skb_seq;
+
+ if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
+ goto out;
+
+ datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ if (!datalen)
+ goto out;
+
+ tls_ctx = tls_get_ctx(skb->sk);
+ if (unlikely(tls_ctx->netdev != netdev))
+ goto out;
+
+ skb_seq = ntohl(tcp_hdr(skb)->seq);
+ context = mlx5e_get_tls_tx_context(tls_ctx);
+ expected_seq = context->expected_seq;
+
+ if (unlikely(expected_seq != skb_seq)) {
+ skb = mlx5e_tls_handle_ooo(context, sq, skb, wqe, pi, priv->tls);
+ goto out;
+ }
+
+ if (unlikely(mlx5e_tls_add_metadata(skb, context->swid))) {
+ atomic64_inc(&priv->tls->sw_stats.tx_tls_drop_metadata);
+ dev_kfree_skb_any(skb);
+ skb = NULL;
+ goto out;
+ }
+
+ context->expected_seq = skb_seq + datalen;
+out:
+ return skb;
+}
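The metadata prepended by mlx5e_tls_add_metadata() sits behind the MLX5E_METADATA_ETHER_TYPE pseudo-ethertype as one syndrome byte followed by the 3-byte swid, which is why the context stores the swid pre-converted with htonl(). A standalone sketch of the syndrome_swid packing (swid value made up):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

int main(void)
{
	uint32_t swid = 0x00123456;	/* 24-bit flow id from the FW */
	uint32_t swid_be = htonl(swid);	/* as kept in the offload context */
	/* one byte of syndrome (32 = offload required), then 3 bytes of swid */
	uint32_t syndrome_swid = htonl(32u << 24) | swid_be;

	printf("syndrome_swid on the wire: 0x%08x\n",
	       (unsigned)ntohl(syndrome_swid));	/* 0x20123456 */
	return 0;
}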
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h
new file mode 100644
index 000000000000..405dfd302225
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef __MLX5E_TLS_RXTX_H__
+#define __MLX5E_TLS_RXTX_H__
+
+#ifdef CONFIG_MLX5_EN_TLS
+
+#include <linux/skbuff.h>
+#include "en.h"
+
+struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev,
+ struct mlx5e_txqsq *sq,
+ struct sk_buff *skb,
+ struct mlx5e_tx_wqe **wqe,
+ u16 *pi);
+
+#endif /* CONFIG_MLX5_EN_TLS */
+
+#endif /* __MLX5E_TLS_RXTX_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c
new file mode 100644
index 000000000000..01468ec27446
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/ethtool.h>
+#include <net/sock.h>
+
+#include "en.h"
+#include "accel/tls.h"
+#include "fpga/sdk.h"
+#include "en_accel/tls.h"
+
+static const struct counter_desc mlx5e_tls_sw_stats_desc[] = {
+ { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_drop_metadata) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_drop_resync_alloc) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_drop_no_sync_data) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_drop_bypass_required) },
+};
+
+#define MLX5E_READ_CTR_ATOMIC64(ptr, dsc, i) \
+ atomic64_read((atomic64_t *)((char *)(ptr) + (dsc)[i].offset))
+
+#define NUM_TLS_SW_COUNTERS ARRAY_SIZE(mlx5e_tls_sw_stats_desc)
+
+int mlx5e_tls_get_count(struct mlx5e_priv *priv)
+{
+ if (!priv->tls)
+ return 0;
+
+ return NUM_TLS_SW_COUNTERS;
+}
+
+int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data)
+{
+ unsigned int i, idx = 0;
+
+ if (!priv->tls)
+ return 0;
+
+ for (i = 0; i < NUM_TLS_SW_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ mlx5e_tls_sw_stats_desc[i].format);
+
+ return NUM_TLS_SW_COUNTERS;
+}
+
+int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data)
+{
+ int i, idx = 0;
+
+ if (!priv->tls)
+ return 0;
+
+ for (i = 0; i < NUM_TLS_SW_COUNTERS; i++)
+ data[idx++] =
+ MLX5E_READ_CTR_ATOMIC64(&priv->tls->sw_stats,
+ mlx5e_tls_sw_stats_desc, i);
+
+ return NUM_TLS_SW_COUNTERS;
+}
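MLX5E_DECLARE_STAT records each counter as a (format string, byte offset) pair, so MLX5E_READ_CTR_ATOMIC64 can walk a single descriptor table for both the ethtool strings and the values. A standalone sketch of that offset-based pattern, roughly what the macros expand to (plain u64 instead of atomic64_t for brevity):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct sw_stats {
	uint64_t tx_drop_metadata;
	uint64_t tx_drop_resync_alloc;
};

struct counter_desc {
	const char *format;
	size_t offset;
};

#define DECLARE_STAT(type, fld) { #fld, offsetof(type, fld) }

static const struct counter_desc desc[] = {
	DECLARE_STAT(struct sw_stats, tx_drop_metadata),
	DECLARE_STAT(struct sw_stats, tx_drop_resync_alloc),
};

int main(void)
{
	struct sw_stats s = { .tx_drop_metadata = 3, .tx_drop_resync_alloc = 1 };
	size_t i;

	/* read each counter through its recorded byte offset */
	for (i = 0; i < sizeof(desc) / sizeof(desc[0]); i++)
		printf("%s: %llu\n", desc[i].format, (unsigned long long)
		       *(uint64_t *)((char *)&s + desc[i].offset));
	return 0;
}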
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index 610d485c4b03..75e4308ba786 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -213,7 +213,7 @@ out:
}
#define MLX5E_ARFS_NUM_GROUPS 2
-#define MLX5E_ARFS_GROUP1_SIZE BIT(12)
+#define MLX5E_ARFS_GROUP1_SIZE (BIT(16) - 1)
#define MLX5E_ARFS_GROUP2_SIZE BIT(0)
#define MLX5E_ARFS_TABLE_SIZE (MLX5E_ARFS_GROUP1_SIZE +\
MLX5E_ARFS_GROUP2_SIZE)
@@ -565,7 +565,7 @@ static void arfs_modify_rule_rq(struct mlx5e_priv *priv,
err = mlx5_modify_rule_destination(rule, &dst, NULL);
if (err)
netdev_warn(priv->netdev,
- "Failed to modfiy aRFS rule destination to rq=%d\n", rxq);
+ "Failed to modify aRFS rule destination to rq=%d\n", rxq);
}
static void arfs_handle_work(struct work_struct *work)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index c641d5656b2d..0a52f31fef37 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -32,8 +32,8 @@
#include <linux/device.h>
#include <linux/netdevice.h>
#include "en.h"
-
-#define MLX5E_MAX_PRIORITY 8
+#include "en/port.h"
+#include "en/port_buffer.h"
#define MLX5E_100MB (100000)
#define MLX5E_1GB (1000000)
@@ -41,6 +41,9 @@
#define MLX5E_CEE_STATE_UP 1
#define MLX5E_CEE_STATE_DOWN 0
+/* Max supported cable length is 1000 meters */
+#define MLX5E_MAX_CABLE_LENGTH 1000
+
enum {
MLX5E_VENDOR_TC_GROUP_NUM = 7,
MLX5E_LOWEST_PRIO_GROUP = 0,
@@ -338,6 +341,9 @@ static int mlx5e_dcbnl_ieee_getpfc(struct net_device *dev,
pfc->indications[i] = PPORT_PER_PRIO_GET(pstats, i, rx_pause);
}
+ if (MLX5_BUFFER_SUPPORTED(mdev))
+ pfc->delay = priv->dcbx.cable_len;
+
return mlx5_query_port_pfc(mdev, &pfc->pfc_en, NULL);
}
@@ -346,16 +352,39 @@ static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev,
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
+ u32 old_cable_len = priv->dcbx.cable_len;
+ struct ieee_pfc pfc_new;
+ u32 changed = 0;
u8 curr_pfc_en;
- int ret;
+ int ret = 0;
+ /* pfc_en */
mlx5_query_port_pfc(mdev, &curr_pfc_en, NULL);
+ if (pfc->pfc_en != curr_pfc_en) {
+ ret = mlx5_set_port_pfc(mdev, pfc->pfc_en, pfc->pfc_en);
+ if (ret)
+ return ret;
+ mlx5_toggle_port_link(mdev);
+ changed |= MLX5E_PORT_BUFFER_PFC;
+ }
- if (pfc->pfc_en == curr_pfc_en)
- return 0;
+ if (pfc->delay &&
+ pfc->delay < MLX5E_MAX_CABLE_LENGTH &&
+ pfc->delay != priv->dcbx.cable_len) {
+ priv->dcbx.cable_len = pfc->delay;
+ changed |= MLX5E_PORT_BUFFER_CABLE_LEN;
+ }
- ret = mlx5_set_port_pfc(mdev, pfc->pfc_en, pfc->pfc_en);
- mlx5_toggle_port_link(mdev);
+ if (MLX5_BUFFER_SUPPORTED(mdev)) {
+ pfc_new.pfc_en = (changed & MLX5E_PORT_BUFFER_PFC) ? pfc->pfc_en : curr_pfc_en;
+ if (priv->dcbx.manual_buffer)
+ ret = mlx5e_port_manual_buffer_config(priv, changed,
+ dev->mtu, &pfc_new,
+ NULL, NULL);
+
+ if (ret && (changed & MLX5E_PORT_BUFFER_CABLE_LEN))
+ priv->dcbx.cable_len = old_cable_len;
+ }
if (!ret) {
mlx5e_dbg(HW, priv,
@@ -873,6 +902,90 @@ static void mlx5e_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
cee_cfg->pfc_enable = state;
}
+static int mlx5e_dcbnl_getbuffer(struct net_device *dev,
+ struct dcbnl_buffer *dcb_buffer)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_port_buffer port_buffer;
+ u8 buffer[MLX5E_MAX_PRIORITY];
+ int i, err;
+
+ if (!MLX5_BUFFER_SUPPORTED(mdev))
+ return -EOPNOTSUPP;
+
+ err = mlx5e_port_query_priority2buffer(mdev, buffer);
+ if (err)
+ return err;
+
+ for (i = 0; i < MLX5E_MAX_PRIORITY; i++)
+ dcb_buffer->prio2buffer[i] = buffer[i];
+
+ err = mlx5e_port_query_buffer(priv, &port_buffer);
+ if (err)
+ return err;
+
+ for (i = 0; i < MLX5E_MAX_BUFFER; i++)
+ dcb_buffer->buffer_size[i] = port_buffer.buffer[i].size;
+ dcb_buffer->total_size = port_buffer.port_buffer_size;
+
+ return 0;
+}
+
+static int mlx5e_dcbnl_setbuffer(struct net_device *dev,
+ struct dcbnl_buffer *dcb_buffer)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_port_buffer port_buffer;
+ u8 old_prio2buffer[MLX5E_MAX_PRIORITY];
+ u32 *buffer_size = NULL;
+ u8 *prio2buffer = NULL;
+ u32 changed = 0;
+ int i, err;
+
+ if (!MLX5_BUFFER_SUPPORTED(mdev))
+ return -EOPNOTSUPP;
+
+ for (i = 0; i < DCBX_MAX_BUFFERS; i++)
+ mlx5_core_dbg(mdev, "buffer[%d]=%d\n", i, dcb_buffer->buffer_size[i]);
+
+ for (i = 0; i < MLX5E_MAX_PRIORITY; i++)
+ mlx5_core_dbg(mdev, "priority %d buffer%d\n", i, dcb_buffer->prio2buffer[i]);
+
+ err = mlx5e_port_query_priority2buffer(mdev, old_prio2buffer);
+ if (err)
+ return err;
+
+ for (i = 0; i < MLX5E_MAX_PRIORITY; i++) {
+ if (dcb_buffer->prio2buffer[i] != old_prio2buffer[i]) {
+ changed |= MLX5E_PORT_BUFFER_PRIO2BUFFER;
+ prio2buffer = dcb_buffer->prio2buffer;
+ break;
+ }
+ }
+
+ err = mlx5e_port_query_buffer(priv, &port_buffer);
+ if (err)
+ return err;
+
+ for (i = 0; i < MLX5E_MAX_BUFFER; i++) {
+ if (port_buffer.buffer[i].size != dcb_buffer->buffer_size[i]) {
+ changed |= MLX5E_PORT_BUFFER_SIZE;
+ buffer_size = dcb_buffer->buffer_size;
+ break;
+ }
+ }
+
+ if (!changed)
+ return 0;
+
+ priv->dcbx.manual_buffer = true;
+ err = mlx5e_port_manual_buffer_config(priv, changed, dev->mtu, NULL,
+ buffer_size, prio2buffer);
+ return err;
+}
+
const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops = {
.ieee_getets = mlx5e_dcbnl_ieee_getets,
.ieee_setets = mlx5e_dcbnl_ieee_setets,
@@ -884,6 +997,8 @@ const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops = {
.ieee_delapp = mlx5e_dcbnl_ieee_delapp,
.getdcbx = mlx5e_dcbnl_getdcbx,
.setdcbx = mlx5e_dcbnl_setdcbx,
+ .dcbnl_getbuffer = mlx5e_dcbnl_getbuffer,
+ .dcbnl_setbuffer = mlx5e_dcbnl_setbuffer,
/* CEE interfaces */
.setall = mlx5e_dcbnl_setall,
@@ -1091,5 +1206,8 @@ void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv)
if (priv->dcbx.mode == MLX5E_DCBX_PARAM_VER_OPER_HOST)
priv->dcbx.cap |= DCB_CAP_DCBX_HOST;
+ priv->dcbx.manual_buffer = false;
+ priv->dcbx.cable_len = MLX5E_DEFAULT_CABLE_LEN;
+
mlx5e_ets_init(priv);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c
index 602851ab5b14..d67adf70a97b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c
@@ -33,16 +33,30 @@
#include <linux/net_dim.h>
#include "en.h"
+static void
+mlx5e_complete_dim_work(struct net_dim *dim, struct net_dim_cq_moder moder,
+ struct mlx5_core_dev *mdev, struct mlx5_core_cq *mcq)
+{
+ mlx5_core_modify_cq_moderation(mdev, mcq, moder.usec, moder.pkts);
+ dim->state = NET_DIM_START_MEASURE;
+}
+
void mlx5e_rx_dim_work(struct work_struct *work)
{
- struct net_dim *dim = container_of(work, struct net_dim,
- work);
+ struct net_dim *dim = container_of(work, struct net_dim, work);
struct mlx5e_rq *rq = container_of(dim, struct mlx5e_rq, dim);
- struct net_dim_cq_moder cur_profile = net_dim_get_profile(dim->mode,
- dim->profile_ix);
+ struct net_dim_cq_moder cur_moder =
+ net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
- mlx5_core_modify_cq_moderation(rq->mdev, &rq->cq.mcq,
- cur_profile.usec, cur_profile.pkts);
+ mlx5e_complete_dim_work(dim, cur_moder, rq->mdev, &rq->cq.mcq);
+}
- dim->state = NET_DIM_START_MEASURE;
+void mlx5e_tx_dim_work(struct work_struct *work)
+{
+ struct net_dim *dim = container_of(work, struct net_dim, work);
+ struct mlx5e_txqsq *sq = container_of(dim, struct mlx5e_txqsq, dim);
+ struct net_dim_cq_moder cur_moder =
+ net_dim_get_tx_moderation(dim->mode, dim->profile_ix);
+
+ mlx5e_complete_dim_work(dim, cur_moder, sq->cq.mdev, &sq->cq.mcq);
}
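Both work handlers follow the net_dim contract: the interrupt path feeds per-CQ traffic samples to net_dim(), which schedules dim->work when it selects a new moderation profile, and the handler applies the profile and re-arms measurement via NET_DIM_START_MEASURE. A simplified standalone sketch of that loop (types and names reduced for illustration):

#include <stdio.h>

enum dim_state { START_MEASURE, MEASURE_IN_PROGRESS, APPLY_NEW_PROFILE };

struct dim {
	enum dim_state state;
	int profile_ix;
};

/* hypothetical stand-in for mlx5_core_modify_cq_moderation() */
static void program_cq(int profile_ix)
{
	printf("programming CQ with profile %d\n", profile_ix);
}

/* mirrors mlx5e_complete_dim_work(): apply, then re-arm measurement */
static void dim_work(struct dim *dim)
{
	program_cq(dim->profile_ix);
	dim->state = START_MEASURE;
}

int main(void)
{
	struct dim dim = { .state = APPLY_NEW_PROFILE, .profile_ix = 3 };

	if (dim.state == APPLY_NEW_PROFILE)
		dim_work(&dim);
	return 0;
}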
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 37fd0245b6c1..fffe514ba855 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -31,6 +31,7 @@
*/
#include "en.h"
+#include "en/port.h"
void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
struct ethtool_drvinfo *drvinfo)
@@ -59,18 +60,16 @@ static void mlx5e_get_drvinfo(struct net_device *dev,
struct ptys2ethtool_config {
__ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
__ETHTOOL_DECLARE_LINK_MODE_MASK(advertised);
- u32 speed;
};
static struct ptys2ethtool_config ptys2ethtool_table[MLX5E_LINK_MODES_NUMBER];
-#define MLX5_BUILD_PTYS2ETHTOOL_CONFIG(reg_, speed_, ...) \
+#define MLX5_BUILD_PTYS2ETHTOOL_CONFIG(reg_, ...) \
({ \
struct ptys2ethtool_config *cfg; \
const unsigned int modes[] = { __VA_ARGS__ }; \
unsigned int i; \
cfg = &ptys2ethtool_table[reg_]; \
- cfg->speed = speed_; \
bitmap_zero(cfg->supported, \
__ETHTOOL_LINK_MODE_MASK_NBITS); \
bitmap_zero(cfg->advertised, \
@@ -83,55 +82,55 @@ static struct ptys2ethtool_config ptys2ethtool_table[MLX5E_LINK_MODES_NUMBER];
void mlx5e_build_ptys2ethtool_map(void)
{
- MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1000BASE_CX_SGMII, SPEED_1000,
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1000BASE_CX_SGMII,
ETHTOOL_LINK_MODE_1000baseKX_Full_BIT);
- MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1000BASE_KX, SPEED_1000,
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1000BASE_KX,
ETHTOOL_LINK_MODE_1000baseKX_Full_BIT);
- MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_CX4, SPEED_10000,
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_CX4,
ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT);
- MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_KX4, SPEED_10000,
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_KX4,
ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT);
- MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_KR, SPEED_10000,
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_KR,
ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
- MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_20GBASE_KR2, SPEED_20000,
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_20GBASE_KR2,
ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT);
- MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_CR4, SPEED_40000,
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_CR4,
ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT);
- MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_KR4, SPEED_40000,
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_KR4,
ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT);
- MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_56GBASE_R4, SPEED_56000,
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_56GBASE_R4,
ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT);
- MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_CR, SPEED_10000,
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_CR,
ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
- MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_SR, SPEED_10000,
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_SR,
ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
- MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_ER, SPEED_10000,
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_ER,
ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
- MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_SR4, SPEED_40000,
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_SR4,
ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT);
- MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_LR4, SPEED_40000,
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_LR4,
ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT);
- MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_SR2, SPEED_50000,
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_SR2,
ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT);
- MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_CR4, SPEED_100000,
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_CR4,
ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT);
- MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_SR4, SPEED_100000,
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_SR4,
ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT);
- MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_KR4, SPEED_100000,
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_KR4,
ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT);
- MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_LR4, SPEED_100000,
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_LR4,
ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT);
- MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_T, SPEED_10000,
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_T,
ETHTOOL_LINK_MODE_10000baseT_Full_BIT);
- MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_CR, SPEED_25000,
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_CR,
ETHTOOL_LINK_MODE_25000baseCR_Full_BIT);
- MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_KR, SPEED_25000,
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_KR,
ETHTOOL_LINK_MODE_25000baseKR_Full_BIT);
- MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_SR, SPEED_25000,
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_SR,
ETHTOOL_LINK_MODE_25000baseSR_Full_BIT);
- MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_CR2, SPEED_50000,
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_CR2,
ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT);
- MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_KR2, SPEED_50000,
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_KR2,
ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT);
}
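
[Note: with the per-entry speed column dropped, speed translation is assumed to move behind the new en/port.h helpers; mlx5e_port_ptys2speed() and mlx5e_port_speed2linkmodes() are called later in this patch. A standalone sketch of that style of lookup, with an illustrative three-entry table rather than the real PTYS register layout:]

	#include <stdint.h>
	#include <stdio.h>

	enum { MODE_1G, MODE_10G_KR, MODE_40G_CR4, MODES_NUM };
	static const uint32_t mode_speed[MODES_NUM] = { 1000, 10000, 40000 };

	static uint32_t ptys2speed(uint32_t eth_proto_oper)
	{
		for (int i = 0; i < MODES_NUM; i++)
			if (eth_proto_oper & (1u << i))
				return mode_speed[i];	/* first operational mode */
		return 0;				/* unknown */
	}

	static uint32_t speed2linkmodes(uint32_t speed)
	{
		uint32_t modes = 0;

		for (int i = 0; i < MODES_NUM; i++)
			if (mode_speed[i] == speed)
				modes |= 1u << i;	/* all modes at that speed */
		return modes;
	}

	int main(void)
	{
		printf("oper 0x2 -> %u Mbps\n", ptys2speed(0x2));
		printf("10000 Mbps -> modes 0x%x\n", speed2linkmodes(10000));
		return 0;
	}
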
@@ -389,14 +388,20 @@ static int mlx5e_set_channels(struct net_device *dev,
int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv,
struct ethtool_coalesce *coal)
{
+ struct net_dim_cq_moder *rx_moder, *tx_moder;
+
if (!MLX5_CAP_GEN(priv->mdev, cq_moderation))
return -EOPNOTSUPP;
- coal->rx_coalesce_usecs = priv->channels.params.rx_cq_moderation.usec;
- coal->rx_max_coalesced_frames = priv->channels.params.rx_cq_moderation.pkts;
- coal->tx_coalesce_usecs = priv->channels.params.tx_cq_moderation.usec;
- coal->tx_max_coalesced_frames = priv->channels.params.tx_cq_moderation.pkts;
- coal->use_adaptive_rx_coalesce = priv->channels.params.rx_dim_enabled;
+ rx_moder = &priv->channels.params.rx_cq_moderation;
+ coal->rx_coalesce_usecs = rx_moder->usec;
+ coal->rx_max_coalesced_frames = rx_moder->pkts;
+ coal->use_adaptive_rx_coalesce = priv->channels.params.rx_dim_enabled;
+
+ tx_moder = &priv->channels.params.tx_cq_moderation;
+ coal->tx_coalesce_usecs = tx_moder->usec;
+ coal->tx_max_coalesced_frames = tx_moder->pkts;
+ coal->use_adaptive_tx_coalesce = priv->channels.params.tx_dim_enabled;
return 0;
}
@@ -438,6 +443,7 @@ mlx5e_set_priv_channels_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesc
int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
struct ethtool_coalesce *coal)
{
+ struct net_dim_cq_moder *rx_moder, *tx_moder;
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_channels new_channels = {};
int err = 0;
@@ -463,11 +469,15 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
mutex_lock(&priv->state_lock);
new_channels.params = priv->channels.params;
- new_channels.params.tx_cq_moderation.usec = coal->tx_coalesce_usecs;
- new_channels.params.tx_cq_moderation.pkts = coal->tx_max_coalesced_frames;
- new_channels.params.rx_cq_moderation.usec = coal->rx_coalesce_usecs;
- new_channels.params.rx_cq_moderation.pkts = coal->rx_max_coalesced_frames;
- new_channels.params.rx_dim_enabled = !!coal->use_adaptive_rx_coalesce;
+ rx_moder = &new_channels.params.rx_cq_moderation;
+ rx_moder->usec = coal->rx_coalesce_usecs;
+ rx_moder->pkts = coal->rx_max_coalesced_frames;
+ new_channels.params.rx_dim_enabled = !!coal->use_adaptive_rx_coalesce;
+
+ tx_moder = &new_channels.params.tx_cq_moderation;
+ tx_moder->usec = coal->tx_coalesce_usecs;
+ tx_moder->pkts = coal->tx_max_coalesced_frames;
+ new_channels.params.tx_dim_enabled = !!coal->use_adaptive_tx_coalesce;
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
priv->channels.params = new_channels.params;
@@ -475,7 +485,9 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
}
/* we are opened */
- reset = !!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_dim_enabled;
+ reset = (!!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_dim_enabled) ||
+ (!!coal->use_adaptive_tx_coalesce != priv->channels.params.tx_dim_enabled);
+
if (!reset) {
mlx5e_set_priv_channels_coalesce(priv, coal);
priv->channels.params = new_channels.params;
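
[Note: the coalesce handlers now treat TX symmetrically with RX: adaptive (DIM) state is reported and accepted per direction, and, as the hunk above shows, a full channel reload is needed only when an adaptive flag actually toggles; plain usec/pkts updates go to the live CQs. A self-contained model of that decision, with illustrative field names:]

	#include <stdbool.h>
	#include <stdio.h>

	struct coal { bool adaptive_rx, adaptive_tx; };

	static bool needs_reset(struct coal req, struct coal cur)
	{
		return req.adaptive_rx != cur.adaptive_rx ||
		       req.adaptive_tx != cur.adaptive_tx;
	}

	int main(void)
	{
		struct coal cur = { .adaptive_rx = true, .adaptive_tx = false };
		struct coal req = { .adaptive_rx = true, .adaptive_tx = true };

		printf("reload channels: %d\n", needs_reset(req, cur)); /* 1 */
		return 0;
	}
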
@@ -604,43 +616,24 @@ static void ptys2ethtool_supported_advertised_port(struct ethtool_link_ksettings
}
}
-int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
-{
- u32 max_speed = 0;
- u32 proto_cap;
- int err;
- int i;
-
- err = mlx5_query_port_proto_cap(mdev, &proto_cap, MLX5_PTYS_EN);
- if (err)
- return err;
-
- for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i)
- if (proto_cap & MLX5E_PROT_MASK(i))
- max_speed = max(max_speed, ptys2ethtool_table[i].speed);
-
- *speed = max_speed;
- return 0;
-}
-
static void get_speed_duplex(struct net_device *netdev,
u32 eth_proto_oper,
struct ethtool_link_ksettings *link_ksettings)
{
- int i;
u32 speed = SPEED_UNKNOWN;
u8 duplex = DUPLEX_UNKNOWN;
if (!netif_carrier_ok(netdev))
goto out;
- for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
- if (eth_proto_oper & MLX5E_PROT_MASK(i)) {
- speed = ptys2ethtool_table[i].speed;
- duplex = DUPLEX_FULL;
- break;
- }
+ speed = mlx5e_port_ptys2speed(eth_proto_oper);
+ if (!speed) {
+ speed = SPEED_UNKNOWN;
+ goto out;
}
+
+ duplex = DUPLEX_FULL;
+
out:
link_ksettings->base.speed = speed;
link_ksettings->base.duplex = duplex;
@@ -798,18 +791,6 @@ static u32 mlx5e_ethtool2ptys_adver_link(const unsigned long *link_modes)
return ptys_modes;
}
-static u32 mlx5e_ethtool2ptys_speed_link(u32 speed)
-{
- u32 i, speed_links = 0;
-
- for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
- if (ptys2ethtool_table[i].speed == speed)
- speed_links |= MLX5E_PROT_MASK(i);
- }
-
- return speed_links;
-}
-
static int mlx5e_set_link_ksettings(struct net_device *netdev,
const struct ethtool_link_ksettings *link_ksettings)
{
@@ -829,7 +810,7 @@ static int mlx5e_set_link_ksettings(struct net_device *netdev,
link_modes = link_ksettings->base.autoneg == AUTONEG_ENABLE ?
mlx5e_ethtool2ptys_adver_link(link_ksettings->link_modes.advertising) :
- mlx5e_ethtool2ptys_speed_link(speed);
+ mlx5e_port_speed2linkmodes(speed);
err = mlx5_query_port_proto_cap(mdev, &eth_proto_cap, MLX5_PTYS_EN);
if (err) {
@@ -1534,6 +1515,9 @@ static int set_pflag_rx_striding_rq(struct net_device *netdev, bool enable)
return -EOPNOTSUPP;
if (!mlx5e_striding_rq_possible(mdev, &priv->channels.params))
return -EINVAL;
+ } else if (priv->channels.params.lro_en) {
+ netdev_warn(netdev, "Can't set legacy RQ with LRO, disable LRO first\n");
+ return -EINVAL;
}
new_channels.params = priv->channels.params;
@@ -1608,6 +1592,10 @@ static int mlx5e_set_priv_flags(struct net_device *netdev, u32 pflags)
out:
mutex_unlock(&priv->state_lock);
+
+ /* Some netdev features depend on the priv flags; re-evaluate them */
+ netdev_update_features(netdev);
+
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index f64dda2bed31..76cc10e44080 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -277,7 +277,6 @@ static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
}
break;
case MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID:
- mlx5e_vport_context_update_vlans(priv);
if (priv->fs.vlan.active_cvlans_rule[vid]) {
mlx5_del_flow_rules(priv->fs.vlan.active_cvlans_rule[vid]);
priv->fs.vlan.active_cvlans_rule[vid] = NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index b29c1d93f058..89c96a0f708e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -35,18 +35,23 @@
#include <linux/mlx5/fs.h>
#include <net/vxlan.h>
#include <linux/bpf.h>
+#include <net/page_pool.h>
#include "eswitch.h"
#include "en.h"
#include "en_tc.h"
#include "en_rep.h"
#include "en_accel/ipsec.h"
#include "en_accel/ipsec_rxtx.h"
+#include "en_accel/tls.h"
#include "accel/ipsec.h"
+#include "accel/tls.h"
#include "vxlan.h"
+#include "en/port.h"
struct mlx5e_rq_param {
u32 rqc[MLX5_ST_SZ_DW(rqc)];
struct mlx5_wq_param wq;
+ struct mlx5e_rq_frags_info frags_info;
};
struct mlx5e_sq_param {
@@ -89,7 +94,7 @@ bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
return true;
}
-static u32 mlx5e_mpwqe_get_linear_frag_sz(struct mlx5e_params *params)
+static u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params)
{
if (!params->xdp_prog) {
u16 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
@@ -103,19 +108,27 @@ static u32 mlx5e_mpwqe_get_linear_frag_sz(struct mlx5e_params *params)
static u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params)
{
- u32 linear_frag_sz = mlx5e_mpwqe_get_linear_frag_sz(params);
+ u32 linear_frag_sz = mlx5e_rx_get_linear_frag_sz(params);
return MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(linear_frag_sz);
}
+static bool mlx5e_rx_is_linear_skb(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params)
+{
+ u32 frag_sz = mlx5e_rx_get_linear_frag_sz(params);
+
+ return !params->lro_en && frag_sz <= PAGE_SIZE;
+}
+
static bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
struct mlx5e_params *params)
{
- u32 frag_sz = mlx5e_mpwqe_get_linear_frag_sz(params);
+ u32 frag_sz = mlx5e_rx_get_linear_frag_sz(params);
s8 signed_log_num_strides_param;
u8 log_num_strides;
- if (params->lro_en || frag_sz > PAGE_SIZE)
+ if (!mlx5e_rx_is_linear_skb(mdev, params))
return false;
if (MLX5_CAP_GEN(mdev, ext_stride_num_range))
@@ -141,7 +154,7 @@ static u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
struct mlx5e_params *params)
{
if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params))
- return order_base_2(mlx5e_mpwqe_get_linear_frag_sz(params));
+ return order_base_2(mlx5e_rx_get_linear_frag_sz(params));
return MLX5E_MPWQE_STRIDE_SZ(mdev,
MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
@@ -159,16 +172,15 @@ static u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
{
u16 linear_rq_headroom = params->xdp_prog ?
XDP_PACKET_HEADROOM : MLX5_RX_HEADROOM;
+ bool is_linear_skb;
linear_rq_headroom += NET_IP_ALIGN;
- if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST)
- return linear_rq_headroom;
-
- if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params))
- return linear_rq_headroom;
+ is_linear_skb = (params->rq_wq_type == MLX5_WQ_TYPE_CYCLIC) ?
+ mlx5e_rx_is_linear_skb(mdev, params) :
+ mlx5e_rx_mpwqe_is_linear_skb(mdev, params);
- return 0;
+ return is_linear_skb ? linear_rq_headroom : 0;
}
void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
@@ -178,14 +190,6 @@ void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
params->log_rq_mtu_frames = is_kdump_kernel() ?
MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
- switch (params->rq_wq_type) {
- case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- break;
- default: /* MLX5_WQ_TYPE_LINKED_LIST */
- /* Extra room needed for build_skb */
- params->lro_wqe_sz -= mlx5e_get_rq_headroom(mdev, params) +
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- }
mlx5_core_info(mdev, "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
@@ -209,7 +213,7 @@ void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
params->rq_wq_type = mlx5e_striding_rq_possible(mdev, params) &&
MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) ?
MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
- MLX5_WQ_TYPE_LINKED_LIST;
+ MLX5_WQ_TYPE_CYCLIC;
}
static void mlx5e_update_carrier(struct mlx5e_priv *priv)
@@ -323,10 +327,30 @@ static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
}
+static u32 mlx5e_rqwq_get_size(struct mlx5e_rq *rq)
+{
+ switch (rq->wq_type) {
+ case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ return mlx5_wq_ll_get_size(&rq->mpwqe.wq);
+ default:
+ return mlx5_wq_cyc_get_size(&rq->wqe.wq);
+ }
+}
+
+static u32 mlx5e_rqwq_get_cur_sz(struct mlx5e_rq *rq)
+{
+ switch (rq->wq_type) {
+ case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ return rq->mpwqe.wq.cur_sz;
+ default:
+ return rq->wqe.wq.cur_sz;
+ }
+}
+
static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq,
struct mlx5e_channel *c)
{
- int wq_sz = mlx5_wq_ll_get_size(&rq->wq);
+ int wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
rq->mpwqe.info = kzalloc_node(wq_sz * sizeof(*rq->mpwqe.info),
GFP_KERNEL, cpu_to_node(c->cpu));
@@ -374,7 +398,7 @@ static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq *rq)
{
- u64 num_mtts = MLX5E_REQUIRED_MTTS(mlx5_wq_ll_get_size(&rq->wq));
+ u64 num_mtts = MLX5E_REQUIRED_MTTS(mlx5_wq_ll_get_size(&rq->mpwqe.wq));
return mlx5e_create_umr_mkey(mdev, num_mtts, PAGE_SHIFT, &rq->umr_mkey);
}
@@ -384,31 +408,77 @@ static inline u64 mlx5e_get_mpwqe_offset(struct mlx5e_rq *rq, u16 wqe_ix)
return (wqe_ix << MLX5E_LOG_ALIGNED_MPWQE_PPW) << PAGE_SHIFT;
}
+static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
+{
+ struct mlx5e_wqe_frag_info next_frag, *prev;
+ int i;
+
+ next_frag.di = &rq->wqe.di[0];
+ next_frag.offset = 0;
+ prev = NULL;
+
+ for (i = 0; i < mlx5_wq_cyc_get_size(&rq->wqe.wq); i++) {
+ struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
+ struct mlx5e_wqe_frag_info *frag =
+ &rq->wqe.frags[i << rq->wqe.info.log_num_frags];
+ int f;
+
+ for (f = 0; f < rq->wqe.info.num_frags; f++, frag++) {
+ if (next_frag.offset + frag_info[f].frag_stride > PAGE_SIZE) {
+ next_frag.di++;
+ next_frag.offset = 0;
+ if (prev)
+ prev->last_in_page = true;
+ }
+ *frag = next_frag;
+
+ /* prepare next */
+ next_frag.offset += frag_info[f].frag_stride;
+ prev = frag;
+ }
+ }
+
+ if (prev)
+ prev->last_in_page = true;
+}
+
+static int mlx5e_init_di_list(struct mlx5e_rq *rq,
+ struct mlx5e_params *params,
+ int wq_sz, int cpu)
+{
+ int len = wq_sz << rq->wqe.info.log_num_frags;
+
+ rq->wqe.di = kvzalloc_node(len * sizeof(*rq->wqe.di),
+ GFP_KERNEL, cpu_to_node(cpu));
+ if (!rq->wqe.di)
+ return -ENOMEM;
+
+ mlx5e_init_frags_partition(rq);
+
+ return 0;
+}
+
+static void mlx5e_free_di_list(struct mlx5e_rq *rq)
+{
+ kvfree(rq->wqe.di);
+}
+
static int mlx5e_alloc_rq(struct mlx5e_channel *c,
struct mlx5e_params *params,
struct mlx5e_rq_param *rqp,
struct mlx5e_rq *rq)
{
+ struct page_pool_params pp_params = { 0 };
struct mlx5_core_dev *mdev = c->mdev;
void *rqc = rqp->rqc;
void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
- u32 byte_count;
- int npages;
+ u32 pool_size;
int wq_sz;
int err;
int i;
rqp->wq.db_numa_node = cpu_to_node(c->cpu);
- err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->wq,
- &rq->wq_ctrl);
- if (err)
- return err;
-
- rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];
-
- wq_sz = mlx5_wq_ll_get_size(&rq->wq);
-
rq->wq_type = params->rq_wq_type;
rq->pdev = c->pdev;
rq->netdev = c->netdev;
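
[Note: mlx5e_init_frags_partition() above packs consecutive WQE fragments onto shared pages, advancing to the next page-cache entry whenever a stride would cross a page boundary. A standalone simulation of the same walk; the stride layout and WQE count are invented example values, and PAGE_SZ stands in for PAGE_SIZE:]

	#include <stdio.h>

	#define PAGE_SZ   4096
	#define WQ_SZ     4
	#define NUM_FRAGS 3
	static const int frag_stride[NUM_FRAGS] = { 2048, 2048, 1024 };

	int main(void)
	{
		int page = 0, offset = 0;

		for (int i = 0; i < WQ_SZ; i++)
			for (int f = 0; f < NUM_FRAGS; f++) {
				if (offset + frag_stride[f] > PAGE_SZ) {
					page++;	/* previous frag was last_in_page */
					offset = 0;
				}
				printf("wqe %d frag %d -> page %d offset %d\n",
				       i, f, page, offset);
				offset += frag_stride[f];
			}
		return 0;
	}

[The output shows WQEs sharing pages across their boundaries, which is why each fragment carries its own (di, offset) pair and a last_in_page marker.]
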
@@ -418,6 +488,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
rq->ix = c->ix;
rq->mdev = mdev;
rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
+ rq->stats = &c->priv->channel_stats[c->ix].rq;
rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL;
if (IS_ERR(rq->xdp_prog)) {
@@ -432,9 +503,21 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
rq->buff.map_dir = rq->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params);
+ pool_size = 1 << params->log_rq_mtu_frames;
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->mpwqe.wq,
+ &rq->wq_ctrl);
+ if (err)
+ return err;
+
+ rq->mpwqe.wq.db = &rq->mpwqe.wq.db[MLX5_RCV_DBR];
+
+ wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
+
+ pool_size = MLX5_MPWRQ_PAGES_PER_WQE << mlx5e_mpwqe_get_log_rq_size(params);
+
rq->post_wqes = mlx5e_post_rx_mpwqes;
rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
@@ -459,8 +542,6 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
rq->mpwqe.log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params);
rq->mpwqe.num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params));
- byte_count = rq->mpwqe.num_strides << rq->mpwqe.log_stride_sz;
-
err = mlx5e_create_rq_umr_mkey(mdev, rq);
if (err)
goto err_rq_wq_destroy;
@@ -468,16 +549,31 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
err = mlx5e_rq_alloc_mpwqe_info(rq, c);
if (err)
- goto err_destroy_umr_mkey;
+ goto err_free;
break;
- default: /* MLX5_WQ_TYPE_LINKED_LIST */
- rq->wqe.frag_info =
- kzalloc_node(wq_sz * sizeof(*rq->wqe.frag_info),
- GFP_KERNEL, cpu_to_node(c->cpu));
- if (!rq->wqe.frag_info) {
+ default: /* MLX5_WQ_TYPE_CYCLIC */
+ err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq,
+ &rq->wq_ctrl);
+ if (err)
+ return err;
+
+ rq->wqe.wq.db = &rq->wqe.wq.db[MLX5_RCV_DBR];
+
+ wq_sz = mlx5_wq_cyc_get_size(&rq->wqe.wq);
+
+ rq->wqe.info = rqp->frags_info;
+ rq->wqe.frags =
+ kvzalloc_node((wq_sz << rq->wqe.info.log_num_frags) *
+ sizeof(*rq->wqe.frags),
+ GFP_KERNEL, cpu_to_node(c->cpu));
+ if (!rq->wqe.frags) {
err = -ENOMEM;
- goto err_rq_wq_destroy;
+ goto err_free;
}
+
+ err = mlx5e_init_di_list(rq, params, wq_sz, c->cpu);
+ if (err)
+ goto err_free;
rq->post_wqes = mlx5e_post_rx_wqes;
rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
@@ -488,41 +584,71 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
#endif
rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe;
if (!rq->handle_rx_cqe) {
- kfree(rq->wqe.frag_info);
err = -EINVAL;
netdev_err(c->netdev, "RX handler of RQ is not set, err %d\n", err);
- goto err_rq_wq_destroy;
+ goto err_free;
}
- byte_count = params->lro_en ?
- params->lro_wqe_sz :
- MLX5E_SW2HW_MTU(params, params->sw_mtu);
-#ifdef CONFIG_MLX5_EN_IPSEC
- if (MLX5_IPSEC_DEV(mdev))
- byte_count += MLX5E_METADATA_ETHER_LEN;
-#endif
- rq->wqe.page_reuse = !params->xdp_prog && !params->lro_en;
-
- /* calc the required page order */
- rq->wqe.frag_sz = MLX5_SKB_FRAG_SZ(rq->buff.headroom + byte_count);
- npages = DIV_ROUND_UP(rq->wqe.frag_sz, PAGE_SIZE);
- rq->buff.page_order = order_base_2(npages);
-
- byte_count |= MLX5_HW_START_PADDING;
+ rq->wqe.skb_from_cqe = mlx5e_rx_is_linear_skb(mdev, params) ?
+ mlx5e_skb_from_cqe_linear :
+ mlx5e_skb_from_cqe_nonlinear;
rq->mkey_be = c->mkey_be;
}
- for (i = 0; i < wq_sz; i++) {
- struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
+ /* Create a page_pool and register it with rxq */
+ pp_params.order = 0;
+ pp_params.flags = 0; /* no internal DMA mapping in page_pool */
+ pp_params.pool_size = pool_size;
+ pp_params.nid = cpu_to_node(c->cpu);
+ pp_params.dev = c->pdev;
+ pp_params.dma_dir = rq->buff.map_dir;
+
+ /* page_pool can be used even when there is no rq->xdp_prog:
+ * since page_pool does not handle DMA mapping, there is no
+ * required state to clear. page_pool also gracefully handles
+ * an elevated refcnt.
+ */
+ rq->page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(rq->page_pool)) {
+ err = PTR_ERR(rq->page_pool);
+ rq->page_pool = NULL;
+ goto err_free;
+ }
+ err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
+ MEM_TYPE_PAGE_POOL, rq->page_pool);
+ if (err)
+ goto err_free;
+ for (i = 0; i < wq_sz; i++) {
if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
+ struct mlx5e_rx_wqe_ll *wqe =
+ mlx5_wq_ll_get_wqe(&rq->mpwqe.wq, i);
+ u32 byte_count =
+ rq->mpwqe.num_strides << rq->mpwqe.log_stride_sz;
u64 dma_offset = mlx5e_get_mpwqe_offset(rq, i);
- wqe->data.addr = cpu_to_be64(dma_offset + rq->buff.headroom);
- }
+ wqe->data[0].addr = cpu_to_be64(dma_offset + rq->buff.headroom);
+ wqe->data[0].byte_count = cpu_to_be32(byte_count);
+ wqe->data[0].lkey = rq->mkey_be;
+ } else {
+ struct mlx5e_rx_wqe_cyc *wqe =
+ mlx5_wq_cyc_get_wqe(&rq->wqe.wq, i);
+ int f;
+
+ for (f = 0; f < rq->wqe.info.num_frags; f++) {
+ u32 frag_size = rq->wqe.info.arr[f].frag_size |
+ MLX5_HW_START_PADDING;
- wqe->data.byte_count = cpu_to_be32(byte_count);
- wqe->data.lkey = rq->mkey_be;
+ wqe->data[f].byte_count = cpu_to_be32(frag_size);
+ wqe->data[f].lkey = rq->mkey_be;
+ }
+ /* check if num_frags is not a power of two */
+ if (rq->wqe.info.num_frags < (1 << rq->wqe.info.log_num_frags)) {
+ wqe->data[f].byte_count = 0;
+ wqe->data[f].lkey = cpu_to_be32(MLX5_INVALID_LKEY);
+ wqe->data[f].addr = 0;
+ }
+ }
}
INIT_WORK(&rq->dim.work, mlx5e_rx_dim_work);
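
[Note: the page_pool created above recycles RX pages per ring; with flags 0 the driver keeps doing its own DMA mapping, and the pool is registered as the rxq's XDP memory model. A hedged sketch of the creation pattern with error handling trimmed; the wrapper name is invented:]

	#include <net/page_pool.h>

	/* nid, device and direction come from the channel; pool_size
	 * approximates one page per outstanding RX descriptor. */
	static struct page_pool *rx_page_pool_create(struct device *dev, int nid,
						     u32 pool_size,
						     enum dma_data_direction dir)
	{
		struct page_pool_params pp = {
			.order		= 0,	/* single pages */
			.flags		= 0,	/* driver does its own DMA mapping */
			.pool_size	= pool_size,
			.nid		= nid,
			.dev		= dev,
			.dma_dir	= dir,
		};

		return page_pool_create(&pp);	/* ERR_PTR() on failure */
	}

[Pages are then drawn with page_pool_dev_alloc_pages() and recycled through the MEM_TYPE_PAGE_POOL model registered via xdp_rxq_info_reg_mem_model(), as in the hunk above.]
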
@@ -541,13 +667,23 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
return 0;
-err_destroy_umr_mkey:
- mlx5_core_destroy_mkey(mdev, &rq->umr_mkey);
+err_free:
+ switch (rq->wq_type) {
+ case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ kfree(rq->mpwqe.info);
+ mlx5_core_destroy_mkey(mdev, &rq->umr_mkey);
+ break;
+ default: /* MLX5_WQ_TYPE_CYCLIC */
+ kvfree(rq->wqe.frags);
+ mlx5e_free_di_list(rq);
+ }
err_rq_wq_destroy:
if (rq->xdp_prog)
bpf_prog_put(rq->xdp_prog);
xdp_rxq_info_unreg(&rq->xdp_rxq);
+ if (rq->page_pool)
+ page_pool_destroy(rq->page_pool);
mlx5_wq_destroy(&rq->wq_ctrl);
return err;
@@ -561,14 +697,17 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq)
bpf_prog_put(rq->xdp_prog);
xdp_rxq_info_unreg(&rq->xdp_rxq);
+ if (rq->page_pool)
+ page_pool_destroy(rq->page_pool);
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
kfree(rq->mpwqe.info);
mlx5_core_destroy_mkey(rq->mdev, &rq->umr_mkey);
break;
- default: /* MLX5_WQ_TYPE_LINKED_LIST */
- kfree(rq->wqe.frag_info);
+ default: /* MLX5_WQ_TYPE_CYCLIC */
+ kvfree(rq->wqe.frags);
+ mlx5e_free_di_list(rq);
}
for (i = rq->page_cache.head; i != rq->page_cache.tail;
@@ -608,8 +747,8 @@ static int mlx5e_create_rq(struct mlx5e_rq *rq,
MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);
- mlx5_fill_page_array(&rq->wq_ctrl.buf,
- (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
+ mlx5_fill_page_frag_array(&rq->wq_ctrl.buf,
+ (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);
@@ -710,56 +849,58 @@ static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
mlx5_core_destroy_rq(rq->mdev, rq->rqn);
}
-static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
+static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time)
{
- unsigned long exp_time = jiffies + msecs_to_jiffies(20000);
+ unsigned long exp_time = jiffies + msecs_to_jiffies(wait_time);
struct mlx5e_channel *c = rq->channel;
- struct mlx5_wq_ll *wq = &rq->wq;
- u16 min_wqes = mlx5_min_rx_wqes(rq->wq_type, mlx5_wq_ll_get_size(wq));
+ u16 min_wqes = mlx5_min_rx_wqes(rq->wq_type, mlx5e_rqwq_get_size(rq));
- while (time_before(jiffies, exp_time)) {
- if (wq->cur_sz >= min_wqes)
+ do {
+ if (mlx5e_rqwq_get_cur_sz(rq) >= min_wqes)
return 0;
msleep(20);
- }
+ } while (time_before(jiffies, exp_time));
+
+ netdev_warn(c->netdev, "Failed to get min RX wqes on Channel[%d] RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n",
+ c->ix, rq->rqn, mlx5e_rqwq_get_cur_sz(rq), min_wqes);
- netdev_warn(c->netdev, "Failed to get min RX wqes on RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n",
- rq->rqn, wq->cur_sz, min_wqes);
return -ETIMEDOUT;
}
static void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
{
- struct mlx5_wq_ll *wq = &rq->wq;
- struct mlx5e_rx_wqe *wqe;
__be16 wqe_ix_be;
u16 wqe_ix;
- /* UMR WQE (if in progress) is always at wq->head */
- if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
- rq->mpwqe.umr_in_progress)
- mlx5e_free_rx_mpwqe(rq, &rq->mpwqe.info[wq->head]);
-
- while (!mlx5_wq_ll_is_empty(wq)) {
- wqe_ix_be = *wq->tail_next;
- wqe_ix = be16_to_cpu(wqe_ix_be);
- wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_ix);
- rq->dealloc_wqe(rq, wqe_ix);
- mlx5_wq_ll_pop(&rq->wq, wqe_ix_be,
- &wqe->next.next_wqe_index);
- }
+ if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
+ struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
- if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST && rq->wqe.page_reuse) {
- /* Clean outstanding pages on handled WQEs that decided to do page-reuse,
- * but yet to be re-posted.
- */
- int wq_sz = mlx5_wq_ll_get_size(&rq->wq);
+ /* UMR WQE (if in progress) is always at wq->head */
+ if (rq->mpwqe.umr_in_progress)
+ mlx5e_free_rx_mpwqe(rq, &rq->mpwqe.info[wq->head]);
+
+ while (!mlx5_wq_ll_is_empty(wq)) {
+ struct mlx5e_rx_wqe_ll *wqe;
+
+ wqe_ix_be = *wq->tail_next;
+ wqe_ix = be16_to_cpu(wqe_ix_be);
+ wqe = mlx5_wq_ll_get_wqe(wq, wqe_ix);
+ rq->dealloc_wqe(rq, wqe_ix);
+ mlx5_wq_ll_pop(wq, wqe_ix_be,
+ &wqe->next.next_wqe_index);
+ }
+ } else {
+ struct mlx5_wq_cyc *wq = &rq->wqe.wq;
- for (wqe_ix = 0; wqe_ix < wq_sz; wqe_ix++)
+ while (!mlx5_wq_cyc_is_empty(wq)) {
+ wqe_ix = mlx5_wq_cyc_get_tail(wq);
rq->dealloc_wqe(rq, wqe_ix);
+ mlx5_wq_cyc_pop(wq);
+ }
}
+
}
static int mlx5e_open_rq(struct mlx5e_channel *c,
@@ -782,7 +923,7 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
goto err_destroy_rq;
if (params->rx_dim_enabled)
- c->rq.state |= BIT(MLX5E_RQ_STATE_AM);
+ __set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);
return 0;
@@ -797,13 +938,15 @@ err_free_rq:
static void mlx5e_activate_rq(struct mlx5e_rq *rq)
{
struct mlx5e_icosq *sq = &rq->channel->icosq;
- u16 pi = sq->pc & sq->wq.sz_m1;
+ struct mlx5_wq_cyc *wq = &sq->wq;
struct mlx5e_tx_wqe *nopwqe;
+ u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+
set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;
- nopwqe = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);
- mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &nopwqe->ctrl);
+ nopwqe = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
+ mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nopwqe->ctrl);
}
static void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
@@ -846,6 +989,7 @@ static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
{
void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
struct mlx5_core_dev *mdev = c->mdev;
+ struct mlx5_wq_cyc *wq = &sq->wq;
int err;
sq->pdev = c->pdev;
@@ -855,10 +999,10 @@ static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
sq->min_inline_mode = params->tx_min_inline_mode;
param->wq.db_numa_node = cpu_to_node(c->cpu);
- err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
+ err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
if (err)
return err;
- sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
+ wq->db = &wq->db[MLX5_SND_DBR];
err = mlx5e_alloc_xdpsq_db(sq, cpu_to_node(c->cpu));
if (err)
@@ -901,23 +1045,22 @@ static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
{
void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
struct mlx5_core_dev *mdev = c->mdev;
+ struct mlx5_wq_cyc *wq = &sq->wq;
int err;
sq->channel = c;
sq->uar_map = mdev->mlx5e_res.bfreg.map;
param->wq.db_numa_node = cpu_to_node(c->cpu);
- err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
+ err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
if (err)
return err;
- sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
+ wq->db = &wq->db[MLX5_SND_DBR];
err = mlx5e_alloc_icosq_db(sq, cpu_to_node(c->cpu));
if (err)
goto err_sq_wq_destroy;
- sq->edge = (sq->wq.sz_m1 + 1) - MLX5E_ICOSQ_MAX_WQEBBS;
-
return 0;
err_sq_wq_destroy:
@@ -962,10 +1105,12 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
int txq_ix,
struct mlx5e_params *params,
struct mlx5e_sq_param *param,
- struct mlx5e_txqsq *sq)
+ struct mlx5e_txqsq *sq,
+ int tc)
{
void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
struct mlx5_core_dev *mdev = c->mdev;
+ struct mlx5_wq_cyc *wq = &sq->wq;
int err;
sq->pdev = c->pdev;
@@ -976,21 +1121,25 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
sq->txq_ix = txq_ix;
sq->uar_map = mdev->mlx5e_res.bfreg.map;
sq->min_inline_mode = params->tx_min_inline_mode;
+ sq->stats = &c->priv->channel_stats[c->ix].sq[tc];
INIT_WORK(&sq->recover.recover_work, mlx5e_sq_recover);
if (MLX5_IPSEC_DEV(c->priv->mdev))
set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
+ if (mlx5_accel_is_tls_device(c->priv->mdev))
+ set_bit(MLX5E_SQ_STATE_TLS, &sq->state);
param->wq.db_numa_node = cpu_to_node(c->cpu);
- err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
+ err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
if (err)
return err;
- sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
+ wq->db = &wq->db[MLX5_SND_DBR];
err = mlx5e_alloc_txqsq_db(sq, cpu_to_node(c->cpu));
if (err)
goto err_sq_wq_destroy;
- sq->edge = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;
+ INIT_WORK(&sq->dim.work, mlx5e_tx_dim_work);
+ sq->dim.mode = params->tx_cq_moderation.cq_period_mode;
return 0;
@@ -1051,7 +1200,8 @@ static int mlx5e_create_sq(struct mlx5_core_dev *mdev,
MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(wq, wq, dbr_addr, csp->wq_ctrl->db.dma);
- mlx5_fill_page_array(&csp->wq_ctrl->buf, (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
+ mlx5_fill_page_frag_array(&csp->wq_ctrl->buf,
+ (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
err = mlx5_core_create_sq(mdev, in, inlen, sqn);
@@ -1130,13 +1280,14 @@ static int mlx5e_open_txqsq(struct mlx5e_channel *c,
int txq_ix,
struct mlx5e_params *params,
struct mlx5e_sq_param *param,
- struct mlx5e_txqsq *sq)
+ struct mlx5e_txqsq *sq,
+ int tc)
{
struct mlx5e_create_sq_param csp = {};
u32 tx_rate;
int err;
- err = mlx5e_alloc_txqsq(c, txq_ix, params, param, sq);
+ err = mlx5e_alloc_txqsq(c, txq_ix, params, param, sq, tc);
if (err)
return err;
@@ -1153,6 +1304,9 @@ static int mlx5e_open_txqsq(struct mlx5e_channel *c,
if (tx_rate)
mlx5e_set_sq_maxrate(c->netdev, sq, tx_rate);
+ if (params->tx_dim_enabled)
+ sq->state |= BIT(MLX5E_SQ_STATE_AM);
+
return 0;
err_free_txqsq:
@@ -1191,6 +1345,7 @@ static inline void netif_tx_disable_queue(struct netdev_queue *txq)
static void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
{
struct mlx5e_channel *c = sq->channel;
+ struct mlx5_wq_cyc *wq = &sq->wq;
clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
/* prevent netif_tx_wake_queue */
@@ -1199,12 +1354,13 @@ static void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
netif_tx_disable_queue(sq->txq);
/* last doorbell out, godspeed .. */
- if (mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, 1)) {
+ if (mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1)) {
+ u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
struct mlx5e_tx_wqe *nop;
- sq->db.wqe_info[(sq->pc & sq->wq.sz_m1)].skb = NULL;
- nop = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);
- mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &nop->ctrl);
+ sq->db.wqe_info[pi].skb = NULL;
+ nop = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
+ mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nop->ctrl);
}
}
@@ -1319,7 +1475,7 @@ static void mlx5e_sq_recover(struct work_struct *work)
return;
mlx5e_reset_txqsq_cc_pc(sq);
- sq->stats.recover++;
+ sq->stats->recover++;
recover->last_recover = jiffies;
mlx5e_activate_txqsq(sq);
}
@@ -1488,7 +1644,7 @@ static int mlx5e_alloc_cq(struct mlx5e_channel *c,
static void mlx5e_free_cq(struct mlx5e_cq *cq)
{
- mlx5_cqwq_destroy(&cq->wq_ctrl);
+ mlx5_wq_destroy(&cq->wq_ctrl);
}
static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
@@ -1504,7 +1660,7 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
int err;
inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
- sizeof(u64) * cq->wq_ctrl.frag_buf.npages;
+ sizeof(u64) * cq->wq_ctrl.buf.npages;
in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -1513,7 +1669,7 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
memcpy(cqc, param->cqc, sizeof(param->cqc));
- mlx5_fill_page_frag_array(&cq->wq_ctrl.frag_buf,
+ mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
(__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
@@ -1521,7 +1677,7 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode);
MLX5_SET(cqc, cqc, c_eqn, eqn);
MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
- MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.frag_buf.page_shift -
+ MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
@@ -1614,14 +1770,14 @@ static int mlx5e_open_sqs(struct mlx5e_channel *c,
struct mlx5e_params *params,
struct mlx5e_channel_param *cparam)
{
- int err;
- int tc;
+ struct mlx5e_priv *priv = c->priv;
+ int err, tc, max_nch = priv->profile->max_nch(priv->mdev);
for (tc = 0; tc < params->num_tc; tc++) {
- int txq_ix = c->ix + tc * params->num_channels;
+ int txq_ix = c->ix + tc * max_nch;
err = mlx5e_open_txqsq(c, c->priv->tisn[tc], txq_ix,
- params, &cparam->sq, &c->sq[tc]);
+ params, &cparam->sq, &c->sq[tc], tc);
if (err)
goto err_close_sqs;
}
@@ -1751,6 +1907,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
c->num_tc = params->num_tc;
c->xdp = !!params->xdp_prog;
+ c->stats = &priv->channel_stats[ix].ch;
mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq);
c->irq_desc = irq_to_desc(irq);
@@ -1864,6 +2021,76 @@ static void mlx5e_close_channel(struct mlx5e_channel *c)
kfree(c);
}
+#define DEFAULT_FRAG_SIZE (2048)
+
+static void mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
+ struct mlx5e_rq_frags_info *info)
+{
+ u32 byte_count = MLX5E_SW2HW_MTU(params, params->sw_mtu);
+ int frag_size_max = DEFAULT_FRAG_SIZE;
+ u32 buf_size = 0;
+ int i;
+
+#ifdef CONFIG_MLX5_EN_IPSEC
+ if (MLX5_IPSEC_DEV(mdev))
+ byte_count += MLX5E_METADATA_ETHER_LEN;
+#endif
+
+ if (mlx5e_rx_is_linear_skb(mdev, params)) {
+ int frag_stride;
+
+ frag_stride = mlx5e_rx_get_linear_frag_sz(params);
+ frag_stride = roundup_pow_of_two(frag_stride);
+
+ info->arr[0].frag_size = byte_count;
+ info->arr[0].frag_stride = frag_stride;
+ info->num_frags = 1;
+ info->wqe_bulk = PAGE_SIZE / frag_stride;
+ goto out;
+ }
+
+ if (byte_count > PAGE_SIZE +
+ (MLX5E_MAX_RX_FRAGS - 1) * frag_size_max)
+ frag_size_max = PAGE_SIZE;
+
+ i = 0;
+ while (buf_size < byte_count) {
+ int frag_size = byte_count - buf_size;
+
+ if (i < MLX5E_MAX_RX_FRAGS - 1)
+ frag_size = min(frag_size, frag_size_max);
+
+ info->arr[i].frag_size = frag_size;
+ info->arr[i].frag_stride = roundup_pow_of_two(frag_size);
+
+ buf_size += frag_size;
+ i++;
+ }
+ info->num_frags = i;
+ /* number of different wqes sharing a page */
+ info->wqe_bulk = 1 + (info->num_frags % 2);
+
+out:
+ info->wqe_bulk = max_t(u8, info->wqe_bulk, 8);
+ info->log_num_frags = order_base_2(info->num_frags);
+}
+
+static inline u8 mlx5e_get_rqwq_log_stride(u8 wq_type, int ndsegs)
+{
+ int sz = sizeof(struct mlx5_wqe_data_seg) * ndsegs;
+
+ switch (wq_type) {
+ case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ sz += sizeof(struct mlx5e_rx_wqe_ll);
+ break;
+ default: /* MLX5_WQ_TYPE_CYCLIC */
+ sz += sizeof(struct mlx5e_rx_wqe_cyc);
+ }
+
+ return order_base_2(sz);
+}
+
static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
struct mlx5e_params *params,
struct mlx5e_rq_param *param)
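
[Note: to make the fragment sizing above concrete, a standalone walk-through of the non-linear branch of mlx5e_build_rq_frags_info() for an assumed 4 KB page and a roughly 9 KB jumbo frame; the constants mirror the hunk, the byte_count is an invented example, and PAGE_SZ stands in for PAGE_SIZE:]

	#include <stdio.h>

	#define PAGE_SZ			4096
	#define MAX_RX_FRAGS		4
	#define DEFAULT_FRAG_SIZE	2048

	static unsigned int roundup_pow2(unsigned int v)
	{
		unsigned int r = 1;

		while (r < v)
			r <<= 1;
		return r;
	}

	int main(void)
	{
		unsigned int byte_count = 9022;	/* MTU + hard-MTU overhead */
		unsigned int buf_size = 0, frag_size_max, wqe_bulk, i = 0;

		frag_size_max = DEFAULT_FRAG_SIZE;
		if (byte_count > PAGE_SZ + (MAX_RX_FRAGS - 1) * frag_size_max)
			frag_size_max = PAGE_SZ;

		while (buf_size < byte_count) {
			unsigned int frag_size = byte_count - buf_size;

			if (i < MAX_RX_FRAGS - 1 && frag_size > frag_size_max)
				frag_size = frag_size_max;
			printf("frag[%u]: size %u stride %u\n",
			       i, frag_size, roundup_pow2(frag_size));
			buf_size += frag_size;
			i++;
		}
		wqe_bulk = 1 + (i % 2);
		if (wqe_bulk < 8)
			wqe_bulk = 8;	/* info->wqe_bulk = max_t(u8, ..., 8) */
		printf("num_frags %u wqe_bulk %u\n", i, wqe_bulk);
		return 0;
	}

[This yields three 2048-byte fragments plus one 2878-byte tail fragment with a 4096 stride, i.e. num_frags = 4.]
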
@@ -1871,6 +2098,7 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
struct mlx5_core_dev *mdev = priv->mdev;
void *rqc = param->rqc;
void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
+ int ndsegs = 1;
switch (params->rq_wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
@@ -1880,23 +2108,24 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
MLX5_SET(wq, wq, log_wqe_stride_size,
mlx5e_mpwqe_get_log_stride_size(mdev, params) -
MLX5_MPWQE_LOG_STRIDE_SZ_BASE);
- MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);
MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(params));
break;
- default: /* MLX5_WQ_TYPE_LINKED_LIST */
- MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
+ default: /* MLX5_WQ_TYPE_CYCLIC */
MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames);
+ mlx5e_build_rq_frags_info(mdev, params, &param->frags_info);
+ ndsegs = param->frags_info.num_frags;
}
+ MLX5_SET(wq, wq, wq_type, params->rq_wq_type);
MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
- MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
+ MLX5_SET(wq, wq, log_wq_stride,
+ mlx5e_get_rqwq_log_stride(params->rq_wq_type, ndsegs));
MLX5_SET(wq, wq, pd, mdev->mlx5e_res.pdn);
MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter);
MLX5_SET(rqc, rqc, vsd, params->vlan_strip_disable);
MLX5_SET(rqc, rqc, scatter_fcs, params->scatter_fcs_en);
param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
- param->wq.linear = 1;
}
static void mlx5e_build_drop_rq_param(struct mlx5e_priv *priv,
@@ -1906,8 +2135,9 @@ static void mlx5e_build_drop_rq_param(struct mlx5e_priv *priv,
void *rqc = param->rqc;
void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
- MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
- MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
+ MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
+ MLX5_SET(wq, wq, log_wq_stride,
+ mlx5e_get_rqwq_log_stride(MLX5_WQ_TYPE_CYCLIC, 1));
MLX5_SET(rqc, rqc, counter_set_id, priv->drop_rq_q_counter);
param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
@@ -1958,7 +2188,7 @@ static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
log_cq_size = mlx5e_mpwqe_get_log_rq_size(params) +
mlx5e_mpwqe_get_log_num_strides(mdev, params);
break;
- default: /* MLX5_WQ_TYPE_LINKED_LIST */
+ default: /* MLX5_WQ_TYPE_CYCLIC */
log_cq_size = params->log_rq_mtu_frames;
}
@@ -2084,13 +2314,11 @@ static int mlx5e_wait_channels_min_rx_wqes(struct mlx5e_channels *chs)
int err = 0;
int i;
- for (i = 0; i < chs->num; i++) {
- err = mlx5e_wait_for_min_rx_wqes(&chs->c[i]->rq);
- if (err)
- break;
- }
+ for (i = 0; i < chs->num; i++)
+ err |= mlx5e_wait_for_min_rx_wqes(&chs->c[i]->rq,
+ err ? 0 : 20000);
- return err;
+ return err ? -ETIMEDOUT : 0;
}
static void mlx5e_deactivate_channels(struct mlx5e_channels *chs)
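
[Note: the reworked wait above caps the aggregate delay: once one RQ has timed out, the remaining RQs are polled with a zero budget, so every channel is still checked and reported, but the total wait stays bounded by a single 20-second budget rather than 20 seconds per channel. A toy model of that behavior:]

	#include <stdio.h>

	static int wait_rq(int ready, int budget_ms, int *spent_ms)
	{
		if (ready)
			return 0;
		*spent_ms += budget_ms;	/* model: a stuck RQ burns its budget */
		return 1;
	}

	int main(void)
	{
		int ready[4] = { 1, 0, 0, 1 }, err = 0, spent = 0;

		for (int i = 0; i < 4; i++)
			err |= wait_rq(ready[i], err ? 0 : 20000, &spent);
		printf("err=%d total wait=%dms\n", err, spent); /* err=1, 20000ms */
		return 0;
	}
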
@@ -2580,15 +2808,21 @@ static void mlx5e_netdev_set_tcs(struct net_device *netdev)
netdev_set_tc_queue(netdev, tc, nch, 0);
}
-static void mlx5e_build_channels_tx_maps(struct mlx5e_priv *priv)
+static void mlx5e_build_tc2txq_maps(struct mlx5e_priv *priv)
{
- struct mlx5e_channel *c;
- struct mlx5e_txqsq *sq;
+ int max_nch = priv->profile->max_nch(priv->mdev);
int i, tc;
- for (i = 0; i < priv->channels.num; i++)
+ for (i = 0; i < max_nch; i++)
for (tc = 0; tc < priv->profile->max_tc; tc++)
- priv->channel_tc2txq[i][tc] = i + tc * priv->channels.num;
+ priv->channel_tc2txq[i][tc] = i + tc * max_nch;
+}
+
+static void mlx5e_build_tx2sq_maps(struct mlx5e_priv *priv)
+{
+ struct mlx5e_channel *c;
+ struct mlx5e_txqsq *sq;
+ int i, tc;
for (i = 0; i < priv->channels.num; i++) {
c = priv->channels.c[i];
@@ -2608,7 +2842,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
netif_set_real_num_tx_queues(netdev, num_txqs);
netif_set_real_num_rx_queues(netdev, priv->channels.num);
- mlx5e_build_channels_tx_maps(priv);
+ mlx5e_build_tx2sq_maps(priv);
mlx5e_activate_channels(&priv->channels);
netif_tx_start_all_queues(priv->netdev);
@@ -2760,8 +2994,8 @@ static int mlx5e_alloc_drop_rq(struct mlx5_core_dev *mdev,
param->wq.db_numa_node = param->wq.buf_numa_node;
- err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
- &rq->wq_ctrl);
+ err = mlx5_wq_cyc_create(mdev, &param->wq, rqc_wq, &rq->wqe.wq,
+ &rq->wq_ctrl);
if (err)
return err;
@@ -3085,6 +3319,8 @@ static int mlx5e_setup_tc_mqprio(struct net_device *netdev,
if (err)
goto out;
+ priv->max_opened_tc = max_t(u8, priv->max_opened_tc,
+ new_channels.params.num_tc);
mlx5e_switch_priv_channels(priv, &new_channels, NULL);
out:
mutex_unlock(&priv->state_lock);
@@ -3093,22 +3329,23 @@ out:
#ifdef CONFIG_MLX5_ESWITCH
static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
- struct tc_cls_flower_offload *cls_flower)
+ struct tc_cls_flower_offload *cls_flower,
+ int flags)
{
switch (cls_flower->command) {
case TC_CLSFLOWER_REPLACE:
- return mlx5e_configure_flower(priv, cls_flower);
+ return mlx5e_configure_flower(priv, cls_flower, flags);
case TC_CLSFLOWER_DESTROY:
- return mlx5e_delete_flower(priv, cls_flower);
+ return mlx5e_delete_flower(priv, cls_flower, flags);
case TC_CLSFLOWER_STATS:
- return mlx5e_stats_flower(priv, cls_flower);
+ return mlx5e_stats_flower(priv, cls_flower, flags);
default:
return -EOPNOTSUPP;
}
}
-int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
- void *cb_priv)
+static int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
{
struct mlx5e_priv *priv = cb_priv;
@@ -3117,7 +3354,7 @@ int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
switch (type) {
case TC_SETUP_CLSFLOWER:
- return mlx5e_setup_tc_cls_flower(priv, type_data);
+ return mlx5e_setup_tc_cls_flower(priv, type_data, MLX5E_TC_INGRESS);
default:
return -EOPNOTSUPP;
}
@@ -3174,6 +3411,7 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok);
stats->tx_bytes = PPORT_802_3_GET(pstats, a_octets_transmitted_ok);
} else {
+ mlx5e_grp_sw_update_stats(priv);
stats->rx_packets = sstats->rx_packets;
stats->rx_bytes = sstats->rx_bytes;
stats->tx_packets = sstats->tx_packets;
@@ -3248,12 +3486,18 @@ static int set_feature_lro(struct net_device *netdev, bool enable)
mutex_lock(&priv->state_lock);
old_params = &priv->channels.params;
+ if (enable && !MLX5E_GET_PFLAG(old_params, MLX5E_PFLAG_RX_STRIDING_RQ)) {
+ netdev_warn(netdev, "can't set LRO with legacy RQ\n");
+ err = -EINVAL;
+ goto out;
+ }
+
reset = test_bit(MLX5E_STATE_OPENED, &priv->state);
new_channels.params = *old_params;
new_channels.params.lro_en = enable;
- if (old_params->rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST) {
+ if (old_params->rq_wq_type != MLX5_WQ_TYPE_CYCLIC) {
if (mlx5e_rx_mpwqe_is_linear_skb(mdev, old_params) ==
mlx5e_rx_mpwqe_is_linear_skb(mdev, &new_channels.params))
reset = false;
@@ -3417,22 +3661,31 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
netdev_features_t features)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5e_params *params;
mutex_lock(&priv->state_lock);
+ params = &priv->channels.params;
if (!bitmap_empty(priv->fs.vlan.active_svlans, VLAN_N_VID)) {
/* HW strips the outer C-tag header, this is a problem
* for S-tag traffic.
*/
features &= ~NETIF_F_HW_VLAN_CTAG_RX;
- if (!priv->channels.params.vlan_strip_disable)
+ if (!params->vlan_strip_disable)
netdev_warn(netdev, "Dropping C-tag vlan stripping offload due to S-tag vlan\n");
}
+ if (!MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ)) {
+ features &= ~NETIF_F_LRO;
+ if (params->lro_en)
+ netdev_warn(netdev, "Disabling LRO, not supported in legacy RQ\n");
+ }
+
mutex_unlock(&priv->state_lock);
return features;
}
-static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
+int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
+ change_hw_mtu_cb set_mtu_cb)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_channels new_channels = {};
@@ -3450,7 +3703,7 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
new_channels.params = *params;
new_channels.params.sw_mtu = new_mtu;
- if (params->rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST) {
+ if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
u8 ppw_old = mlx5e_mpwqe_log_pkts_per_wqe(params);
u8 ppw_new = mlx5e_mpwqe_log_pkts_per_wqe(&new_channels.params);
@@ -3459,7 +3712,7 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
if (!reset) {
params->sw_mtu = new_mtu;
- mlx5e_set_dev_port_mtu(priv);
+ set_mtu_cb(priv);
netdev->mtu = params->sw_mtu;
goto out;
}
@@ -3468,7 +3721,7 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
if (err)
goto out;
- mlx5e_switch_priv_channels(priv, &new_channels, mlx5e_set_dev_port_mtu);
+ mlx5e_switch_priv_channels(priv, &new_channels, set_mtu_cb);
netdev->mtu = new_channels.params.sw_mtu;
out:
@@ -3476,6 +3729,11 @@ out:
return err;
}
+static int mlx5e_change_nic_mtu(struct net_device *netdev, int new_mtu)
+{
+ return mlx5e_change_mtu(netdev, new_mtu, mlx5e_set_dev_port_mtu);
+}
+
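
[Note: parameterizing the MTU path on a callback lets the representor reuse the NIC logic while passing NULL for the hardware step (see mlx5e_change_rep_mtu later in this patch); mlx5e_switch_priv_channels already tolerates a NULL modify callback, as the NULL argument passed earlier in this file shows. A self-contained illustration of the pattern, with invented names:]

	#include <stdio.h>

	typedef void (*change_hw_mtu_cb)(void *priv);

	static void nic_set_port_mtu(void *priv)
	{
		puts("program HW port MTU");
	}

	static int change_mtu(void *priv, int new_mtu, change_hw_mtu_cb cb)
	{
		/* ... validate and update SW MTU ... */
		if (cb)
			cb(priv);	/* NIC: program port; representor: NULL */
		return 0;
	}

	int main(void)
	{
		change_mtu(NULL, 1500, nic_set_port_mtu);	/* NIC path */
		change_mtu(NULL, 1500, NULL);			/* rep path */
		return 0;
	}
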
int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
{
struct hwtstamp_config config;
@@ -3770,7 +4028,7 @@ static bool mlx5e_tx_timeout_eq_recover(struct net_device *dev,
return false;
netdev_err(dev, "Recover %d eqes on EQ 0x%x\n", eqe_count, eq->eqn);
- sq->channel->stats.eq_rearm++;
+ sq->channel->stats->eq_rearm++;
return true;
}
@@ -3970,7 +4228,7 @@ static const struct net_device_ops mlx5e_netdev_ops = {
.ndo_vlan_rx_kill_vid = mlx5e_vlan_rx_kill_vid,
.ndo_set_features = mlx5e_set_features,
.ndo_fix_features = mlx5e_fix_features,
- .ndo_change_mtu = mlx5e_change_mtu,
+ .ndo_change_mtu = mlx5e_change_nic_mtu,
.ndo_do_ioctl = mlx5e_ioctl,
.ndo_set_tx_maxrate = mlx5e_set_tx_maxrate,
.ndo_udp_tunnel_add = mlx5e_add_vxlan_port,
@@ -4038,7 +4296,7 @@ static bool slow_pci_heuristic(struct mlx5_core_dev *mdev)
u32 link_speed = 0;
u32 pci_bw = 0;
- mlx5e_get_max_linkspeed(mdev, &link_speed);
+ mlx5e_port_max_linkspeed(mdev, &link_speed);
pci_bw = pcie_bandwidth_available(mdev->pdev, NULL, NULL, NULL);
mlx5_core_dbg_once(mdev, "Max link speed = %d, PCI BW = %d\n",
link_speed, pci_bw);
@@ -4049,18 +4307,48 @@ static bool slow_pci_heuristic(struct mlx5_core_dev *mdev)
link_speed > MLX5E_SLOW_PCI_RATIO * pci_bw;
}
-void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
+static struct net_dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode)
{
- params->tx_cq_moderation.cq_period_mode = cq_period_mode;
+ struct net_dim_cq_moder moder;
+
+ moder.cq_period_mode = cq_period_mode;
+ moder.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
+ moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
+ if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
+ moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE;
- params->tx_cq_moderation.pkts =
- MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
- params->tx_cq_moderation.usec =
- MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
+ return moder;
+}
+
+static struct net_dim_cq_moder mlx5e_get_def_rx_moderation(u8 cq_period_mode)
+{
+ struct net_dim_cq_moder moder;
+ moder.cq_period_mode = cq_period_mode;
+ moder.pkts = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
+ moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
- params->tx_cq_moderation.usec =
- MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE;
+ moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;
+
+ return moder;
+}
+
+static u8 mlx5_to_net_dim_cq_period_mode(u8 cq_period_mode)
+{
+ return cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE ?
+ NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE :
+ NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+}
+
+void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
+{
+ if (params->tx_dim_enabled) {
+ u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);
+
+ params->tx_cq_moderation = net_dim_get_def_tx_moderation(dim_period_mode);
+ } else {
+ params->tx_cq_moderation = mlx5e_get_def_tx_moderation(cq_period_mode);
+ }
MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER,
params->tx_cq_moderation.cq_period_mode ==
@@ -4069,28 +4357,12 @@ void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
{
- params->rx_cq_moderation.cq_period_mode = cq_period_mode;
-
- params->rx_cq_moderation.pkts =
- MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
- params->rx_cq_moderation.usec =
- MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
-
- if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
- params->rx_cq_moderation.usec =
- MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;
-
if (params->rx_dim_enabled) {
- switch (cq_period_mode) {
- case MLX5_CQ_PERIOD_MODE_START_FROM_CQE:
- params->rx_cq_moderation =
- net_dim_get_def_profile(NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE);
- break;
- case MLX5_CQ_PERIOD_MODE_START_FROM_EQE:
- default:
- params->rx_cq_moderation =
- net_dim_get_def_profile(NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE);
- }
+ u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);
+
+ params->rx_cq_moderation = net_dim_get_def_rx_moderation(dim_period_mode);
+ } else {
+ params->rx_cq_moderation = mlx5e_get_def_rx_moderation(cq_period_mode);
}
MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER,
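
[Note: both helpers above follow the same selection: when DIM is enabled, the initial profile comes from the net_dim default tables after translating the mlx5 period mode to the net_dim enum; otherwise the fixed mlx5 defaults apply. The RX side condensed into one hedged sketch, using only helpers defined in this patch and in <linux/net_dim.h>:]

	static struct net_dim_cq_moder
	rx_moderation_defaults(bool dim_enabled, u8 cq_period_mode)
	{
		if (dim_enabled) {
			u8 dim_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);

			return net_dim_get_def_rx_moderation(dim_mode);
		}
		return mlx5e_get_def_rx_moderation(cq_period_mode);
	}
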
@@ -4135,9 +4407,16 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, params->rx_cqe_compress_def);
/* RQ */
- if (mlx5e_striding_rq_possible(mdev, params))
- MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ,
- !slow_pci_heuristic(mdev));
+ /* Prefer Striding RQ, unless any of the following holds:
+ * - Striding RQ configuration is not possible/supported.
+ * - The slow PCI heuristic applies (PCI bandwidth below link speed).
+ * - Legacy RQ would use a linear SKB while Striding RQ would use a non-linear one.
+ */
+ if (!slow_pci_heuristic(mdev) &&
+ mlx5e_striding_rq_possible(mdev, params) &&
+ (mlx5e_rx_mpwqe_is_linear_skb(mdev, params) ||
+ !mlx5e_rx_is_linear_skb(mdev, params)))
+ MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, true);
mlx5e_set_rq_type(mdev, params);
mlx5e_init_rq_type_params(mdev, params);
@@ -4154,6 +4433,7 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
+ params->tx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
mlx5e_set_rx_cq_mode_params(params, rx_cq_period_mode);
mlx5e_set_tx_cq_mode_params(params, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
@@ -4179,6 +4459,7 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
priv->profile = profile;
priv->ppriv = ppriv;
priv->msglevel = MLX5E_MSG_LEVEL;
+ priv->max_opened_tc = 1;
mlx5e_build_nic_params(mdev, &priv->channels.params,
profile->max_nch(mdev), netdev->mtu);
@@ -4243,7 +4524,8 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->hw_enc_features |= NETIF_F_HW_VLAN_CTAG_TX;
netdev->hw_enc_features |= NETIF_F_HW_VLAN_CTAG_RX;
- if (!!MLX5_CAP_ETH(mdev, lro_cap))
+ if (!!MLX5_CAP_ETH(mdev, lro_cap) &&
+ mlx5e_check_fragmented_striding_rq_cap(mdev))
netdev->vlan_features |= NETIF_F_LRO;
netdev->hw_features = netdev->vlan_features;
@@ -4320,6 +4602,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
#endif
mlx5e_ipsec_build_netdev(priv);
+ mlx5e_tls_build_netdev(priv);
}
static void mlx5e_create_q_counters(struct mlx5e_priv *priv)
@@ -4361,12 +4644,17 @@ static void mlx5e_nic_init(struct mlx5_core_dev *mdev,
err = mlx5e_ipsec_init(priv);
if (err)
mlx5_core_err(mdev, "IPSec initialization failed, %d\n", err);
+ err = mlx5e_tls_init(priv);
+ if (err)
+ mlx5_core_err(mdev, "TLS initialization failed, %d\n", err);
mlx5e_build_nic_netdev(netdev);
+ mlx5e_build_tc2txq_maps(priv);
mlx5e_vxlan_init(priv);
}
static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
{
+ mlx5e_tls_cleanup(priv);
mlx5e_ipsec_cleanup(priv);
mlx5e_vxlan_cleanup(priv);
}
@@ -4398,7 +4686,7 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
goto err_destroy_direct_tirs;
}
- err = mlx5e_tc_init(priv);
+ err = mlx5e_tc_nic_init(priv);
if (err)
goto err_destroy_flow_steering;
@@ -4419,7 +4707,7 @@ err_destroy_indirect_rqts:
static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
{
- mlx5e_tc_cleanup(priv);
+ mlx5e_tc_nic_cleanup(priv);
mlx5e_destroy_flow_steering(priv);
mlx5e_destroy_direct_tirs(priv);
mlx5e_destroy_indirect_tirs(priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 876c3e4c6193..57987f6546e8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -66,18 +66,36 @@ static const struct counter_desc sw_rep_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
};
-#define NUM_VPORT_REP_COUNTERS ARRAY_SIZE(sw_rep_stats_desc)
+struct vport_stats {
+ u64 vport_rx_packets;
+ u64 vport_tx_packets;
+ u64 vport_rx_bytes;
+ u64 vport_tx_bytes;
+};
+
+static const struct counter_desc vport_rep_stats_desc[] = {
+ { MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_packets) },
+ { MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_bytes) },
+ { MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_packets) },
+ { MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_bytes) },
+};
+
+#define NUM_VPORT_REP_SW_COUNTERS ARRAY_SIZE(sw_rep_stats_desc)
+#define NUM_VPORT_REP_HW_COUNTERS ARRAY_SIZE(vport_rep_stats_desc)
static void mlx5e_rep_get_strings(struct net_device *dev,
u32 stringset, uint8_t *data)
{
- int i;
+ int i, j;
switch (stringset) {
case ETH_SS_STATS:
- for (i = 0; i < NUM_VPORT_REP_COUNTERS; i++)
+ for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
strcpy(data + (i * ETH_GSTRING_LEN),
sw_rep_stats_desc[i].format);
+ for (j = 0; j < NUM_VPORT_REP_HW_COUNTERS; j++, i++)
+ strcpy(data + (i * ETH_GSTRING_LEN),
+ vport_rep_stats_desc[j].format);
break;
}
}
@@ -116,13 +134,13 @@ static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv)
for (i = 0; i < priv->channels.num; i++) {
struct mlx5e_channel *c = priv->channels.c[i];
- rq_stats = &c->rq.stats;
+ rq_stats = c->rq.stats;
s->rx_packets += rq_stats->packets;
s->rx_bytes += rq_stats->bytes;
for (j = 0; j < priv->channels.params.num_tc; j++) {
- sq_stats = &c->sq[j].stats;
+ sq_stats = c->sq[j].stats;
s->tx_packets += sq_stats->packets;
s->tx_bytes += sq_stats->bytes;
@@ -130,17 +148,11 @@ static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv)
}
}
-static void mlx5e_rep_update_stats(struct mlx5e_priv *priv)
-{
- mlx5e_rep_update_sw_counters(priv);
- mlx5e_rep_update_hw_counters(priv);
-}
-
static void mlx5e_rep_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- int i;
+ int i, j;
if (!data)
return;
@@ -148,18 +160,23 @@ static void mlx5e_rep_get_ethtool_stats(struct net_device *dev,
mutex_lock(&priv->state_lock);
if (test_bit(MLX5E_STATE_OPENED, &priv->state))
mlx5e_rep_update_sw_counters(priv);
+ mlx5e_rep_update_hw_counters(priv);
mutex_unlock(&priv->state_lock);
- for (i = 0; i < NUM_VPORT_REP_COUNTERS; i++)
+ for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
data[i] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
sw_rep_stats_desc, i);
+
+ for (j = 0; j < NUM_VPORT_REP_HW_COUNTERS; j++, i++)
+ data[i] = MLX5E_READ_CTR64_CPU(&priv->stats.vf_vport,
+ vport_rep_stats_desc, j);
}
static int mlx5e_rep_get_sset_count(struct net_device *dev, int sset)
{
switch (sset) {
case ETH_SS_STATS:
- return NUM_VPORT_REP_COUNTERS;
+ return NUM_VPORT_REP_SW_COUNTERS + NUM_VPORT_REP_HW_COUNTERS;
default:
return -EOPNOTSUPP;
}
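
[Note: ethtool requires get_strings, get_ethtool_stats and get_sset_count to agree on ordering; the representor now emits the SW counters first, then the vport HW counters, and reports the sum as the set size. A minimal standalone model of that layout, with illustrative names:]

	#include <stdio.h>
	#include <string.h>

	#define GSTRING_LEN 32
	static const char *sw_names[] = { "rx_packets", "rx_bytes" };
	static const char *hw_names[] = { "vport_rx_packets", "vport_rx_bytes" };
	#define NUM_SW (sizeof(sw_names) / sizeof(sw_names[0]))
	#define NUM_HW (sizeof(hw_names) / sizeof(hw_names[0]))

	int main(void)
	{
		char data[(NUM_SW + NUM_HW) * GSTRING_LEN];
		unsigned int i, j;

		for (i = 0; i < NUM_SW; i++)		/* SW block first */
			strcpy(data + i * GSTRING_LEN, sw_names[i]);
		for (j = 0; j < NUM_HW; j++, i++)	/* HW block appended */
			strcpy(data + i * GSTRING_LEN, hw_names[j]);

		for (i = 0; i < NUM_SW + NUM_HW; i++)	/* sset count = SW + HW */
			printf("%u: %s\n", i, data + i * GSTRING_LEN);
		return 0;
	}
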
@@ -681,8 +698,8 @@ static int mlx5e_rep_open(struct net_device *dev)
goto unlock;
if (!mlx5_modify_vport_admin_state(priv->mdev,
- MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
- rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_UP))
+ MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
+ rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_UP))
netif_carrier_on(dev);
unlock:
@@ -699,8 +716,8 @@ static int mlx5e_rep_close(struct net_device *dev)
mutex_lock(&priv->state_lock);
mlx5_modify_vport_admin_state(priv->mdev,
- MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
- rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_DOWN);
+ MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
+ rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_DOWN);
ret = mlx5e_close_locked(dev);
mutex_unlock(&priv->state_lock);
return ret;
@@ -723,15 +740,31 @@ static int mlx5e_rep_get_phys_port_name(struct net_device *dev,
static int
mlx5e_rep_setup_tc_cls_flower(struct mlx5e_priv *priv,
- struct tc_cls_flower_offload *cls_flower)
+ struct tc_cls_flower_offload *cls_flower, int flags)
{
switch (cls_flower->command) {
case TC_CLSFLOWER_REPLACE:
- return mlx5e_configure_flower(priv, cls_flower);
+ return mlx5e_configure_flower(priv, cls_flower, flags);
case TC_CLSFLOWER_DESTROY:
- return mlx5e_delete_flower(priv, cls_flower);
+ return mlx5e_delete_flower(priv, cls_flower, flags);
case TC_CLSFLOWER_STATS:
- return mlx5e_stats_flower(priv, cls_flower);
+ return mlx5e_stats_flower(priv, cls_flower, flags);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int mlx5e_rep_setup_tc_cb_egdev(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
+{
+ struct mlx5e_priv *priv = cb_priv;
+
+ if (!tc_cls_can_offload_and_chain0(priv->netdev, type_data))
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return mlx5e_rep_setup_tc_cls_flower(priv, type_data, MLX5E_TC_EGRESS);
default:
return -EOPNOTSUPP;
}
@@ -747,7 +780,7 @@ static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data,
switch (type) {
case TC_SETUP_CLSFLOWER:
- return mlx5e_rep_setup_tc_cls_flower(priv, type_data);
+ return mlx5e_rep_setup_tc_cls_flower(priv, type_data, MLX5E_TC_INGRESS);
default:
return -EOPNOTSUPP;
}
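
With the new flags argument, one flower handler serves both directions: the block callback on the rep itself passes MLX5E_TC_INGRESS, while the egdev callback registered against the uplink passes MLX5E_TC_EGRESS. A minimal sketch of the split (hypothetical names and flag values; the real callbacks also run the can-offload check first):

    /* Sketch: one classifier handler, direction chosen by the caller. */
    enum { DIR_INGRESS = 1 << 0, DIR_EGRESS = 1 << 1 };  /* illustrative bits */

    static int flower_cfg(void *rule, int flags)
    {
            /* program the rule into the ingress or egress offload tables */
            return 0;
    }

    static int cb_ingress(void *rule) { return flower_cfg(rule, DIR_INGRESS); }
    static int cb_egress(void *rule)  { return flower_cfg(rule, DIR_EGRESS); }
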
@@ -832,6 +865,8 @@ mlx5e_get_sw_stats64(const struct net_device *dev,
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_sw_stats *sstats = &priv->stats.sw;
+ mlx5e_rep_update_sw_counters(priv);
+
stats->rx_packets = sstats->rx_packets;
stats->rx_bytes = sstats->rx_bytes;
stats->tx_packets = sstats->tx_packets;
@@ -865,6 +900,11 @@ static const struct switchdev_ops mlx5e_rep_switchdev_ops = {
.switchdev_port_attr_get = mlx5e_attr_get,
};
+static int mlx5e_change_rep_mtu(struct net_device *netdev, int new_mtu)
+{
+ return mlx5e_change_mtu(netdev, new_mtu, NULL);
+}
+
static const struct net_device_ops mlx5e_netdev_ops_rep = {
.ndo_open = mlx5e_rep_open,
.ndo_stop = mlx5e_rep_close,
@@ -874,6 +914,7 @@ static const struct net_device_ops mlx5e_netdev_ops_rep = {
.ndo_get_stats64 = mlx5e_rep_get_stats,
.ndo_has_offload_stats = mlx5e_has_offload_stats,
.ndo_get_offload_stats = mlx5e_get_offload_stats,
+ .ndo_change_mtu = mlx5e_change_rep_mtu,
};
static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev,
@@ -886,7 +927,7 @@ static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev,
params->hard_mtu = MLX5E_ETH_HARD_MTU;
params->sw_mtu = mtu;
params->log_sq_size = MLX5E_REP_PARAMS_LOG_SQ_SIZE;
- params->rq_wq_type = MLX5_WQ_TYPE_LINKED_LIST;
+ params->rq_wq_type = MLX5_WQ_TYPE_CYCLIC;
params->log_rq_mtu_frames = MLX5E_REP_PARAMS_LOG_RQ_SIZE;
params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
@@ -900,6 +941,10 @@ static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev,
static void mlx5e_build_rep_netdev(struct net_device *netdev)
{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u16 max_mtu;
+
netdev->netdev_ops = &mlx5e_netdev_ops_rep;
netdev->watchdog_timeo = 15 * HZ;
@@ -912,6 +957,10 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev)
netdev->hw_features |= NETIF_F_HW_TC;
eth_hw_addr_random(netdev);
+
+ netdev->min_mtu = ETH_MIN_MTU;
+ mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
+ netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu);
}
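
The rep's max_mtu is derived from the port's hardware MTU, which counts L2 framing the stack's MTU does not; MLX5E_HW2SW_MTU strips that overhead. A one-line sketch of the conversion, assuming hard_mtu carries the per-packet L2 overhead taken from params:

    /* Sketch, assuming hard_mtu = L2 framing overhead (headers, FCS). */
    static inline u16 hw2sw_mtu(u16 hw_mtu, u16 hard_mtu)
    {
            return hw_mtu - hard_mtu;   /* SW MTU excludes L2 framing */
    }
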
static void mlx5e_init_rep(struct mlx5_core_dev *mdev,
@@ -965,14 +1014,8 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
}
rpriv->vport_rx_rule = flow_rule;
- err = mlx5e_tc_init(priv);
- if (err)
- goto err_del_flow_rule;
-
return 0;
-err_del_flow_rule:
- mlx5_del_flow_rules(rpriv->vport_rx_rule);
err_destroy_direct_tirs:
mlx5e_destroy_direct_tirs(priv);
err_destroy_direct_rqts:
@@ -984,7 +1027,6 @@ static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
{
struct mlx5e_rep_priv *rpriv = priv->ppriv;
- mlx5e_tc_cleanup(priv);
mlx5_del_flow_rules(rpriv->vport_rx_rule);
mlx5e_destroy_direct_tirs(priv);
mlx5e_destroy_direct_rqts(priv);
@@ -1014,7 +1056,7 @@ static const struct mlx5e_profile mlx5e_rep_profile = {
.cleanup_rx = mlx5e_cleanup_rep_rx,
.init_tx = mlx5e_init_rep_tx,
.cleanup_tx = mlx5e_cleanup_nic_tx,
- .update_stats = mlx5e_rep_update_stats,
+ .update_stats = mlx5e_rep_update_hw_counters,
.max_nch = mlx5e_get_rep_max_num_channels,
.update_carrier = NULL,
.rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
@@ -1042,8 +1084,15 @@ mlx5e_nic_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
if (err)
goto err_remove_sqs;
+ /* init shared tc flow table */
+ err = mlx5e_tc_esw_init(&rpriv->tc_ht);
+ if (err)
+ goto err_neigh_cleanup;
+
return 0;
+err_neigh_cleanup:
+ mlx5e_rep_neigh_cleanup(rpriv);
err_remove_sqs:
mlx5e_remove_sqs_fwd_rules(priv);
return err;
@@ -1058,9 +1107,8 @@ mlx5e_nic_rep_unload(struct mlx5_eswitch_rep *rep)
if (test_bit(MLX5E_STATE_OPENED, &priv->state))
mlx5e_remove_sqs_fwd_rules(priv);
- /* clean (and re-init) existing uplink offloaded TC rules */
- mlx5e_tc_cleanup(priv);
- mlx5e_tc_init(priv);
+ /* Clean uplink offloaded TC rules; delete the shared TC flow table. */
+ mlx5e_tc_esw_cleanup(&rpriv->tc_ht);
mlx5e_rep_neigh_cleanup(rpriv);
}
@@ -1107,7 +1155,7 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
uplink_rpriv = mlx5_eswitch_get_uplink_priv(dev->priv.eswitch, REP_ETH);
upriv = netdev_priv(uplink_rpriv->netdev);
- err = tc_setup_cb_egdev_register(netdev, mlx5e_setup_tc_block_cb,
+ err = tc_setup_cb_egdev_register(netdev, mlx5e_rep_setup_tc_cb_egdev,
upriv);
if (err)
goto err_neigh_cleanup;
@@ -1122,7 +1170,7 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
return 0;
err_egdev_cleanup:
- tc_setup_cb_egdev_unregister(netdev, mlx5e_setup_tc_block_cb,
+ tc_setup_cb_egdev_unregister(netdev, mlx5e_rep_setup_tc_cb_egdev,
upriv);
err_neigh_cleanup:
@@ -1151,7 +1199,7 @@ mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
uplink_rpriv = mlx5_eswitch_get_uplink_priv(priv->mdev->priv.eswitch,
REP_ETH);
upriv = netdev_priv(uplink_rpriv->netdev);
- tc_setup_cb_egdev_unregister(netdev, mlx5e_setup_tc_block_cb,
+ tc_setup_cb_egdev_unregister(netdev, mlx5e_rep_setup_tc_cb_egdev,
upriv);
mlx5e_rep_neigh_cleanup(rpriv);
mlx5e_detach_netdev(priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
index b9b481f2833a..844d32d5c29f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
@@ -59,6 +59,7 @@ struct mlx5e_rep_priv {
struct net_device *netdev;
struct mlx5_flow_handle *vport_rx_rule;
struct list_head vport_sqs_list;
+ struct rhashtable tc_ht; /* valid for uplink rep */
};
static inline
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 1ff0b0e93804..d3a1dd20e41d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -37,6 +37,7 @@
#include <linux/bpf_trace.h>
#include <net/busy_poll.h>
#include <net/ip6_checksum.h>
+#include <net/page_pool.h>
#include "en.h"
#include "en_tc.h"
#include "eswitch.h"
@@ -53,7 +54,7 @@ static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config)
static inline void mlx5e_read_cqe_slot(struct mlx5e_cq *cq, u32 cqcc,
void *data)
{
- u32 ci = cqcc & cq->wq.fbc.sz_m1;
+ u32 ci = mlx5_cqwq_ctr2ix(&cq->wq, cqcc);
memcpy(data, mlx5_cqwq_get_wqe(&cq->wq, ci), sizeof(struct mlx5_cqe64));
}
@@ -64,7 +65,7 @@ static inline void mlx5e_read_title_slot(struct mlx5e_rq *rq,
mlx5e_read_cqe_slot(cq, cqcc, &cq->title);
cq->decmprs_left = be32_to_cpu(cq->title.byte_cnt);
cq->decmprs_wqe_counter = be16_to_cpu(cq->title.wqe_counter);
- rq->stats.cqe_compress_blks++;
+ rq->stats->cqe_compress_blks++;
}
static inline void mlx5e_read_mini_arr_slot(struct mlx5e_cq *cq, u32 cqcc)
@@ -75,10 +76,11 @@ static inline void mlx5e_read_mini_arr_slot(struct mlx5e_cq *cq, u32 cqcc)
static inline void mlx5e_cqes_update_owner(struct mlx5e_cq *cq, u32 cqcc, int n)
{
- struct mlx5_frag_buf_ctrl *fbc = &cq->wq.fbc;
- u8 op_own = (cqcc >> fbc->log_sz) & 1;
- u32 wq_sz = 1 << fbc->log_sz;
- u32 ci = cqcc & fbc->sz_m1;
+ struct mlx5_cqwq *wq = &cq->wq;
+
+ u8 op_own = mlx5_cqwq_get_ctr_wrap_cnt(wq, cqcc) & 1;
+ u32 ci = mlx5_cqwq_ctr2ix(wq, cqcc);
+ u32 wq_sz = mlx5_cqwq_get_size(wq);
u32 ci_top = min_t(u32, wq_sz, ci + n);
for (; ci < ci_top; ci++, n--) {
@@ -111,7 +113,7 @@ static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq,
mpwrq_get_cqe_consumed_strides(&cq->title);
else
cq->decmprs_wqe_counter =
- (cq->decmprs_wqe_counter + 1) & rq->wq.sz_m1;
+ mlx5_wq_cyc_ctr2ix(&rq->wqe.wq, cq->decmprs_wqe_counter + 1);
}
static inline void mlx5e_decompress_cqe_no_hash(struct mlx5e_rq *rq,
@@ -144,7 +146,7 @@ static inline u32 mlx5e_decompress_cqes_cont(struct mlx5e_rq *rq,
mlx5e_cqes_update_owner(cq, cq->wq.cc, cqcc - cq->wq.cc);
cq->wq.cc = cqcc;
cq->decmprs_left -= cqe_count;
- rq->stats.cqe_compress_pkts += cqe_count;
+ rq->stats->cqe_compress_pkts += cqe_count;
return cqe_count;
}
@@ -162,8 +164,6 @@ static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
return mlx5e_decompress_cqes_cont(rq, cq, 1, budget_rem) - 1;
}
-#define RQ_PAGE_SIZE(rq) ((1 << rq->buff.page_order) << PAGE_SHIFT)
-
static inline bool mlx5e_page_is_reserved(struct page *page)
{
return page_is_pfmemalloc(page) || page_to_nid(page) != numa_mem_id();
@@ -174,14 +174,15 @@ static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq,
{
struct mlx5e_page_cache *cache = &rq->page_cache;
u32 tail_next = (cache->tail + 1) & (MLX5E_CACHE_SIZE - 1);
+ struct mlx5e_rq_stats *stats = rq->stats;
if (tail_next == cache->head) {
- rq->stats.cache_full++;
+ stats->cache_full++;
return false;
}
if (unlikely(mlx5e_page_is_reserved(dma_info->page))) {
- rq->stats.cache_waive++;
+ stats->cache_waive++;
return false;
}
@@ -194,23 +195,24 @@ static inline bool mlx5e_rx_cache_get(struct mlx5e_rq *rq,
struct mlx5e_dma_info *dma_info)
{
struct mlx5e_page_cache *cache = &rq->page_cache;
+ struct mlx5e_rq_stats *stats = rq->stats;
if (unlikely(cache->head == cache->tail)) {
- rq->stats.cache_empty++;
+ stats->cache_empty++;
return false;
}
if (page_ref_count(cache->page_cache[cache->head].page) != 1) {
- rq->stats.cache_busy++;
+ stats->cache_busy++;
return false;
}
*dma_info = cache->page_cache[cache->head];
cache->head = (cache->head + 1) & (MLX5E_CACHE_SIZE - 1);
- rq->stats.cache_reuse++;
+ stats->cache_reuse++;
dma_sync_single_for_device(rq->pdev, dma_info->addr,
- RQ_PAGE_SIZE(rq),
+ PAGE_SIZE,
DMA_FROM_DEVICE);
return true;
}
@@ -221,12 +223,12 @@ static inline int mlx5e_page_alloc_mapped(struct mlx5e_rq *rq,
if (mlx5e_rx_cache_get(rq, dma_info))
return 0;
- dma_info->page = dev_alloc_pages(rq->buff.page_order);
+ dma_info->page = page_pool_dev_alloc_pages(rq->page_pool);
if (unlikely(!dma_info->page))
return -ENOMEM;
dma_info->addr = dma_map_page(rq->pdev, dma_info->page, 0,
- RQ_PAGE_SIZE(rq), rq->buff.map_dir);
+ PAGE_SIZE, rq->buff.map_dir);
if (unlikely(dma_mapping_error(rq->pdev, dma_info->addr))) {
put_page(dma_info->page);
dma_info->page = NULL;
@@ -236,73 +238,124 @@ static inline int mlx5e_page_alloc_mapped(struct mlx5e_rq *rq,
return 0;
}
+static void mlx5e_page_dma_unmap(struct mlx5e_rq *rq,
+ struct mlx5e_dma_info *dma_info)
+{
+ dma_unmap_page(rq->pdev, dma_info->addr, PAGE_SIZE, rq->buff.map_dir);
+}
+
void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info,
bool recycle)
{
- if (likely(recycle) && mlx5e_rx_cache_put(rq, dma_info))
- return;
+ if (likely(recycle)) {
+ if (mlx5e_rx_cache_put(rq, dma_info))
+ return;
- dma_unmap_page(rq->pdev, dma_info->addr, RQ_PAGE_SIZE(rq),
- rq->buff.map_dir);
- put_page(dma_info->page);
+ mlx5e_page_dma_unmap(rq, dma_info);
+ page_pool_recycle_direct(rq->page_pool, dma_info->page);
+ } else {
+ mlx5e_page_dma_unmap(rq, dma_info);
+ put_page(dma_info->page);
+ }
}
-static inline bool mlx5e_page_reuse(struct mlx5e_rq *rq,
- struct mlx5e_wqe_frag_info *wi)
+static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq,
+ struct mlx5e_wqe_frag_info *frag)
{
- return rq->wqe.page_reuse && wi->di.page &&
- (wi->offset + rq->wqe.frag_sz <= RQ_PAGE_SIZE(rq)) &&
- !mlx5e_page_is_reserved(wi->di.page);
+ int err = 0;
+
+ if (!frag->offset)
+ /* On the first frag (offset == 0), replenish the page (the dma_info,
+ * in fact). Other frags that point to the same dma_info (with a
+ * different offset) reuse the freshly allocated page without
+ * replenishing it themselves.
+ */
+ err = mlx5e_page_alloc_mapped(rq, frag->di);
+
+ return err;
}
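
Several frags of one WQE may point at the same dma_info with different offsets, so only the frag at offset 0 replenishes, and only the frag flagged last_in_page releases. A sketch of a two-frag layout under that ownership rule (field names mirror the driver's; sizes are hypothetical):

    /* Sketch: two frags sharing one page descriptor. frag[0] allocates,
     * the frag flagged last_in_page releases; nothing is double-freed.
     */
    struct frag_sketch {
            void *di;                 /* shared page descriptor */
            unsigned int offset;
            bool last_in_page;
    };

    static void layout_two_frags(struct frag_sketch *f, void *di,
                                 unsigned int frag_sz)
    {
            f[0] = (struct frag_sketch){ .di = di, .offset = 0 };
            f[1] = (struct frag_sketch){ .di = di, .offset = frag_sz,
                                         .last_in_page = true };
    }
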
-static int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix)
+static inline void mlx5e_put_rx_frag(struct mlx5e_rq *rq,
+ struct mlx5e_wqe_frag_info *frag)
{
- struct mlx5e_wqe_frag_info *wi = &rq->wqe.frag_info[ix];
+ if (frag->last_in_page)
+ mlx5e_page_release(rq, frag->di, true);
+}
- /* check if page exists, hence can be reused */
- if (!wi->di.page) {
- if (unlikely(mlx5e_page_alloc_mapped(rq, &wi->di)))
- return -ENOMEM;
- wi->offset = 0;
+static inline struct mlx5e_wqe_frag_info *get_frag(struct mlx5e_rq *rq, u16 ix)
+{
+ return &rq->wqe.frags[ix << rq->wqe.info.log_num_frags];
+}
+
+static int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe_cyc *wqe,
+ u16 ix)
+{
+ struct mlx5e_wqe_frag_info *frag = get_frag(rq, ix);
+ int err;
+ int i;
+
+ for (i = 0; i < rq->wqe.info.num_frags; i++, frag++) {
+ err = mlx5e_get_rx_frag(rq, frag);
+ if (unlikely(err))
+ goto free_frags;
+
+ wqe->data[i].addr = cpu_to_be64(frag->di->addr +
+ frag->offset + rq->buff.headroom);
}
- wqe->data.addr = cpu_to_be64(wi->di.addr + wi->offset + rq->buff.headroom);
return 0;
+
+free_frags:
+ while (--i >= 0)
+ mlx5e_put_rx_frag(rq, --frag);
+
+ return err;
}
static inline void mlx5e_free_rx_wqe(struct mlx5e_rq *rq,
struct mlx5e_wqe_frag_info *wi)
{
- mlx5e_page_release(rq, &wi->di, true);
- wi->di.page = NULL;
+ int i;
+
+ for (i = 0; i < rq->wqe.info.num_frags; i++, wi++)
+ mlx5e_put_rx_frag(rq, wi);
}
-static inline void mlx5e_free_rx_wqe_reuse(struct mlx5e_rq *rq,
- struct mlx5e_wqe_frag_info *wi)
+void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
{
- if (mlx5e_page_reuse(rq, wi)) {
- rq->stats.page_reuse++;
- return;
- }
+ struct mlx5e_wqe_frag_info *wi = get_frag(rq, ix);
mlx5e_free_rx_wqe(rq, wi);
}
-void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
+static int mlx5e_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, u8 wqe_bulk)
{
- struct mlx5e_wqe_frag_info *wi = &rq->wqe.frag_info[ix];
+ struct mlx5_wq_cyc *wq = &rq->wqe.wq;
+ int err;
+ int i;
- if (wi->di.page)
- mlx5e_free_rx_wqe(rq, wi);
+ for (i = 0; i < wqe_bulk; i++) {
+ struct mlx5e_rx_wqe_cyc *wqe = mlx5_wq_cyc_get_wqe(wq, ix + i);
+
+ err = mlx5e_alloc_rx_wqe(rq, wqe, ix + i);
+ if (unlikely(err))
+ goto free_wqes;
+ }
+
+ return 0;
+
+free_wqes:
+ while (--i >= 0)
+ mlx5e_dealloc_rx_wqe(rq, ix + i);
+
+ return err;
}
-static inline void mlx5e_add_skb_frag_mpwqe(struct mlx5e_rq *rq,
- struct sk_buff *skb,
- struct mlx5e_dma_info *di,
- u32 frag_offset, u32 len)
+static inline void
+mlx5e_add_skb_frag(struct mlx5e_rq *rq, struct sk_buff *skb,
+ struct mlx5e_dma_info *di, u32 frag_offset, u32 len,
+ unsigned int truesize)
{
- unsigned int truesize = ALIGN(len, BIT(rq->mpwqe.log_stride_sz));
-
dma_sync_single_for_cpu(rq->pdev,
di->addr + frag_offset,
len, DMA_FROM_DEVICE);
@@ -312,29 +365,33 @@ static inline void mlx5e_add_skb_frag_mpwqe(struct mlx5e_rq *rq,
}
static inline void
+mlx5e_copy_skb_header(struct device *pdev, struct sk_buff *skb,
+ struct mlx5e_dma_info *dma_info,
+ int offset_from, int offset_to, u32 headlen)
+{
+ const void *from = page_address(dma_info->page) + offset_from;
+ /* Aligning len to sizeof(long) optimizes memcpy performance */
+ unsigned int len = ALIGN(headlen, sizeof(long));
+
+ dma_sync_single_for_cpu(pdev, dma_info->addr + offset_from, len,
+ DMA_FROM_DEVICE);
+ skb_copy_to_linear_data_offset(skb, offset_to, from, len);
+}
+
+static inline void
mlx5e_copy_skb_header_mpwqe(struct device *pdev,
struct sk_buff *skb,
struct mlx5e_dma_info *dma_info,
u32 offset, u32 headlen)
{
u16 headlen_pg = min_t(u32, headlen, PAGE_SIZE - offset);
- unsigned int len;
- /* Aligning len to sizeof(long) optimizes memcpy performance */
- len = ALIGN(headlen_pg, sizeof(long));
- dma_sync_single_for_cpu(pdev, dma_info->addr + offset, len,
- DMA_FROM_DEVICE);
- skb_copy_to_linear_data(skb, page_address(dma_info->page) + offset, len);
+ mlx5e_copy_skb_header(pdev, skb, dma_info, offset, 0, headlen_pg);
if (unlikely(offset + headlen > PAGE_SIZE)) {
dma_info++;
- headlen_pg = len;
- len = ALIGN(headlen - headlen_pg, sizeof(long));
- dma_sync_single_for_cpu(pdev, dma_info->addr, len,
- DMA_FROM_DEVICE);
- skb_copy_to_linear_data_offset(skb, headlen_pg,
- page_address(dma_info->page),
- len);
+ mlx5e_copy_skb_header(pdev, skb, dma_info, 0, headlen_pg,
+ headlen - headlen_pg);
}
}
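
The MPWQE header copy may straddle a page boundary; the helper is now called twice, first for the bytes left in the current page, then from offset 0 of the next page into the skb at offset headlen_pg. A worked sketch of the split, assuming PAGE_SIZE is 4096:

    /* Worked sketch of the split, assuming PAGE_SIZE == 4096. */
    static void split_header(unsigned int headlen, unsigned int offset)
    {
            unsigned int first = min(headlen, 4096 - offset); /* current page */
            unsigned int rest = headlen - first;              /* next page    */

            /* e.g. headlen 256 at offset 4032: first = 64, rest = 192 */
    }
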
@@ -352,8 +409,8 @@ void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi)
static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq)
{
- struct mlx5_wq_ll *wq = &rq->wq;
- struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);
+ struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
+ struct mlx5e_rx_wqe_ll *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);
rq->mpwqe.umr_in_progress = false;
@@ -370,6 +427,22 @@ static inline u16 mlx5e_icosq_wrap_cnt(struct mlx5e_icosq *sq)
return sq->pc >> MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
}
+static inline void mlx5e_fill_icosq_frag_edge(struct mlx5e_icosq *sq,
+ struct mlx5_wq_cyc *wq,
+ u16 pi, u16 frag_pi)
+{
+ struct mlx5e_sq_wqe_info *edge_wi, *wi = &sq->db.ico_wqe[pi];
+ u8 nnops = mlx5_wq_cyc_get_frag_size(wq) - frag_pi;
+
+ edge_wi = wi + nnops;
+
+ /* Fill the SQ frag edge with NOPs so that no WQE wraps across two pages. */
+ for (; wi < edge_wi; wi++) {
+ wi->opcode = MLX5_OPCODE_NOP;
+ mlx5e_post_nop(wq, sq->sqn, &sq->pc);
+ }
+}
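
mlx5_wq_cyc_get_frag_size() here is the number of WQEBBs in one page-sized fragment of the SQ; when the multi-WQEBB UMR would spill past the fragment end, the remainder is padded with NOPs so the WQE starts on a fresh fragment. A worked sketch of the check, with hypothetical sizes:

    /* Sketch with hypothetical sizes: 8-WQEBB fragments, 4-WQEBB UMR. */
    #define FRAG_WQEBBS 8
    #define UMR_WQEBBS  4

    static bool needs_nop_fill(unsigned int frag_pi)
    {
            /* frag_pi: producer index within the current fragment */
            return frag_pi + UMR_WQEBBS > FRAG_WQEBBS;
            /* e.g. frag_pi 6: 6 + 4 > 8 -> pad 2 NOPs, UMR starts next frag */
    }
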
+
static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
{
struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
@@ -378,14 +451,16 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
struct mlx5_wq_cyc *wq = &sq->wq;
struct mlx5e_umr_wqe *umr_wqe;
u16 xlt_offset = ix << (MLX5E_LOG_ALIGNED_MPWQE_PPW - 1);
+ u16 pi, frag_pi;
int err;
- u16 pi;
int i;
- /* fill sq edge with nops to avoid wqe wrap around */
- while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) {
- sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;
- mlx5e_post_nop(wq, sq->sqn, &sq->pc);
+ pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+ frag_pi = mlx5_wq_cyc_ctr2fragix(wq, sq->pc);
+
+ if (unlikely(frag_pi + MLX5E_UMR_WQEBBS > mlx5_wq_cyc_get_frag_size(wq))) {
+ mlx5e_fill_icosq_frag_edge(sq, wq, pi, frag_pi);
+ pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
}
umr_wqe = mlx5_wq_cyc_get_wqe(wq, pi);
@@ -421,7 +496,7 @@ err_unmap:
dma_info--;
mlx5e_page_release(rq, dma_info, true);
}
- rq->stats.buff_alloc_err++;
+ rq->stats->buff_alloc_err++;
return err;
}
@@ -435,31 +510,34 @@ void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
{
- struct mlx5_wq_ll *wq = &rq->wq;
+ struct mlx5_wq_cyc *wq = &rq->wqe.wq;
+ u8 wqe_bulk;
int err;
- if (unlikely(!MLX5E_TEST_BIT(rq->state, MLX5E_RQ_STATE_ENABLED)))
+ if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
return false;
- if (mlx5_wq_ll_is_full(wq))
+ wqe_bulk = rq->wqe.info.wqe_bulk;
+
+ if (mlx5_wq_cyc_missing(wq) < wqe_bulk)
return false;
do {
- struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);
+ u16 head = mlx5_wq_cyc_get_head(wq);
- err = mlx5e_alloc_rx_wqe(rq, wqe, wq->head);
+ err = mlx5e_alloc_rx_wqes(rq, head, wqe_bulk);
if (unlikely(err)) {
- rq->stats.buff_alloc_err++;
+ rq->stats->buff_alloc_err++;
break;
}
- mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index));
- } while (!mlx5_wq_ll_is_full(wq));
+ mlx5_wq_cyc_push_n(wq, wqe_bulk);
+ } while (mlx5_wq_cyc_missing(wq) >= wqe_bulk);
/* ensure wqes are visible to device before updating doorbell record */
dma_wmb();
- mlx5_wq_ll_update_db_record(wq);
+ mlx5_wq_cyc_update_db_record(wq);
return !!err;
}
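
The cyclic-RQ refill now runs in fixed bulks: it bails unless at least wqe_bulk slots are missing, allocates a whole bulk at the head, and advances the producer once per bulk; the dma_wmb() orders the WQE writes before the doorbell-record update that the device polls. A condensed sketch of the loop (the wq_* helpers stand in for the mlx5_wq_cyc_* accessors used above):

    /* Condensed sketch; wq_* helpers stand in for mlx5_wq_cyc_*. */
    struct wq;
    int wq_missing(struct wq *wq);
    unsigned int wq_head(struct wq *wq);
    int alloc_wqes(struct wq *wq, unsigned int head, unsigned int bulk);
    void wq_push_n(struct wq *wq, unsigned int bulk);
    void wq_update_db(struct wq *wq);

    static bool refill(struct wq *wq, unsigned int bulk)
    {
            int err = 0;

            if (wq_missing(wq) < bulk)
                    return false;

            do {
                    err = alloc_wqes(wq, wq_head(wq), bulk);
                    if (err)
                            break;
                    wq_push_n(wq, bulk);
            } while (wq_missing(wq) >= bulk);

            dma_wmb();          /* WQEs visible before the doorbell record */
            wq_update_db(wq);
            return !!err;
    }
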
@@ -470,7 +548,7 @@ static inline void mlx5e_poll_ico_single_cqe(struct mlx5e_cq *cq,
struct mlx5_cqe64 *cqe)
{
struct mlx5_wq_cyc *wq = &sq->wq;
- u16 ci = be16_to_cpu(cqe->wqe_counter) & wq->sz_m1;
+ u16 ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
struct mlx5e_sq_wqe_info *icowi = &sq->db.ico_wqe[ci];
mlx5_cqwq_pop(&cq->wq);
@@ -496,7 +574,7 @@ static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq)
struct mlx5e_icosq *sq = container_of(cq, struct mlx5e_icosq, cq);
struct mlx5_cqe64 *cqe;
- if (unlikely(!MLX5E_TEST_BIT(sq->state, MLX5E_SQ_STATE_ENABLED)))
+ if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
return;
cqe = mlx5_cqwq_get_cqe(&cq->wq);
@@ -511,9 +589,9 @@ static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq)
bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
{
- struct mlx5_wq_ll *wq = &rq->wq;
+ struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
- if (unlikely(!MLX5E_TEST_BIT(rq->state, MLX5E_RQ_STATE_ENABLED)))
+ if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
return false;
mlx5e_poll_ico_cq(&rq->channel->icosq.cq, rq);
@@ -660,6 +738,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
struct sk_buff *skb,
bool lro)
{
+ struct mlx5e_rq_stats *stats = rq->stats;
int network_depth = 0;
if (unlikely(!(netdev->features & NETIF_F_RXCSUM)))
@@ -667,7 +746,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
if (lro) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
- rq->stats.csum_unnecessary++;
+ stats->csum_unnecessary++;
return;
}
@@ -685,7 +764,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
if (unlikely(netdev->features & NETIF_F_RXFCS))
skb->csum = csum_add(skb->csum,
(__force __wsum)mlx5e_get_fcs(skb));
- rq->stats.csum_complete++;
+ stats->csum_complete++;
return;
}
@@ -695,15 +774,15 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
if (cqe_is_tunneled(cqe)) {
skb->csum_level = 1;
skb->encapsulation = 1;
- rq->stats.csum_unnecessary_inner++;
+ stats->csum_unnecessary_inner++;
return;
}
- rq->stats.csum_unnecessary++;
+ stats->csum_unnecessary++;
return;
}
csum_none:
skb->ip_summed = CHECKSUM_NONE;
- rq->stats.csum_none++;
+ stats->csum_none++;
}
static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
@@ -711,20 +790,20 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
struct mlx5e_rq *rq,
struct sk_buff *skb)
{
+ u8 lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
+ struct mlx5e_rq_stats *stats = rq->stats;
struct net_device *netdev = rq->netdev;
- int lro_num_seg;
skb->mac_len = ETH_HLEN;
- lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
if (lro_num_seg > 1) {
mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
/* Subtract one since we already counted this as one
* "regular" packet in mlx5e_complete_rx_cqe()
*/
- rq->stats.packets += lro_num_seg - 1;
- rq->stats.lro_packets++;
- rq->stats.lro_bytes += cqe_bcnt;
+ stats->packets += lro_num_seg - 1;
+ stats->lro_packets++;
+ stats->lro_bytes += cqe_bcnt;
}
if (unlikely(mlx5e_rx_hw_stamp(rq->tstamp)))
@@ -739,7 +818,7 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
if (cqe_has_vlan(cqe)) {
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
be16_to_cpu(cqe->vlan_info));
- rq->stats.removed_vlan_packets++;
+ stats->removed_vlan_packets++;
}
skb->mark = be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK;
@@ -753,8 +832,10 @@ static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
u32 cqe_bcnt,
struct sk_buff *skb)
{
- rq->stats.packets++;
- rq->stats.bytes += cqe_bcnt;
+ struct mlx5e_rq_stats *stats = rq->stats;
+
+ stats->packets++;
+ stats->bytes += cqe_bcnt;
mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
}
@@ -762,7 +843,7 @@ static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq)
{
struct mlx5_wq_cyc *wq = &sq->wq;
struct mlx5e_tx_wqe *wqe;
- u16 pi = (sq->pc - 1) & wq->sz_m1; /* last pi */
+ u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc - 1); /* last pi */
wqe = mlx5_wq_cyc_get_wqe(wq, pi);
@@ -775,7 +856,7 @@ static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
{
struct mlx5e_xdpsq *sq = &rq->xdpsq;
struct mlx5_wq_cyc *wq = &sq->wq;
- u16 pi = sq->pc & wq->sz_m1;
+ u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
@@ -786,10 +867,12 @@ static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
dma_addr_t dma_addr = di->addr + data_offset;
unsigned int dma_len = xdp->data_end - xdp->data;
+ struct mlx5e_rq_stats *stats = rq->stats;
+
prefetchw(wqe);
if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE || rq->hw_mtu < dma_len)) {
- rq->stats.xdp_drop++;
+ stats->xdp_drop++;
return false;
}
@@ -799,7 +882,7 @@ static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
mlx5e_xmit_xdp_doorbell(sq);
sq->db.doorbell = false;
}
- rq->stats.xdp_tx_full++;
+ stats->xdp_tx_full++;
return false;
}
@@ -833,18 +916,19 @@ static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
sq->db.doorbell = true;
- rq->stats.xdp_tx++;
+ stats->xdp_tx++;
return true;
}
/* returns true if packet was consumed by xdp */
-static inline int mlx5e_xdp_handle(struct mlx5e_rq *rq,
- struct mlx5e_dma_info *di,
- void *va, u16 *rx_headroom, u32 *len)
+static inline bool mlx5e_xdp_handle(struct mlx5e_rq *rq,
+ struct mlx5e_dma_info *di,
+ void *va, u16 *rx_headroom, u32 *len)
{
- const struct bpf_prog *prog = READ_ONCE(rq->xdp_prog);
+ struct bpf_prog *prog = READ_ONCE(rq->xdp_prog);
struct xdp_buff xdp;
u32 act;
+ int err;
if (!prog)
return false;
@@ -865,12 +949,21 @@ static inline int mlx5e_xdp_handle(struct mlx5e_rq *rq,
if (unlikely(!mlx5e_xmit_xdp_frame(rq, di, &xdp)))
trace_xdp_exception(rq->netdev, prog, act);
return true;
+ case XDP_REDIRECT:
+ /* When XDP is enabled, the page refcount is 1 at this point. */
+ err = xdp_do_redirect(rq->netdev, &xdp, prog);
+ if (!err) {
+ __set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags);
+ rq->xdpsq.db.redirect_flush = true;
+ mlx5e_page_dma_unmap(rq, di);
+ }
+ return true;
default:
bpf_warn_invalid_xdp_action(act);
case XDP_ABORTED:
trace_xdp_exception(rq->netdev, prog, act);
case XDP_DROP:
- rq->stats.xdp_drop++;
+ rq->stats->xdp_drop++;
return true;
}
}
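
XDP_REDIRECT joins the verdict switch: on success the page is unmapped, since ownership moves to the redirect target, the RQ is flagged so the WQE is not recycled, and the map flush is deferred to the end of the NAPI poll via redirect_flush. A compact sketch of the verdict contract (the boolean means "consumed by XDP"):

    /* Sketch of the verdict contract: true = frame consumed by XDP. */
    #include <linux/bpf.h>

    static bool xdp_consumed(u32 act)
    {
            switch (act) {
            case XDP_PASS:
                    return false;   /* build an skb, hand it to the stack    */
            case XDP_TX:            /* same-port transmit, recycle later     */
            case XDP_REDIRECT:      /* page ownership leaves this RQ         */
                    return true;
            default:                /* XDP_ABORTED / XDP_DROP: count + drop  */
                    return true;
            }
    }
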
@@ -883,7 +976,7 @@ struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va,
struct sk_buff *skb = build_skb(va, frag_size);
if (unlikely(!skb)) {
- rq->stats.buff_alloc_err++;
+ rq->stats->buff_alloc_err++;
return NULL;
}
@@ -893,11 +986,11 @@ struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va,
return skb;
}
-static inline
-struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
- struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt)
+struct sk_buff *
+mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
+ struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt)
{
- struct mlx5e_dma_info *di = &wi->di;
+ struct mlx5e_dma_info *di = wi->di;
u16 rx_headroom = rq->buff.headroom;
struct sk_buff *skb;
void *va, *data;
@@ -910,11 +1003,11 @@ struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
dma_sync_single_range_for_cpu(rq->pdev, di->addr, wi->offset,
frag_size, DMA_FROM_DEVICE);
+ prefetchw(va); /* xdp_frame data area */
prefetch(data);
- wi->offset += frag_size;
if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
- rq->stats.wqe_err++;
+ rq->stats->wqe_err++;
return NULL;
}
@@ -934,41 +1027,87 @@ struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
return skb;
}
+struct sk_buff *
+mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
+ struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt)
+{
+ struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
+ struct mlx5e_wqe_frag_info *head_wi = wi;
+ u16 headlen = min_t(u32, MLX5E_RX_MAX_HEAD, cqe_bcnt);
+ u16 frag_headlen = headlen;
+ u16 byte_cnt = cqe_bcnt - headlen;
+ struct sk_buff *skb;
+
+ if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
+ rq->stats->wqe_err++;
+ return NULL;
+ }
+
+ /* XDP is not supported in this configuration, as incoming packets
+ * may be spread across multiple pages.
+ */
+ skb = napi_alloc_skb(rq->cq.napi,
+ ALIGN(MLX5E_RX_MAX_HEAD, sizeof(long)));
+ if (unlikely(!skb)) {
+ rq->stats->buff_alloc_err++;
+ return NULL;
+ }
+
+ prefetchw(skb->data);
+
+ while (byte_cnt) {
+ u16 frag_consumed_bytes =
+ min_t(u16, frag_info->frag_size - frag_headlen, byte_cnt);
+
+ mlx5e_add_skb_frag(rq, skb, wi->di, wi->offset + frag_headlen,
+ frag_consumed_bytes, frag_info->frag_stride);
+ byte_cnt -= frag_consumed_bytes;
+ frag_headlen = 0;
+ frag_info++;
+ wi++;
+ }
+
+ /* copy header */
+ mlx5e_copy_skb_header(rq->pdev, skb, head_wi->di, head_wi->offset,
+ 0, headlen);
+ /* The skb linear part was allocated to hold headlen, aligned to long. */
+ skb->tail += headlen;
+ skb->len += headlen;
+
+ return skb;
+}
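
The non-linear builder copies at most MLX5E_RX_MAX_HEAD bytes into the skb head, then attaches the remainder as page frags, walking the frag-info and WQE-frag arrays in lockstep; frag_headlen offsets only the first frag, since the header bytes were consumed from it. A condensed sketch of the byte accounting, with assumed sizes in the comment:

    /* Byte-accounting sketch. E.g. cqe_bcnt 3000, max_head 256,
     * frag_size 2048: head copies 256, frag 0 attaches 1792,
     * frag 1 attaches 952.
     */
    static void walk_frags(unsigned int cqe_bcnt, unsigned int max_head,
                           unsigned int frag_size)
    {
            unsigned int headlen = min(max_head, cqe_bcnt);
            unsigned int frag_headlen = headlen;
            unsigned int byte_cnt = cqe_bcnt - headlen;

            while (byte_cnt) {
                    unsigned int take = min(frag_size - frag_headlen, byte_cnt);

                    /* attach 'take' bytes of the current frag to the skb */
                    byte_cnt -= take;
                    frag_headlen = 0;   /* only the first frag is offset */
            }
    }
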
+
void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
+ struct mlx5_wq_cyc *wq = &rq->wqe.wq;
struct mlx5e_wqe_frag_info *wi;
- struct mlx5e_rx_wqe *wqe;
- __be16 wqe_counter_be;
struct sk_buff *skb;
- u16 wqe_counter;
u32 cqe_bcnt;
+ u16 ci;
- wqe_counter_be = cqe->wqe_counter;
- wqe_counter = be16_to_cpu(wqe_counter_be);
- wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
- wi = &rq->wqe.frag_info[wqe_counter];
- cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
+ ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
+ wi = get_frag(rq, ci);
+ cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
- skb = skb_from_cqe(rq, cqe, wi, cqe_bcnt);
+ skb = rq->wqe.skb_from_cqe(rq, cqe, wi, cqe_bcnt);
if (!skb) {
/* probably for XDP */
if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
- wi->di.page = NULL;
- /* do not return page to cache, it will be returned on XDP_TX completion */
- goto wq_ll_pop;
+ /* Do not return the page to the cache;
+ * it will be returned on XDP_TX completion.
+ */
+ goto wq_cyc_pop;
}
- /* probably an XDP_DROP, save the page-reuse checks */
- mlx5e_free_rx_wqe(rq, wi);
- goto wq_ll_pop;
+ goto free_wqe;
}
mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
napi_gro_receive(rq->cq.napi, skb);
- mlx5e_free_rx_wqe_reuse(rq, wi);
-wq_ll_pop:
- mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
- &wqe->next.next_wqe_index);
+free_wqe:
+ mlx5e_free_rx_wqe(rq, wi);
+wq_cyc_pop:
+ mlx5_wq_cyc_pop(wq);
}
#ifdef CONFIG_MLX5_ESWITCH
@@ -978,29 +1117,26 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_eswitch_rep *rep = rpriv->rep;
+ struct mlx5_wq_cyc *wq = &rq->wqe.wq;
struct mlx5e_wqe_frag_info *wi;
- struct mlx5e_rx_wqe *wqe;
struct sk_buff *skb;
- __be16 wqe_counter_be;
- u16 wqe_counter;
u32 cqe_bcnt;
+ u16 ci;
- wqe_counter_be = cqe->wqe_counter;
- wqe_counter = be16_to_cpu(wqe_counter_be);
- wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
- wi = &rq->wqe.frag_info[wqe_counter];
- cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
+ ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
+ wi = get_frag(rq, ci);
+ cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
- skb = skb_from_cqe(rq, cqe, wi, cqe_bcnt);
+ skb = rq->wqe.skb_from_cqe(rq, cqe, wi, cqe_bcnt);
if (!skb) {
+ /* probably for XDP */
if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
- wi->di.page = NULL;
- /* do not return page to cache, it will be returned on XDP_TX completion */
- goto wq_ll_pop;
+ /* Do not return the page to the cache;
+ * it will be returned on XDP_TX completion.
+ */
+ goto wq_cyc_pop;
}
- /* probably an XDP_DROP, save the page-reuse checks */
- mlx5e_free_rx_wqe(rq, wi);
- goto wq_ll_pop;
+ goto free_wqe;
}
mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
@@ -1010,10 +1146,10 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
napi_gro_receive(rq->cq.napi, skb);
- mlx5e_free_rx_wqe_reuse(rq, wi);
-wq_ll_pop:
- mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
- &wqe->next.next_wqe_index);
+free_wqe:
+ mlx5e_free_rx_wqe(rq, wi);
+wq_cyc_pop:
+ mlx5_wq_cyc_pop(wq);
}
#endif
@@ -1021,7 +1157,7 @@ struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
u16 cqe_bcnt, u32 head_offset, u32 page_idx)
{
- u16 headlen = min_t(u16, MLX5_MPWRQ_SMALL_PACKET_THRESHOLD, cqe_bcnt);
+ u16 headlen = min_t(u16, MLX5E_RX_MAX_HEAD, cqe_bcnt);
struct mlx5e_dma_info *di = &wi->umr.dma_info[page_idx];
u32 frag_offset = head_offset + headlen;
u32 byte_cnt = cqe_bcnt - headlen;
@@ -1029,9 +1165,9 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
struct sk_buff *skb;
skb = napi_alloc_skb(rq->cq.napi,
- ALIGN(MLX5_MPWRQ_SMALL_PACKET_THRESHOLD, sizeof(long)));
+ ALIGN(MLX5E_RX_MAX_HEAD, sizeof(long)));
if (unlikely(!skb)) {
- rq->stats.buff_alloc_err++;
+ rq->stats->buff_alloc_err++;
return NULL;
}
@@ -1045,9 +1181,11 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
while (byte_cnt) {
u32 pg_consumed_bytes =
min_t(u32, PAGE_SIZE - frag_offset, byte_cnt);
+ unsigned int truesize =
+ ALIGN(pg_consumed_bytes, BIT(rq->mpwqe.log_stride_sz));
- mlx5e_add_skb_frag_mpwqe(rq, skb, di, frag_offset,
- pg_consumed_bytes);
+ mlx5e_add_skb_frag(rq, skb, di, frag_offset,
+ pg_consumed_bytes, truesize);
byte_cnt -= pg_consumed_bytes;
frag_offset = 0;
di++;
@@ -1110,19 +1248,20 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
u32 wqe_offset = stride_ix << rq->mpwqe.log_stride_sz;
u32 head_offset = wqe_offset & (PAGE_SIZE - 1);
u32 page_idx = wqe_offset >> PAGE_SHIFT;
- struct mlx5e_rx_wqe *wqe;
+ struct mlx5e_rx_wqe_ll *wqe;
+ struct mlx5_wq_ll *wq;
struct sk_buff *skb;
u16 cqe_bcnt;
wi->consumed_strides += cstrides;
if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
- rq->stats.wqe_err++;
+ rq->stats->wqe_err++;
goto mpwrq_cqe_out;
}
if (unlikely(mpwrq_is_filler_cqe(cqe))) {
- rq->stats.mpwqe_filler++;
+ rq->stats->mpwqe_filler++;
goto mpwrq_cqe_out;
}
@@ -1140,9 +1279,10 @@ mpwrq_cqe_out:
if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
return;
- wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_id);
+ wq = &rq->mpwqe.wq;
+ wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
mlx5e_free_rx_mpwqe(rq, wi);
- mlx5_wq_ll_pop(&rq->wq, cqe->wqe_id, &wqe->next.next_wqe_index);
+ mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
}
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
@@ -1152,7 +1292,7 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
struct mlx5_cqe64 *cqe;
int work_done = 0;
- if (unlikely(!MLX5E_TEST_BIT(rq->state, MLX5E_RQ_STATE_ENABLED)))
+ if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
return 0;
if (cq->decmprs_left)
@@ -1182,6 +1322,11 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
xdpsq->db.doorbell = false;
}
+ if (xdpsq->db.redirect_flush) {
+ xdp_do_flush_map();
+ xdpsq->db.redirect_flush = false;
+ }
+
mlx5_cqwq_update_db_record(&cq->wq);
/* ensure cq space is freed before enabling more cqes */
@@ -1200,7 +1345,7 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
sq = container_of(cq, struct mlx5e_xdpsq, cq);
- if (unlikely(!MLX5E_TEST_BIT(sq->state, MLX5E_SQ_STATE_ENABLED)))
+ if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
return false;
cqe = mlx5_cqwq_get_cqe(&cq->wq);
@@ -1229,7 +1374,7 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
last_wqe = (sqcc == wqe_counter);
- ci = sqcc & sq->wq.sz_m1;
+ ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
di = &sq->db.di[ci];
sqcc++;
@@ -1254,7 +1399,7 @@ void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq)
u16 ci;
while (sq->cc != sq->pc) {
- ci = sq->cc & sq->wq.sz_m1;
+ ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc);
di = &sq->db.di[ci];
sq->cc++;
@@ -1272,6 +1417,7 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
u32 cqe_bcnt,
struct sk_buff *skb)
{
+ struct mlx5e_rq_stats *stats = rq->stats;
struct hwtstamp_config *tstamp;
struct net_device *netdev;
struct mlx5e_priv *priv;
@@ -1333,27 +1479,24 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
skb->dev = netdev;
- rq->stats.csum_complete++;
- rq->stats.packets++;
- rq->stats.bytes += cqe_bcnt;
+ stats->csum_complete++;
+ stats->packets++;
+ stats->bytes += cqe_bcnt;
}
void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
+ struct mlx5_wq_cyc *wq = &rq->wqe.wq;
struct mlx5e_wqe_frag_info *wi;
- struct mlx5e_rx_wqe *wqe;
- __be16 wqe_counter_be;
struct sk_buff *skb;
- u16 wqe_counter;
u32 cqe_bcnt;
+ u16 ci;
- wqe_counter_be = cqe->wqe_counter;
- wqe_counter = be16_to_cpu(wqe_counter_be);
- wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
- wi = &rq->wqe.frag_info[wqe_counter];
- cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
+ ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
+ wi = get_frag(rq, ci);
+ cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
- skb = skb_from_cqe(rq, cqe, wi, cqe_bcnt);
+ skb = rq->wqe.skb_from_cqe(rq, cqe, wi, cqe_bcnt);
if (!skb)
goto wq_free_wqe;
@@ -1365,9 +1508,8 @@ void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
napi_gro_receive(rq->cq.napi, skb);
wq_free_wqe:
- mlx5e_free_rx_wqe_reuse(rq, wi);
- mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
- &wqe->next.next_wqe_index);
+ mlx5e_free_rx_wqe(rq, wi);
+ mlx5_wq_cyc_pop(wq);
}
#endif /* CONFIG_MLX5_CORE_IPOIB */
@@ -1376,38 +1518,34 @@ wq_free_wqe:
void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
+ struct mlx5_wq_cyc *wq = &rq->wqe.wq;
struct mlx5e_wqe_frag_info *wi;
- struct mlx5e_rx_wqe *wqe;
- __be16 wqe_counter_be;
struct sk_buff *skb;
- u16 wqe_counter;
u32 cqe_bcnt;
+ u16 ci;
- wqe_counter_be = cqe->wqe_counter;
- wqe_counter = be16_to_cpu(wqe_counter_be);
- wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
- wi = &rq->wqe.frag_info[wqe_counter];
- cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
+ ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
+ wi = get_frag(rq, ci);
+ cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
- skb = skb_from_cqe(rq, cqe, wi, cqe_bcnt);
+ skb = rq->wqe.skb_from_cqe(rq, cqe, wi, cqe_bcnt);
if (unlikely(!skb)) {
/* a DROP, save the page-reuse checks */
mlx5e_free_rx_wqe(rq, wi);
- goto wq_ll_pop;
+ goto wq_cyc_pop;
}
skb = mlx5e_ipsec_handle_rx_skb(rq->netdev, skb);
if (unlikely(!skb)) {
mlx5e_free_rx_wqe(rq, wi);
- goto wq_ll_pop;
+ goto wq_cyc_pop;
}
mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
napi_gro_receive(rq->cq.napi, skb);
- mlx5e_free_rx_wqe_reuse(rq, wi);
-wq_ll_pop:
- mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
- &wqe->next.next_wqe_index);
+ mlx5e_free_rx_wqe(rq, wi);
+wq_cyc_pop:
+ mlx5_wq_cyc_pop(wq);
}
#endif /* CONFIG_MLX5_EN_IPSEC */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
index 027f54ac1ca2..4d316cc9b008 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
@@ -100,7 +100,7 @@ static int mlx5e_test_link_speed(struct mlx5e_priv *priv)
#ifdef CONFIG_INET
/* loopback test */
-#define MLX5E_TEST_PKT_SIZE (MLX5_MPWRQ_SMALL_PACKET_THRESHOLD - NET_IP_ALIGN)
+#define MLX5E_TEST_PKT_SIZE (MLX5E_RX_MAX_HEAD - NET_IP_ALIGN)
static const char mlx5e_test_text[ETH_GSTRING_LEN] = "MLX5E SELF TEST";
#define MLX5E_TEST_MAGIC 0x5AEED15C001ULL
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index b08c94422907..1646859974ce 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -32,6 +32,7 @@
#include "en.h"
#include "en_accel/ipsec.h"
+#include "en_accel/tls.h"
static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
@@ -43,6 +44,12 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_added_vlan_packets) },
+
+#ifdef CONFIG_MLX5_EN_TLS
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_resync_bytes) },
+#endif
+
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_removed_vlan_packets) },
@@ -57,11 +64,11 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_recover) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
@@ -74,7 +81,6 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_eq_rearm) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, link_down_events_phy) },
};
#define NUM_SW_COUNTERS ARRAY_SIZE(sw_stats_desc)
@@ -102,20 +108,19 @@ static int mlx5e_grp_sw_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
return idx;
}
-static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
+void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
{
struct mlx5e_sw_stats temp, *s = &temp;
- struct mlx5e_rq_stats *rq_stats;
- struct mlx5e_sq_stats *sq_stats;
- struct mlx5e_ch_stats *ch_stats;
- int i, j;
+ int i;
memset(s, 0, sizeof(*s));
- for (i = 0; i < priv->channels.num; i++) {
- struct mlx5e_channel *c = priv->channels.c[i];
- rq_stats = &c->rq.stats;
- ch_stats = &c->stats;
+ for (i = 0; i < priv->profile->max_nch(priv->mdev); i++) {
+ struct mlx5e_channel_stats *channel_stats =
+ &priv->channel_stats[i];
+ struct mlx5e_rq_stats *rq_stats = &channel_stats->rq;
+ struct mlx5e_ch_stats *ch_stats = &channel_stats->ch;
+ int j;
s->rx_packets += rq_stats->packets;
s->rx_bytes += rq_stats->bytes;
@@ -142,8 +147,8 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
s->rx_cache_waive += rq_stats->cache_waive;
s->ch_eq_rearm += ch_stats->eq_rearm;
- for (j = 0; j < priv->channels.params.num_tc; j++) {
- sq_stats = &c->sq[j].stats;
+ for (j = 0; j < priv->max_opened_tc; j++) {
+ struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j];
s->tx_packets += sq_stats->packets;
s->tx_bytes += sq_stats->bytes;
@@ -161,12 +166,13 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
s->tx_csum_none += sq_stats->csum_none;
s->tx_csum_partial += sq_stats->csum_partial;
+#ifdef CONFIG_MLX5_EN_TLS
+ s->tx_tls_ooo += sq_stats->tls_ooo;
+ s->tx_tls_resync_bytes += sq_stats->tls_resync_bytes;
+#endif
}
}
- s->link_down_events_phy = MLX5_GET(ppcnt_reg,
- priv->stats.pport.phy_counters,
- counter_set.phys_layer_cntrs.link_down_events);
memcpy(&priv->stats.sw, s, sizeof(*s));
}
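
Aggregation now walks the persistent priv->channel_stats array, sized by the profile's max_nch, rather than the live channel objects, so the totals survive ifdown/ifup and channel resizing. A sketch of the shape of the fold (types and fields are illustrative):

    /* Sketch: fold persistent per-channel counters into one total. */
    struct ch_totals { u64 rx_packets, rx_bytes; };

    static void fold_stats(struct ch_totals *total,
                           const struct ch_totals *per_ch, int max_nch)
    {
            int i;

            memset(total, 0, sizeof(*total));
            for (i = 0; i < max_nch; i++) {  /* every slot, not only open channels */
                    total->rx_packets += per_ch[i].rx_packets;
                    total->rx_bytes += per_ch[i].rx_bytes;
            }
    }
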
@@ -569,12 +575,13 @@ static const struct counter_desc pport_phy_statistical_stats_desc[] = {
{ "rx_corrected_bits_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits) },
};
-#define NUM_PPORT_PHY_COUNTERS ARRAY_SIZE(pport_phy_statistical_stats_desc)
+#define NUM_PPORT_PHY_STATISTICAL_COUNTERS ARRAY_SIZE(pport_phy_statistical_stats_desc)
static int mlx5e_grp_phy_get_num_stats(struct mlx5e_priv *priv)
{
+ /* "1" for link_down_events special counter */
return MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group) ?
- NUM_PPORT_PHY_COUNTERS : 0;
+ NUM_PPORT_PHY_STATISTICAL_COUNTERS + 1 : 1;
}
static int mlx5e_grp_phy_fill_strings(struct mlx5e_priv *priv, u8 *data,
@@ -582,10 +589,14 @@ static int mlx5e_grp_phy_fill_strings(struct mlx5e_priv *priv, u8 *data,
{
int i;
- if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group))
- for (i = 0; i < NUM_PPORT_PHY_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- pport_phy_statistical_stats_desc[i].format);
+ strcpy(data + (idx++) * ETH_GSTRING_LEN, "link_down_events_phy");
+
+ if (!MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group))
+ return idx;
+
+ for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ pport_phy_statistical_stats_desc[i].format);
return idx;
}
@@ -593,11 +604,17 @@ static int mlx5e_grp_phy_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
{
int i;
- if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group))
- for (i = 0; i < NUM_PPORT_PHY_COUNTERS; i++)
- data[idx++] =
- MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
- pport_phy_statistical_stats_desc, i);
+ /* link_down_events_phy needs special handling: it is not stored in __be64 format */
+ data[idx++] = MLX5_GET(ppcnt_reg, priv->stats.pport.phy_counters,
+ counter_set.phys_layer_cntrs.link_down_events);
+
+ if (!MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group))
+ return idx;
+
+ for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
+ data[idx++] =
+ MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
+ pport_phy_statistical_stats_desc, i);
return idx;
}
@@ -1065,6 +1082,22 @@ static void mlx5e_grp_ipsec_update_stats(struct mlx5e_priv *priv)
mlx5e_ipsec_update_stats(priv);
}
+static int mlx5e_grp_tls_get_num_stats(struct mlx5e_priv *priv)
+{
+ return mlx5e_tls_get_count(priv);
+}
+
+static int mlx5e_grp_tls_fill_strings(struct mlx5e_priv *priv, u8 *data,
+ int idx)
+{
+ return idx + mlx5e_tls_get_strings(priv, data + idx * ETH_GSTRING_LEN);
+}
+
+static int mlx5e_grp_tls_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
+{
+ return idx + mlx5e_tls_get_stats(priv, data + idx);
+}
+
static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
@@ -1104,11 +1137,11 @@ static const struct counter_desc sq_stats_desc[] = {
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
- { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
- { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, recover) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
};
static const struct counter_desc ch_stats_desc[] = {
@@ -1121,30 +1154,30 @@ static const struct counter_desc ch_stats_desc[] = {
static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv)
{
- return (NUM_RQ_STATS * priv->channels.num) +
- (NUM_CH_STATS * priv->channels.num) +
- (NUM_SQ_STATS * priv->channels.num * priv->channels.params.num_tc);
+ int max_nch = priv->profile->max_nch(priv->mdev);
+
+ return (NUM_RQ_STATS * max_nch) +
+ (NUM_CH_STATS * max_nch) +
+ (NUM_SQ_STATS * max_nch * priv->max_opened_tc);
}
static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data,
int idx)
{
+ int max_nch = priv->profile->max_nch(priv->mdev);
int i, j, tc;
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
- return idx;
-
- for (i = 0; i < priv->channels.num; i++)
+ for (i = 0; i < max_nch; i++)
for (j = 0; j < NUM_CH_STATS; j++)
sprintf(data + (idx++) * ETH_GSTRING_LEN,
ch_stats_desc[j].format, i);
- for (i = 0; i < priv->channels.num; i++)
+ for (i = 0; i < max_nch; i++)
for (j = 0; j < NUM_RQ_STATS; j++)
sprintf(data + (idx++) * ETH_GSTRING_LEN, rq_stats_desc[j].format, i);
- for (tc = 0; tc < priv->channels.params.num_tc; tc++)
- for (i = 0; i < priv->channels.num; i++)
+ for (tc = 0; tc < priv->max_opened_tc; tc++)
+ for (i = 0; i < max_nch; i++)
for (j = 0; j < NUM_SQ_STATS; j++)
sprintf(data + (idx++) * ETH_GSTRING_LEN,
sq_stats_desc[j].format,
@@ -1156,29 +1189,26 @@ static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data,
static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data,
int idx)
{
- struct mlx5e_channels *channels = &priv->channels;
+ int max_nch = priv->profile->max_nch(priv->mdev);
int i, j, tc;
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
- return idx;
-
- for (i = 0; i < channels->num; i++)
+ for (i = 0; i < max_nch; i++)
for (j = 0; j < NUM_CH_STATS; j++)
data[idx++] =
- MLX5E_READ_CTR64_CPU(&channels->c[i]->stats,
+ MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].ch,
ch_stats_desc, j);
- for (i = 0; i < channels->num; i++)
+ for (i = 0; i < max_nch; i++)
for (j = 0; j < NUM_RQ_STATS; j++)
data[idx++] =
- MLX5E_READ_CTR64_CPU(&channels->c[i]->rq.stats,
+ MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq,
rq_stats_desc, j);
- for (tc = 0; tc < priv->channels.params.num_tc; tc++)
- for (i = 0; i < channels->num; i++)
+ for (tc = 0; tc < priv->max_opened_tc; tc++)
+ for (i = 0; i < max_nch; i++)
for (j = 0; j < NUM_SQ_STATS; j++)
data[idx++] =
- MLX5E_READ_CTR64_CPU(&channels->c[i]->sq[tc].stats,
+ MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].sq[tc],
sq_stats_desc, j);
return idx;
@@ -1190,7 +1220,6 @@ const struct mlx5e_stats_grp mlx5e_stats_grps[] = {
.get_num_stats = mlx5e_grp_sw_get_num_stats,
.fill_strings = mlx5e_grp_sw_fill_strings,
.fill_stats = mlx5e_grp_sw_fill_stats,
- .update_stats_mask = MLX5E_NDO_UPDATE_STATS,
.update_stats = mlx5e_grp_sw_update_stats,
},
{
@@ -1268,6 +1297,11 @@ const struct mlx5e_stats_grp mlx5e_stats_grps[] = {
.update_stats = mlx5e_grp_ipsec_update_stats,
},
{
+ .get_num_stats = mlx5e_grp_tls_get_num_stats,
+ .fill_strings = mlx5e_grp_tls_fill_strings,
+ .fill_stats = mlx5e_grp_tls_fill_stats,
+ },
+ {
.get_num_stats = mlx5e_grp_channels_get_num_stats,
.fill_strings = mlx5e_grp_channels_fill_strings,
.fill_stats = mlx5e_grp_channels_fill_stats,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 53111a2df587..643153bb3607 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -75,11 +75,11 @@ struct mlx5e_sw_stats {
u64 tx_csum_partial;
u64 tx_csum_partial_inner;
u64 tx_queue_stopped;
- u64 tx_queue_wake;
u64 tx_queue_dropped;
u64 tx_xmit_more;
- u64 tx_cqe_err;
u64 tx_recover;
+ u64 tx_queue_wake;
+ u64 tx_cqe_err;
u64 rx_wqe_err;
u64 rx_mpwqe_filler;
u64 rx_buff_alloc_err;
@@ -93,8 +93,10 @@ struct mlx5e_sw_stats {
u64 rx_cache_waive;
u64 ch_eq_rearm;
- /* Special handling counters */
- u64 link_down_events_phy;
+#ifdef CONFIG_MLX5_EN_TLS
+ u64 tx_tls_ooo;
+ u64 tx_tls_resync_bytes;
+#endif
};
struct mlx5e_qcounter_stats {
@@ -194,13 +196,18 @@ struct mlx5e_sq_stats {
u64 csum_partial_inner;
u64 added_vlan_packets;
u64 nop;
+#ifdef CONFIG_MLX5_EN_TLS
+ u64 tls_ooo;
+ u64 tls_resync_bytes;
+#endif
/* less likely accessed in data path */
u64 csum_none;
u64 stopped;
- u64 wake;
u64 dropped;
- u64 cqe_err;
u64 recover;
+ /* dirtied @completion */
+ u64 wake ____cacheline_aligned_in_smp;
+ u64 cqe_err;
};
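
The annotation splits the SQ counters across cache lines: fields dirtied on the xmit path stay together, while wake and cqe_err, dirtied from the completion path, start a fresh line so the two paths do not false-share. The pattern in isolation (illustrative fields):

    /* Illustrative false-sharing split between producer and completer. */
    struct sq_stats_sketch {
            /* dirtied by the xmit (producer) path */
            u64 packets;
            u64 bytes;
            /* dirtied at completion; forced onto its own cache line */
            u64 wake ____cacheline_aligned_in_smp;
            u64 cqe_err;
    };
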
struct mlx5e_ch_stats {
@@ -233,4 +240,6 @@ struct mlx5e_stats_grp {
extern const struct mlx5e_stats_grp mlx5e_stats_grps[];
extern const int mlx5e_num_stats_grps;
+void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv);
+
#endif /* __MLX5_EN_STATS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index b94276db3ce9..0edf4751a8ba 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -52,28 +52,37 @@
#include "eswitch.h"
#include "vxlan.h"
#include "fs_core.h"
+#include "en/port.h"
struct mlx5_nic_flow_attr {
u32 action;
u32 flow_tag;
u32 mod_hdr_id;
u32 hairpin_tirn;
+ u8 match_level;
struct mlx5_flow_table *hairpin_ft;
};
+#define MLX5E_TC_FLOW_BASE (MLX5E_TC_LAST_EXPORTED_BIT + 1)
+
enum {
- MLX5E_TC_FLOW_ESWITCH = BIT(0),
- MLX5E_TC_FLOW_NIC = BIT(1),
- MLX5E_TC_FLOW_OFFLOADED = BIT(2),
- MLX5E_TC_FLOW_HAIRPIN = BIT(3),
- MLX5E_TC_FLOW_HAIRPIN_RSS = BIT(4),
+ MLX5E_TC_FLOW_INGRESS = MLX5E_TC_INGRESS,
+ MLX5E_TC_FLOW_EGRESS = MLX5E_TC_EGRESS,
+ MLX5E_TC_FLOW_ESWITCH = BIT(MLX5E_TC_FLOW_BASE),
+ MLX5E_TC_FLOW_NIC = BIT(MLX5E_TC_FLOW_BASE + 1),
+ MLX5E_TC_FLOW_OFFLOADED = BIT(MLX5E_TC_FLOW_BASE + 2),
+ MLX5E_TC_FLOW_HAIRPIN = BIT(MLX5E_TC_FLOW_BASE + 3),
+ MLX5E_TC_FLOW_HAIRPIN_RSS = BIT(MLX5E_TC_FLOW_BASE + 4),
};
+#define MLX5E_TC_MAX_SPLITS 1
+
struct mlx5e_tc_flow {
struct rhash_head node;
+ struct mlx5e_priv *priv;
u64 cookie;
u8 flags;
- struct mlx5_flow_handle *rule;
+ struct mlx5_flow_handle *rule[MLX5E_TC_MAX_SPLITS + 1];
struct list_head encap; /* flows sharing the same encap ID */
struct list_head mod_hdr; /* flows sharing the same mod hdr ID */
struct list_head hairpin; /* flows sharing the same hairpin */
@@ -97,7 +106,7 @@ enum {
};
#define MLX5E_TC_TABLE_NUM_GROUPS 4
-#define MLX5E_TC_TABLE_MAX_GROUP_SIZE (1 << 16)
+#define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(16)
struct mlx5e_hairpin {
struct mlx5_hairpin *pair;
@@ -607,7 +616,7 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
params.q_counter = priv->q_counter;
/* set one hairpin pair for each 50Gbps share of the link */
- mlx5e_get_max_linkspeed(priv->mdev, &link_speed);
+ mlx5e_port_max_linkspeed(priv->mdev, &link_speed);
link_speed = max_t(u32, link_speed, 50000);
link_speed64 = link_speed;
do_div(link_speed64, 50000);
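
One hairpin pair is created for every 50 Gbps share of the link, with a floor of one pair. A worked sketch, with speeds in Mbps as reported by the query above:

    /* Worked sketch, speeds in Mbps: 25G -> 1 pair, 100G -> 2 pairs. */
    static u64 hairpin_pairs(u32 link_speed_mbps)
    {
            u64 n = max_t(u32, link_speed_mbps, 50000);

            do_div(n, 50000);   /* one pair per 50 Gbps share */
            return n;
    }
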
@@ -753,7 +762,9 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
table_created = true;
}
- parse_attr->spec.match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ if (attr->match_level != MLX5_MATCH_NONE)
+ parse_attr->spec.match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+
rule = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
&flow_act, dest, dest_ix);
@@ -785,11 +796,11 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
struct mlx5_nic_flow_attr *attr = flow->nic_attr;
struct mlx5_fc *counter = NULL;
- counter = mlx5_flow_rule_counter(flow->rule);
- mlx5_del_flow_rules(flow->rule);
+ counter = mlx5_flow_rule_counter(flow->rule[0]);
+ mlx5_del_flow_rules(flow->rule[0]);
mlx5_fc_destroy(priv->mdev, counter);
- if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
+ if (!mlx5e_tc_num_filters(priv) && priv->fs.tc.t) {
mlx5_destroy_flow_table(priv->fs.tc.t);
priv->fs.tc.t = NULL;
}
@@ -835,7 +846,8 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
}
out_priv = netdev_priv(encap_dev);
rpriv = out_priv->ppriv;
- attr->out_rep = rpriv->rep;
+ attr->out_rep[attr->out_count] = rpriv->rep;
+ attr->out_mdev[attr->out_count++] = out_priv->mdev;
}
err = mlx5_eswitch_add_vlan_action(esw, attr);
@@ -860,9 +872,18 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
rule = mlx5_eswitch_add_offloaded_rule(esw, &parse_attr->spec, attr);
if (IS_ERR(rule))
goto err_add_rule;
+
+ if (attr->mirror_count) {
+ flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, &parse_attr->spec, attr);
+ if (IS_ERR(flow->rule[1]))
+ goto err_fwd_rule;
+ }
}
return rule;
+err_fwd_rule:
+ mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
+ rule = flow->rule[1];
err_add_rule:
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
mlx5e_detach_mod_hdr(priv, flow);
@@ -883,7 +904,9 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
- mlx5_eswitch_del_offloaded_rule(esw, flow->rule, attr);
+ if (attr->mirror_count)
+ mlx5_eswitch_del_offloaded_rule(esw, flow->rule[1], attr);
+ mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
}
mlx5_eswitch_del_vlan_action(esw, attr);
@@ -919,13 +942,25 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
list_for_each_entry(flow, &e->flows, encap) {
esw_attr = flow->esw_attr;
esw_attr->encap_id = e->encap_id;
- flow->rule = mlx5_eswitch_add_offloaded_rule(esw, &esw_attr->parse_attr->spec, esw_attr);
- if (IS_ERR(flow->rule)) {
- err = PTR_ERR(flow->rule);
+ flow->rule[0] = mlx5_eswitch_add_offloaded_rule(esw, &esw_attr->parse_attr->spec, esw_attr);
+ if (IS_ERR(flow->rule[0])) {
+ err = PTR_ERR(flow->rule[0]);
mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
err);
continue;
}
+
+ if (esw_attr->mirror_count) {
+ flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, &esw_attr->parse_attr->spec, esw_attr);
+ if (IS_ERR(flow->rule[1])) {
+ mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], esw_attr);
+ err = PTR_ERR(flow->rule[1]);
+ mlx5_core_warn(priv->mdev, "Failed to update cached mirror flow, %d\n",
+ err);
+ continue;
+ }
+ }
+
flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
}
}
@@ -938,8 +973,12 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
list_for_each_entry(flow, &e->flows, encap) {
if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
+ struct mlx5_esw_flow_attr *attr = flow->esw_attr;
+
flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
- mlx5_eswitch_del_offloaded_rule(esw, flow->rule, flow->esw_attr);
+ if (attr->mirror_count)
+ mlx5_eswitch_del_offloaded_rule(esw, flow->rule[1], attr);
+ mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
}
}
@@ -974,7 +1013,7 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
continue;
list_for_each_entry(flow, &e->flows, encap) {
if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
- counter = mlx5_flow_rule_counter(flow->rule);
+ counter = mlx5_flow_rule_counter(flow->rule[0]);
mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) {
neigh_used = true;
@@ -982,6 +1021,8 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
}
}
}
+ if (neigh_used)
+ break;
}
if (neigh_used) {
@@ -1190,7 +1231,7 @@ vxlan_match_offload_err:
static int __parse_cls_flower(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
struct tc_cls_flower_offload *f,
- u8 *min_inline)
+ u8 *match_level)
{
void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
outer_headers);
@@ -1199,7 +1240,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
u16 addr_type = 0;
u8 ip_proto = 0;
- *min_inline = MLX5_INLINE_MODE_L2;
+ *match_level = MLX5_MATCH_NONE;
if (f->dissector->used_keys &
~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
@@ -1249,58 +1290,6 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
inner_headers);
}
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
- struct flow_dissector_key_control *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_CONTROL,
- f->key);
-
- struct flow_dissector_key_control *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_CONTROL,
- f->mask);
- addr_type = key->addr_type;
-
- /* the HW doesn't support frag first/later */
- if (mask->flags & FLOW_DIS_FIRST_FRAG)
- return -EOPNOTSUPP;
-
- if (mask->flags & FLOW_DIS_IS_FRAGMENT) {
- MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
- key->flags & FLOW_DIS_IS_FRAGMENT);
-
- /* the HW doesn't need L3 inline to match on frag=no */
- if (key->flags & FLOW_DIS_IS_FRAGMENT)
- *min_inline = MLX5_INLINE_MODE_IP;
- }
- }
-
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
- struct flow_dissector_key_basic *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
- f->key);
- struct flow_dissector_key_basic *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
- f->mask);
- ip_proto = key->ip_proto;
-
- MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
- ntohs(mask->n_proto));
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
- ntohs(key->n_proto));
-
- MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
- mask->ip_proto);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
- key->ip_proto);
-
- if (mask->ip_proto)
- *min_inline = MLX5_INLINE_MODE_IP;
- }
-
if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
struct flow_dissector_key_eth_addrs *key =
skb_flow_dissector_target(f->dissector,
@@ -1324,6 +1313,9 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
smac_47_16),
key->src);
+
+ if (!is_zero_ether_addr(mask->src) || !is_zero_ether_addr(mask->dst))
+ *match_level = MLX5_MATCH_L2;
}
if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
@@ -1344,9 +1336,79 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, mask->vlan_priority);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, key->vlan_priority);
+
+ *match_level = MLX5_MATCH_L2;
+ }
+ }
+
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_dissector_key_basic *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_BASIC,
+ f->key);
+ struct flow_dissector_key_basic *mask =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_BASIC,
+ f->mask);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
+ ntohs(mask->n_proto));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
+ ntohs(key->n_proto));
+
+ if (mask->n_proto)
+ *match_level = MLX5_MATCH_L2;
+ }
+
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_dissector_key_control *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_CONTROL,
+ f->key);
+
+ struct flow_dissector_key_control *mask =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_CONTROL,
+ f->mask);
+ addr_type = key->addr_type;
+
+ /* the HW doesn't support frag first/later */
+ if (mask->flags & FLOW_DIS_FIRST_FRAG)
+ return -EOPNOTSUPP;
+
+ if (mask->flags & FLOW_DIS_IS_FRAGMENT) {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
+ key->flags & FLOW_DIS_IS_FRAGMENT);
+
+ /* the HW doesn't need L3 inline to match on frag=no */
+ if (!(key->flags & FLOW_DIS_IS_FRAGMENT))
+ *match_level = MLX5_MATCH_L2;
+ else
+ *match_level = MLX5_MATCH_L3;
 }
 }
+
+ /* *** L2 attributes parsing up to here *** */
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_dissector_key_basic *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_BASIC,
+ f->key);
+ struct flow_dissector_key_basic *mask =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_BASIC,
+ f->mask);
+ ip_proto = key->ip_proto;
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
+ mask->ip_proto);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
+ key->ip_proto);
+
+ if (mask->ip_proto)
+ *match_level = MLX5_MATCH_L3;
+ }
+
if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
struct flow_dissector_key_ipv4_addrs *key =
skb_flow_dissector_target(f->dissector,
@@ -1371,7 +1433,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
&key->dst, sizeof(key->dst));
if (mask->src || mask->dst)
- *min_inline = MLX5_INLINE_MODE_IP;
+ *match_level = MLX5_MATCH_L3;
}
if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
@@ -1400,7 +1462,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
if (ipv6_addr_type(&mask->src) != IPV6_ADDR_ANY ||
ipv6_addr_type(&mask->dst) != IPV6_ADDR_ANY)
- *min_inline = MLX5_INLINE_MODE_IP;
+ *match_level = MLX5_MATCH_L3;
}
if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IP)) {
@@ -1428,9 +1490,11 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
if (mask->tos || mask->ttl)
- *min_inline = MLX5_INLINE_MODE_IP;
+ *match_level = MLX5_MATCH_L3;
}
+ /* *** L3 attributes parsing up to here *** */
+
if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
struct flow_dissector_key_ports *key =
skb_flow_dissector_target(f->dissector,
@@ -1471,7 +1535,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
}
if (mask->src || mask->dst)
- *min_inline = MLX5_INLINE_MODE_TCP_UDP;
+ *match_level = MLX5_MATCH_L4;
}
if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_TCP)) {
@@ -1490,7 +1554,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
ntohs(key->flags));
if (mask->flags)
- *min_inline = MLX5_INLINE_MODE_TCP_UDP;
+ *match_level = MLX5_MATCH_L4;
}
return 0;
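
Because the parser walks L2 keys, then L3, then L4, the last assignment to *match_level is also the deepest header layer the flow touches; parse_cls_flower (below) then compares it directly against the eswitch inline mode, relying on the match-level enum aliasing the inline-mode values. A standalone sketch of that comparison, with the values assumed to be 0..3 as in the enum added to eswitch.h later in this diff:

    #include <stdio.h>

    /* Mirrors enum mlx5_flow_match_level in this diff:
     * NONE(0) < L2(1) < L3(2) < L4(3); the aliasing to the inline modes
     * is what makes the direct comparison below meaningful.
     */
    enum match_level { MATCH_NONE, MATCH_L2, MATCH_L3, MATCH_L4 };

    static int offloadable(int inline_mode, enum match_level match_level)
    {
        /* NONE means "no inline restriction" in this sketch */
        return inline_mode == MATCH_NONE || inline_mode >= match_level;
    }

    int main(void)
    {
        /* vport inline mode L2, flow matching on TCP ports: rejected */
        printf("L2 mode, L4 match: %s\n",
               offloadable(MATCH_L2, MATCH_L4) ? "offload" : "reject");
        /* vport inline mode L3, flow matching on IP addresses: offloaded */
        printf("L3 mode, L3 match: %s\n",
               offloadable(MATCH_L3, MATCH_L3) ? "offload" : "reject");
        return 0;
    }
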
@@ -1505,23 +1569,28 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
struct mlx5_eswitch *esw = dev->priv.eswitch;
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_eswitch_rep *rep;
- u8 min_inline;
+ u8 match_level;
int err;
- err = __parse_cls_flower(priv, spec, f, &min_inline);
+ err = __parse_cls_flower(priv, spec, f, &match_level);
if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) {
rep = rpriv->rep;
if (rep->vport != FDB_UPLINK_VPORT &&
(esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
- esw->offloads.inline_mode < min_inline)) {
+ esw->offloads.inline_mode < match_level)) {
netdev_warn(priv->netdev,
"Flow is not offloaded due to min inline setting, required %d actual %d\n",
- min_inline, esw->offloads.inline_mode);
+ match_level, esw->offloads.inline_mode);
return -EOPNOTSUPP;
}
}
+ if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
+ flow->esw_attr->match_level = match_level;
+ else
+ flow->nic_attr->match_level = match_level;
+
return err;
}
@@ -1578,7 +1647,6 @@ struct mlx5_fields {
static struct mlx5_fields fields[] = {
OFFLOAD(DMAC_47_16, 4, eth.h_dest[0], 0),
- OFFLOAD(DMAC_47_16, 4, eth.h_dest[0], 0),
OFFLOAD(DMAC_15_0, 2, eth.h_dest[4], 0),
OFFLOAD(SMAC_47_16, 4, eth.h_source[0], 0),
OFFLOAD(SMAC_15_0, 2, eth.h_source[4], 0),
@@ -1764,12 +1832,12 @@ static int parse_tc_pedit_action(struct mlx5e_priv *priv,
err = -EOPNOTSUPP; /* can't be all optimistic */
if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK) {
- printk(KERN_WARNING "mlx5: legacy pedit isn't offloaded\n");
+ netdev_warn(priv->netdev, "legacy pedit isn't offloaded\n");
goto out_err;
}
if (cmd != TCA_PEDIT_KEY_EX_CMD_SET && cmd != TCA_PEDIT_KEY_EX_CMD_ADD) {
- printk(KERN_WARNING "mlx5: pedit cmd %d isn't offloaded\n", cmd);
+ netdev_warn(priv->netdev, "pedit cmd %d isn't offloaded\n", cmd);
goto out_err;
}
@@ -1793,8 +1861,7 @@ static int parse_tc_pedit_action(struct mlx5e_priv *priv,
for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
cmd_masks = &masks[cmd];
if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
- printk(KERN_WARNING "mlx5: attempt to offload an unsupported field (cmd %d)\n",
- cmd);
+ netdev_warn(priv->netdev, "attempt to offload an unsupported field (cmd %d)\n", cmd);
print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
16, 1, cmd_masks, sizeof(zero_masks), true);
err = -EOPNOTSUPP;
@@ -1917,21 +1984,21 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
struct mlx5_nic_flow_attr *attr = flow->nic_attr;
const struct tc_action *a;
LIST_HEAD(actions);
+ u32 action = 0;
int err;
if (!tcf_exts_has_actions(exts))
return -EINVAL;
attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
- attr->action = 0;
tcf_exts_to_list(exts, &actions);
list_for_each_entry(a, &actions, list) {
if (is_tcf_gact_shot(a)) {
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
+ action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
if (MLX5_CAP_FLOWTABLE(priv->mdev,
flow_table_properties_nic_receive.flow_counter))
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
continue;
}
@@ -1941,13 +2008,13 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
if (err)
return err;
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
continue;
}
if (is_tcf_csum(a)) {
- if (csum_offload_supported(priv, attr->action,
+ if (csum_offload_supported(priv, action,
tcf_csum_update_flags(a)))
continue;
@@ -1961,8 +2028,8 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
same_hw_devs(priv, netdev_priv(peer_dev))) {
parse_attr->mirred_ifindex = peer_dev->ifindex;
flow->flags |= MLX5E_TC_FLOW_HAIRPIN;
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
} else {
netdev_warn(priv->netdev, "device %s not on same HW, can't offload\n",
peer_dev->name);
@@ -1981,13 +2048,14 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
}
attr->flow_tag = mark;
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
continue;
}
return -EINVAL;
}
+ attr->action = action;
if (!actions_match_supported(priv, exts, parse_attr, flow))
return -EOPNOTSUPP;
@@ -2044,6 +2112,20 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
return 0;
}
+static bool is_merged_eswitch_dev(struct mlx5e_priv *priv,
+ struct net_device *peer_netdev)
+{
+ struct mlx5e_priv *peer_priv;
+
+ peer_priv = netdev_priv(peer_netdev);
+
+ return (MLX5_CAP_ESW(priv->mdev, merged_eswitch) &&
+ (priv->netdev->netdev_ops == peer_netdev->netdev_ops) &&
+ same_hw_devs(priv, peer_priv) &&
+ MLX5_VPORT_MANAGER(peer_priv->mdev) &&
+ (peer_priv->mdev->priv.eswitch->mode == SRIOV_OFFLOADS));
+}
+
static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
struct net_device *mirred_dev,
struct net_device **out_dev,
@@ -2459,60 +2541,71 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
const struct tc_action *a;
LIST_HEAD(actions);
bool encap = false;
- int err = 0;
+ u32 action = 0;
if (!tcf_exts_has_actions(exts))
return -EINVAL;
- memset(attr, 0, sizeof(*attr));
attr->in_rep = rpriv->rep;
+ attr->in_mdev = priv->mdev;
tcf_exts_to_list(exts, &actions);
list_for_each_entry(a, &actions, list) {
if (is_tcf_gact_shot(a)) {
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
continue;
}
if (is_tcf_pedit(a)) {
+ int err;
+
err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_FDB,
parse_attr);
if (err)
return err;
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+ action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+ attr->mirror_count = attr->out_count;
continue;
}
if (is_tcf_csum(a)) {
- if (csum_offload_supported(priv, attr->action,
+ if (csum_offload_supported(priv, action,
tcf_csum_update_flags(a)))
continue;
return -EOPNOTSUPP;
}
- if (is_tcf_mirred_egress_redirect(a)) {
- struct net_device *out_dev;
+ if (is_tcf_mirred_egress_redirect(a) || is_tcf_mirred_egress_mirror(a)) {
struct mlx5e_priv *out_priv;
+ struct net_device *out_dev;
out_dev = tcf_mirred_dev(a);
+ if (attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) {
+ pr_err("can't support more than %d output ports, can't offload forwarding\n",
+ attr->out_count);
+ return -EOPNOTSUPP;
+ }
+
if (switchdev_port_same_parent_id(priv->netdev,
- out_dev)) {
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ out_dev) ||
+ is_merged_eswitch_dev(priv, out_dev)) {
+ action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
out_priv = netdev_priv(out_dev);
rpriv = out_priv->ppriv;
- attr->out_rep = rpriv->rep;
+ attr->out_rep[attr->out_count] = rpriv->rep;
+ attr->out_mdev[attr->out_count++] = out_priv->mdev;
} else if (encap) {
parse_attr->mirred_ifindex = out_dev->ifindex;
parse_attr->tun_info = *info;
attr->parse_attr = parse_attr;
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
/* attr->out_rep is resolved when we handle encap */
} else {
pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
@@ -2528,14 +2621,15 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
encap = true;
else
return -EOPNOTSUPP;
+ attr->mirror_count = attr->out_count;
continue;
}
if (is_tcf_vlan(a)) {
if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
+ action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
} else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
+ action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
attr->vlan_vid = tcf_vlan_push_vid(a);
if (mlx5_eswitch_vlan_actions_supported(priv->mdev)) {
attr->vlan_prio = tcf_vlan_push_prio(a);
@@ -2549,38 +2643,84 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
} else { /* action is TCA_VLAN_ACT_MODIFY */
return -EOPNOTSUPP;
}
+ attr->mirror_count = attr->out_count;
continue;
}
if (is_tcf_tunnel_release(a)) {
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
+ action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
continue;
}
return -EINVAL;
}
+ attr->action = action;
if (!actions_match_supported(priv, exts, parse_attr, flow))
return -EOPNOTSUPP;
- return err;
+ if (attr->out_count > 1 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
+ netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static void get_flags(int flags, u8 *flow_flags)
+{
+ u8 __flow_flags = 0;
+
+ if (flags & MLX5E_TC_INGRESS)
+ __flow_flags |= MLX5E_TC_FLOW_INGRESS;
+ if (flags & MLX5E_TC_EGRESS)
+ __flow_flags |= MLX5E_TC_FLOW_EGRESS;
+
+ *flow_flags = __flow_flags;
+}
+
+static const struct rhashtable_params tc_ht_params = {
+ .head_offset = offsetof(struct mlx5e_tc_flow, node),
+ .key_offset = offsetof(struct mlx5e_tc_flow, cookie),
+ .key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
+ .automatic_shrinking = true,
+};
+
+static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5e_rep_priv *uplink_rpriv;
+
+ if (MLX5_VPORT_MANAGER(priv->mdev) && esw->mode == SRIOV_OFFLOADS) {
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ return &uplink_rpriv->tc_ht;
+ } else {
+ return &priv->fs.tc.ht;
+ }
}
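
Both the NIC table and the new shared uplink table use the same tc_ht_params above, keyed purely on the TC filter cookie. A userspace sketch (mock struct with a hypothetical layout) of what the offsetof/sizeof pair in those params resolve to:

    #include <stddef.h>
    #include <stdio.h>

    /* Mock of the relevant mlx5e_tc_flow layout (hypothetical fields) */
    struct mock_flow {
        unsigned long cookie;    /* hash key: the TC filter cookie */
        int flags;
        /* the rhash head would live here in the real struct */
    };

    int main(void)
    {
        /* what .key_offset / .key_len in the rhashtable params encode */
        printf("key_offset = %zu, key_len = %zu\n",
               offsetof(struct mock_flow, cookie),
               sizeof(((struct mock_flow *)0)->cookie));
        return 0;
    }
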
int mlx5e_configure_flower(struct mlx5e_priv *priv,
- struct tc_cls_flower_offload *f)
+ struct tc_cls_flower_offload *f, int flags)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5e_tc_flow_parse_attr *parse_attr;
- struct mlx5e_tc_table *tc = &priv->fs.tc;
+ struct rhashtable *tc_ht = get_tc_ht(priv);
struct mlx5e_tc_flow *flow;
int attr_size, err = 0;
u8 flow_flags = 0;
+ get_flags(flags, &flow_flags);
+
+ flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
+ if (flow) {
+ netdev_warn_once(priv->netdev, "flow cookie %lx already exists, ignoring\n", f->cookie);
+ return 0;
+ }
+
if (esw && esw->mode == SRIOV_OFFLOADS) {
- flow_flags = MLX5E_TC_FLOW_ESWITCH;
+ flow_flags |= MLX5E_TC_FLOW_ESWITCH;
attr_size = sizeof(struct mlx5_esw_flow_attr);
} else {
- flow_flags = MLX5E_TC_FLOW_NIC;
+ flow_flags |= MLX5E_TC_FLOW_NIC;
attr_size = sizeof(struct mlx5_nic_flow_attr);
}
@@ -2593,6 +2733,7 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv,
flow->cookie = f->cookie;
flow->flags = flow_flags;
+ flow->priv = priv;
err = parse_cls_flower(priv, flow, &parse_attr->spec, f);
if (err < 0)
@@ -2602,16 +2743,16 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv,
err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow);
if (err < 0)
goto err_free;
- flow->rule = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow);
+ flow->rule[0] = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow);
} else {
err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow);
if (err < 0)
goto err_free;
- flow->rule = mlx5e_tc_add_nic_flow(priv, parse_attr, flow);
+ flow->rule[0] = mlx5e_tc_add_nic_flow(priv, parse_attr, flow);
}
- if (IS_ERR(flow->rule)) {
- err = PTR_ERR(flow->rule);
+ if (IS_ERR(flow->rule[0])) {
+ err = PTR_ERR(flow->rule[0]);
if (err != -EAGAIN)
goto err_free;
}
@@ -2623,8 +2764,7 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv,
!(flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP))
kvfree(parse_attr);
- err = rhashtable_insert_fast(&tc->ht, &flow->node,
- tc->ht_params);
+ err = rhashtable_insert_fast(tc_ht, &flow->node, tc_ht_params);
if (err) {
mlx5e_tc_del_flow(priv, flow);
kfree(flow);
@@ -2638,18 +2778,28 @@ err_free:
return err;
}
+#define DIRECTION_MASK (MLX5E_TC_INGRESS | MLX5E_TC_EGRESS)
+#define FLOW_DIRECTION_MASK (MLX5E_TC_FLOW_INGRESS | MLX5E_TC_FLOW_EGRESS)
+
+static bool same_flow_direction(struct mlx5e_tc_flow *flow, int flags)
+{
+ return (flow->flags & FLOW_DIRECTION_MASK) == (flags & DIRECTION_MASK);
+}
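
same_flow_direction() works because the exported MLX5E_TC_INGRESS/EGRESS bits and the internal flow flags occupy matching bit positions, as get_flags() implies by mapping them one-to-one. A standalone sketch of the check, assuming those bit values:

    #include <stdio.h>

    #define TC_INGRESS  (1 << 0)
    #define TC_EGRESS   (1 << 1)
    #define DIR_MASK    (TC_INGRESS | TC_EGRESS)

    static int same_direction(int flow_flags, int req_flags)
    {
        return (flow_flags & DIR_MASK) == (req_flags & DIR_MASK);
    }

    int main(void)
    {
        int flow_flags = TC_INGRESS;    /* stored at configure time */

        /* an ingress flow answers only ingress delete/stats requests */
        printf("ingress request: %d\n", same_direction(flow_flags, TC_INGRESS));
        printf("egress request:  %d\n", same_direction(flow_flags, TC_EGRESS));
        return 0;
    }
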
+
int mlx5e_delete_flower(struct mlx5e_priv *priv,
- struct tc_cls_flower_offload *f)
+ struct tc_cls_flower_offload *f, int flags)
{
+ struct rhashtable *tc_ht = get_tc_ht(priv);
struct mlx5e_tc_flow *flow;
- struct mlx5e_tc_table *tc = &priv->fs.tc;
- flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
- tc->ht_params);
- if (!flow)
+ flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
+ if (!flow || !same_flow_direction(flow, flags))
return -EINVAL;
- rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params);
+ rhashtable_remove_fast(tc_ht, &flow->node, tc_ht_params);
mlx5e_tc_del_flow(priv, flow);
@@ -2659,24 +2809,23 @@ int mlx5e_delete_flower(struct mlx5e_priv *priv,
}
int mlx5e_stats_flower(struct mlx5e_priv *priv,
- struct tc_cls_flower_offload *f)
+ struct tc_cls_flower_offload *f, int flags)
{
- struct mlx5e_tc_table *tc = &priv->fs.tc;
+ struct rhashtable *tc_ht = get_tc_ht(priv);
struct mlx5e_tc_flow *flow;
struct mlx5_fc *counter;
u64 bytes;
u64 packets;
u64 lastuse;
- flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
- tc->ht_params);
- if (!flow)
+ flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
+ if (!flow || !same_flow_direction(flow, flags))
return -EINVAL;
if (!(flow->flags & MLX5E_TC_FLOW_OFFLOADED))
return 0;
- counter = mlx5_flow_rule_counter(flow->rule);
+ counter = mlx5_flow_rule_counter(flow->rule[0]);
if (!counter)
return 0;
@@ -2687,41 +2836,50 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv,
return 0;
}
-static const struct rhashtable_params mlx5e_tc_flow_ht_params = {
- .head_offset = offsetof(struct mlx5e_tc_flow, node),
- .key_offset = offsetof(struct mlx5e_tc_flow, cookie),
- .key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
- .automatic_shrinking = true,
-};
-
-int mlx5e_tc_init(struct mlx5e_priv *priv)
+int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
{
struct mlx5e_tc_table *tc = &priv->fs.tc;
hash_init(tc->mod_hdr_tbl);
hash_init(tc->hairpin_tbl);
- tc->ht_params = mlx5e_tc_flow_ht_params;
- return rhashtable_init(&tc->ht, &tc->ht_params);
+ return rhashtable_init(&tc->ht, &tc_ht_params);
}
static void _mlx5e_tc_del_flow(void *ptr, void *arg)
{
struct mlx5e_tc_flow *flow = ptr;
- struct mlx5e_priv *priv = arg;
+ struct mlx5e_priv *priv = flow->priv;
mlx5e_tc_del_flow(priv, flow);
kfree(flow);
}
-void mlx5e_tc_cleanup(struct mlx5e_priv *priv)
+void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
{
struct mlx5e_tc_table *tc = &priv->fs.tc;
- rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv);
+ rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, NULL);
if (!IS_ERR_OR_NULL(tc->t)) {
mlx5_destroy_flow_table(tc->t);
tc->t = NULL;
}
}
+
+int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
+{
+ return rhashtable_init(tc_ht, &tc_ht_params);
+}
+
+void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht)
+{
+ rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL);
+}
+
+int mlx5e_tc_num_filters(struct mlx5e_priv *priv)
+{
+ struct rhashtable *tc_ht = get_tc_ht(priv);
+
+ return atomic_read(&tc_ht->nelems);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
index c14c263a739b..49436bf3b80a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
@@ -38,16 +38,26 @@
#define MLX5E_TC_FLOW_ID_MASK 0x0000ffff
#ifdef CONFIG_MLX5_ESWITCH
-int mlx5e_tc_init(struct mlx5e_priv *priv);
-void mlx5e_tc_cleanup(struct mlx5e_priv *priv);
+
+enum {
+ MLX5E_TC_INGRESS = BIT(0),
+ MLX5E_TC_EGRESS = BIT(1),
+ MLX5E_TC_LAST_EXPORTED_BIT = 1,
+};
+
+int mlx5e_tc_nic_init(struct mlx5e_priv *priv);
+void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv);
+
+int mlx5e_tc_esw_init(struct rhashtable *tc_ht);
+void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht);
int mlx5e_configure_flower(struct mlx5e_priv *priv,
- struct tc_cls_flower_offload *f);
+ struct tc_cls_flower_offload *f, int flags);
int mlx5e_delete_flower(struct mlx5e_priv *priv,
- struct tc_cls_flower_offload *f);
+ struct tc_cls_flower_offload *f, int flags);
int mlx5e_stats_flower(struct mlx5e_priv *priv,
- struct tc_cls_flower_offload *f);
+ struct tc_cls_flower_offload *f, int flags);
struct mlx5e_encap_entry;
void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
@@ -58,14 +68,11 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
struct mlx5e_neigh_hash_entry;
void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe);
-static inline int mlx5e_tc_num_filters(struct mlx5e_priv *priv)
-{
- return atomic_read(&priv->fs.tc.ht.nelems);
-}
+int mlx5e_tc_num_filters(struct mlx5e_priv *priv);
#else /* CONFIG_MLX5_ESWITCH */
-static inline int mlx5e_tc_init(struct mlx5e_priv *priv) { return 0; }
-static inline void mlx5e_tc_cleanup(struct mlx5e_priv *priv) {}
+static inline int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; }
+static inline void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) {}
static inline int mlx5e_tc_num_filters(struct mlx5e_priv *priv) { return 0; }
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 5532aa3675c7..f29deb44bf3b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -35,12 +35,21 @@
#include <net/dsfield.h>
#include "en.h"
#include "ipoib/ipoib.h"
-#include "en_accel/ipsec_rxtx.h"
+#include "en_accel/en_accel.h"
#include "lib/clock.h"
#define MLX5E_SQ_NOPS_ROOM MLX5_SEND_WQE_MAX_WQEBBS
+
+#ifndef CONFIG_MLX5_EN_TLS
#define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\
MLX5E_SQ_NOPS_ROOM)
+#else
+/* TLS offload requires MLX5E_SQ_STOP_ROOM to have
+ * enough room for a resync SKB, a normal SKB and a NOP
+ */
+#define MLX5E_SQ_STOP_ROOM (2 * MLX5_SEND_WQE_MAX_WQEBBS +\
+ MLX5E_SQ_NOPS_ROOM)
+#endif
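
As a back-of-envelope check of the constant above — a standalone sketch, assuming the 64B basic block and 1KB max WQE of this tree (verify the values before relying on them):

    #include <stdio.h>

    /* Assumed values: 64B WQE basic block, 1KB max WQE size */
    #define SEND_WQE_BB        64
    #define SEND_WQE_MAX_SIZE  1024
    #define MAX_WQEBBS         (SEND_WQE_MAX_SIZE / SEND_WQE_BB)  /* 16 */
    #define NOPS_ROOM          MAX_WQEBBS

    int main(void)
    {
        /* one max-size WQE plus NOP room */
        printf("stop room (no TLS): %d WQEBBs\n", MAX_WQEBBS + NOPS_ROOM);
        /* TLS resync SKB + normal SKB + NOP room */
        printf("stop room (TLS):    %d WQEBBs\n", 2 * MAX_WQEBBS + NOPS_ROOM);
        return 0;
    }
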
static inline void mlx5e_tx_dma_unmap(struct device *pdev,
struct mlx5e_sq_dma *dma)
@@ -179,28 +188,16 @@ static inline u16 mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
return min_t(u16, hlen, skb_headlen(skb));
}
-static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data,
- unsigned int *skb_len,
- unsigned int len)
-{
- *skb_len -= len;
- *skb_data += len;
-}
-
-static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs,
- unsigned char **skb_data,
- unsigned int *skb_len)
+static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs)
{
struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)start;
int cpy1_sz = 2 * ETH_ALEN;
int cpy2_sz = ihs - cpy1_sz;
- memcpy(vhdr, *skb_data, cpy1_sz);
- mlx5e_tx_skb_pull_inline(skb_data, skb_len, cpy1_sz);
+ memcpy(vhdr, skb->data, cpy1_sz);
vhdr->h_vlan_proto = skb->vlan_proto;
vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb));
- memcpy(&vhdr->h_vlan_encapsulated_proto, *skb_data, cpy2_sz);
- mlx5e_tx_skb_pull_inline(skb_data, skb_len, cpy2_sz);
+ memcpy(&vhdr->h_vlan_encapsulated_proto, skb->data + cpy1_sz, cpy2_sz);
}
static inline void
@@ -211,34 +208,31 @@ mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct
if (skb->encapsulation) {
eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM |
MLX5_ETH_WQE_L4_INNER_CSUM;
- sq->stats.csum_partial_inner++;
+ sq->stats->csum_partial_inner++;
} else {
eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
- sq->stats.csum_partial++;
+ sq->stats->csum_partial++;
}
} else
- sq->stats.csum_none++;
+ sq->stats->csum_none++;
}
static inline u16
-mlx5e_txwqe_build_eseg_gso(struct mlx5e_txqsq *sq, struct sk_buff *skb,
- struct mlx5_wqe_eth_seg *eseg, unsigned int *num_bytes)
+mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb)
{
+ struct mlx5e_sq_stats *stats = sq->stats;
u16 ihs;
- eseg->mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
-
if (skb->encapsulation) {
ihs = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
- sq->stats.tso_inner_packets++;
- sq->stats.tso_inner_bytes += skb->len - ihs;
+ stats->tso_inner_packets++;
+ stats->tso_inner_bytes += skb->len - ihs;
} else {
ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
- sq->stats.tso_packets++;
- sq->stats.tso_bytes += skb->len - ihs;
+ stats->tso_packets++;
+ stats->tso_bytes += skb->len - ihs;
}
- *num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
return ihs;
}
@@ -291,17 +285,34 @@ dma_unmap_wqe_err:
return -ENOMEM;
}
+static inline void mlx5e_fill_sq_frag_edge(struct mlx5e_txqsq *sq,
+ struct mlx5_wq_cyc *wq,
+ u16 pi, u16 frag_pi)
+{
+ struct mlx5e_tx_wqe_info *edge_wi, *wi = &sq->db.wqe_info[pi];
+ u8 nnops = mlx5_wq_cyc_get_frag_size(wq) - frag_pi;
+
+ edge_wi = wi + nnops;
+
+ /* fill sq frag edge with nops to avoid wqe wrapping two pages */
+ for (; wi < edge_wi; wi++) {
+ wi->skb = NULL;
+ wi->num_wqebbs = 1;
+ mlx5e_post_nop(wq, sq->sqn, &sq->pc);
+ }
+ sq->stats->nop += nnops;
+}
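
A standalone model of the edge fill above: given the per-fragment capacity of the cyclic work queue (in WQEBBs) and the producer offset within the current fragment, compute how many NOPs pad out the fragment so a multi-WQEBB WQE never straddles two pages (mock arithmetic, hypothetical sizes):

    #include <stdio.h>

    /* frag_size: WQEBBs per page-sized fragment of the cyclic WQ */
    static unsigned int edge_nops(unsigned int frag_size,
                                  unsigned int frag_pi,
                                  unsigned int num_wqebbs)
    {
        if (frag_pi + num_wqebbs > frag_size)
            return frag_size - frag_pi;    /* pad to the fragment edge */
        return 0;
    }

    int main(void)
    {
        /* e.g. 64 WQEBBs per 4K page, producer at offset 62, 4-WQEBB WQE */
        printf("nops needed: %u\n", edge_nops(64, 62, 4));    /* -> 2 */
        printf("nops needed: %u\n", edge_nops(64, 10, 4));    /* -> 0 */
        return 0;
    }
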
+
static inline void
mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
- u8 opcode, u16 ds_cnt, u32 num_bytes, u8 num_dma,
+ u8 opcode, u16 ds_cnt, u8 num_wqebbs, u32 num_bytes, u8 num_dma,
struct mlx5e_tx_wqe_info *wi, struct mlx5_wqe_ctrl_seg *cseg)
{
struct mlx5_wq_cyc *wq = &sq->wq;
- u16 pi;
wi->num_bytes = num_bytes;
wi->num_dma = num_dma;
- wi->num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
+ wi->num_wqebbs = num_wqebbs;
wi->skb = skb;
cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
@@ -315,84 +326,108 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
sq->pc += wi->num_wqebbs;
if (unlikely(!mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, MLX5E_SQ_STOP_ROOM))) {
netif_tx_stop_queue(sq->txq);
- sq->stats.stopped++;
+ sq->stats->stopped++;
}
if (!skb->xmit_more || netif_xmit_stopped(sq->txq))
mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg);
-
- /* fill sq edge with nops to avoid wqe wrap around */
- while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) {
- sq->db.wqe_info[pi].skb = NULL;
- mlx5e_post_nop(wq, sq->sqn, &sq->pc);
- sq->stats.nop++;
- }
}
-static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
- struct mlx5e_tx_wqe *wqe, u16 pi)
-{
- struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];
+#define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start))
- struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
- struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
+netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ struct mlx5e_tx_wqe *wqe, u16 pi)
+{
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ struct mlx5_wqe_ctrl_seg *cseg;
+ struct mlx5_wqe_eth_seg *eseg;
+ struct mlx5_wqe_data_seg *dseg;
+ struct mlx5e_tx_wqe_info *wi;
- unsigned char *skb_data = skb->data;
- unsigned int skb_len = skb->len;
- u8 opcode = MLX5_OPCODE_SEND;
- unsigned int num_bytes;
+ struct mlx5e_sq_stats *stats = sq->stats;
+ u16 ds_cnt, ds_cnt_inl = 0;
+ u16 headlen, ihs, frag_pi;
+ u8 num_wqebbs, opcode;
+ u32 num_bytes;
int num_dma;
- u16 headlen;
- u16 ds_cnt;
- u16 ihs;
-
- mlx5e_txwqe_build_eseg_csum(sq, skb, eseg);
+ __be16 mss;
+ /* Calc ihs and ds cnt, no writes to wqe yet */
+ ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
if (skb_is_gso(skb)) {
- opcode = MLX5_OPCODE_LSO;
- ihs = mlx5e_txwqe_build_eseg_gso(sq, skb, eseg, &num_bytes);
- sq->stats.packets += skb_shinfo(skb)->gso_segs;
+ opcode = MLX5_OPCODE_LSO;
+ mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
+ ihs = mlx5e_tx_get_gso_ihs(sq, skb);
+ num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
+ stats->packets += skb_shinfo(skb)->gso_segs;
} else {
- ihs = mlx5e_calc_min_inline(sq->min_inline_mode, skb);
+ opcode = MLX5_OPCODE_SEND;
+ mss = 0;
+ ihs = mlx5e_calc_min_inline(sq->min_inline_mode, skb);
num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
- sq->stats.packets++;
+ stats->packets++;
}
- sq->stats.bytes += num_bytes;
- sq->stats.xmit_more += skb->xmit_more;
- ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
+ stats->bytes += num_bytes;
+ stats->xmit_more += skb->xmit_more;
+
+ headlen = skb->len - ihs - skb->data_len;
+ ds_cnt += !!headlen;
+ ds_cnt += skb_shinfo(skb)->nr_frags;
+
+ if (ihs) {
+ ihs += !!skb_vlan_tag_present(skb) * VLAN_HLEN;
+
+ ds_cnt_inl = DIV_ROUND_UP(ihs - INL_HDR_START_SZ, MLX5_SEND_WQE_DS);
+ ds_cnt += ds_cnt_inl;
+ }
+
+ num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
+ frag_pi = mlx5_wq_cyc_ctr2fragix(wq, sq->pc);
+ if (unlikely(frag_pi + num_wqebbs > mlx5_wq_cyc_get_frag_size(wq))) {
+ mlx5e_fill_sq_frag_edge(sq, wq, pi, frag_pi);
+ mlx5e_sq_fetch_wqe(sq, &wqe, &pi);
+ }
+
+ /* fill wqe */
+ wi = &sq->db.wqe_info[pi];
+ cseg = &wqe->ctrl;
+ eseg = &wqe->eth;
+ dseg = wqe->data;
+
+ mlx5e_txwqe_build_eseg_csum(sq, skb, eseg);
+
+ eseg->mss = mss;
+
if (ihs) {
+ eseg->inline_hdr.sz = cpu_to_be16(ihs);
if (skb_vlan_tag_present(skb)) {
- mlx5e_insert_vlan(eseg->inline_hdr.start, skb, ihs, &skb_data, &skb_len);
- ihs += VLAN_HLEN;
- sq->stats.added_vlan_packets++;
+ ihs -= VLAN_HLEN;
+ mlx5e_insert_vlan(eseg->inline_hdr.start, skb, ihs);
+ stats->added_vlan_packets++;
} else {
- memcpy(eseg->inline_hdr.start, skb_data, ihs);
- mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs);
+ memcpy(eseg->inline_hdr.start, skb->data, ihs);
}
- eseg->inline_hdr.sz = cpu_to_be16(ihs);
- ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr.start), MLX5_SEND_WQE_DS);
+ dseg += ds_cnt_inl;
} else if (skb_vlan_tag_present(skb)) {
eseg->insert.type = cpu_to_be16(MLX5_ETH_WQE_INSERT_VLAN);
if (skb->vlan_proto == cpu_to_be16(ETH_P_8021AD))
eseg->insert.type |= cpu_to_be16(MLX5_ETH_WQE_SVLAN);
eseg->insert.vlan_tci = cpu_to_be16(skb_vlan_tag_get(skb));
- sq->stats.added_vlan_packets++;
+ stats->added_vlan_packets++;
}
- headlen = skb_len - skb->data_len;
- num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen,
- (struct mlx5_wqe_data_seg *)cseg + ds_cnt);
+ num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + ihs, headlen, dseg);
if (unlikely(num_dma < 0))
goto err_drop;
- mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt + num_dma,
- num_bytes, num_dma, wi, cseg);
+ mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt, num_wqebbs, num_bytes,
+ num_dma, wi, cseg);
return NETDEV_TX_OK;
err_drop:
- sq->stats.dropped++;
+ stats->dropped++;
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
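
The "calc first, write later" restructuring above boils down to fixed arithmetic; a standalone sketch of the data-segment and WQEBB counts, assuming 16B per data segment, 4 segments per 64B WQEBB, a 32B ctrl+eth base, and a 2B inline_hdr.start (the values this tree appears to use):

    #include <stdio.h>

    #define DS_SZ         16    /* bytes per data segment */
    #define WQEBB_NUM_DS  4     /* 64B WQEBB / 16B DS */
    #define BASE_DS       2     /* ctrl(16B) + eth(16B) segments */
    #define INL_START_SZ  2     /* eth seg inline_hdr.start, assumed */

    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned int ihs = 54;      /* inlined headers: ETH+IP+TCP */
        unsigned int headlen = 0;   /* linear bytes left after inlining */
        unsigned int nr_frags = 2;  /* paged fragments */

        unsigned int ds_cnt = BASE_DS;
        ds_cnt += !!headlen;        /* one DS for the linear part, if any */
        ds_cnt += nr_frags;         /* one DS per paged fragment */
        if (ihs)                    /* inline header DSes, first 2B are free */
            ds_cnt += DIV_ROUND_UP(ihs - INL_START_SZ, DS_SZ);

        printf("ds_cnt = %u, num_wqebbs = %u\n",
               ds_cnt, DIV_ROUND_UP(ds_cnt, WQEBB_NUM_DS));  /* 8, 2 */
        return 0;
    }
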
@@ -401,21 +436,19 @@ err_drop:
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- struct mlx5e_txqsq *sq = priv->txq2sq[skb_get_queue_mapping(skb)];
- struct mlx5_wq_cyc *wq = &sq->wq;
- u16 pi = sq->pc & wq->sz_m1;
- struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
+ struct mlx5e_tx_wqe *wqe;
+ struct mlx5e_txqsq *sq;
+ u16 pi;
- memset(wqe, 0, sizeof(*wqe));
+ sq = priv->txq2sq[skb_get_queue_mapping(skb)];
+ mlx5e_sq_fetch_wqe(sq, &wqe, &pi);
-#ifdef CONFIG_MLX5_EN_IPSEC
- if (sq->state & BIT(MLX5E_SQ_STATE_IPSEC)) {
- skb = mlx5e_ipsec_handle_tx_skb(dev, wqe, skb);
- if (unlikely(!skb))
- return NETDEV_TX_OK;
- }
+#ifdef CONFIG_MLX5_ACCEL
+ /* might send skbs and update wqe and pi */
+ skb = mlx5e_accel_handle_tx(skb, sq, dev, &wqe, &pi);
+ if (unlikely(!skb))
+ return NETDEV_TX_OK;
#endif
-
return mlx5e_sq_xmit(sq, skb, wqe, pi);
}
@@ -443,7 +476,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
sq = container_of(cq, struct mlx5e_txqsq, cq);
- if (unlikely(!MLX5E_TEST_BIT(sq->state, MLX5E_SQ_STATE_ENABLED)))
+ if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
return false;
cqe = mlx5_cqwq_get_cqe(&cq->wq);
@@ -478,7 +511,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
queue_work(cq->channel->priv->wq,
&sq->recover.recover_work);
}
- sq->stats.cqe_err++;
+ sq->stats->cqe_err++;
}
do {
@@ -489,7 +522,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
last_wqe = (sqcc == wqe_counter);
- ci = sqcc & sq->wq.sz_m1;
+ ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
wi = &sq->db.wqe_info[ci];
skb = wi->skb;
@@ -538,7 +571,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
MLX5E_SQ_STOP_ROOM) &&
!test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) {
netif_tx_wake_queue(sq->txq);
- sq->stats.wake++;
+ sq->stats->wake++;
}
return (i == MLX5E_TX_CQ_POLL_BUDGET);
@@ -552,7 +585,7 @@ void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq)
int i;
while (sq->cc != sq->pc) {
- ci = sq->cc & sq->wq.sz_m1;
+ ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc);
wi = &sq->db.wqe_info[ci];
skb = wi->skb;
@@ -574,18 +607,6 @@ void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq)
}
#ifdef CONFIG_MLX5_CORE_IPOIB
-
-struct mlx5_wqe_eth_pad {
- u8 rsvd0[16];
-};
-
-struct mlx5i_tx_wqe {
- struct mlx5_wqe_ctrl_seg ctrl;
- struct mlx5_wqe_datagram_seg datagram;
- struct mlx5_wqe_eth_pad pad;
- struct mlx5_wqe_eth_seg eth;
-};
-
static inline void
mlx5i_txwqe_build_datagram(struct mlx5_av *av, u32 dqpn, u32 dqkey,
struct mlx5_wqe_datagram_seg *dseg)
@@ -598,67 +619,92 @@ mlx5i_txwqe_build_datagram(struct mlx5_av *av, u32 dqpn, u32 dqkey,
netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5_av *av, u32 dqpn, u32 dqkey)
{
- struct mlx5_wq_cyc *wq = &sq->wq;
- u16 pi = sq->pc & wq->sz_m1;
- struct mlx5i_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
- struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];
-
- struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
- struct mlx5_wqe_datagram_seg *datagram = &wqe->datagram;
- struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
-
- unsigned char *skb_data = skb->data;
- unsigned int skb_len = skb->len;
- u8 opcode = MLX5_OPCODE_SEND;
- unsigned int num_bytes;
- int num_dma;
- u16 headlen;
- u16 ds_cnt;
- u16 ihs;
-
- memset(wqe, 0, sizeof(*wqe));
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ struct mlx5i_tx_wqe *wqe;
- mlx5i_txwqe_build_datagram(av, dqpn, dqkey, datagram);
+ struct mlx5_wqe_datagram_seg *datagram;
+ struct mlx5_wqe_ctrl_seg *cseg;
+ struct mlx5_wqe_eth_seg *eseg;
+ struct mlx5_wqe_data_seg *dseg;
+ struct mlx5e_tx_wqe_info *wi;
- mlx5e_txwqe_build_eseg_csum(sq, skb, eseg);
+ struct mlx5e_sq_stats *stats = sq->stats;
+ u16 headlen, ihs, pi, frag_pi;
+ u16 ds_cnt, ds_cnt_inl = 0;
+ u8 num_wqebbs, opcode;
+ u32 num_bytes;
+ int num_dma;
+ __be16 mss;
+ /* Calc ihs and ds cnt, no writes to wqe yet */
+ ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
if (skb_is_gso(skb)) {
- opcode = MLX5_OPCODE_LSO;
- ihs = mlx5e_txwqe_build_eseg_gso(sq, skb, eseg, &num_bytes);
- sq->stats.packets += skb_shinfo(skb)->gso_segs;
+ opcode = MLX5_OPCODE_LSO;
+ mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
+ ihs = mlx5e_tx_get_gso_ihs(sq, skb);
+ num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
+ stats->packets += skb_shinfo(skb)->gso_segs;
} else {
- ihs = mlx5e_calc_min_inline(sq->min_inline_mode, skb);
+ opcode = MLX5_OPCODE_SEND;
+ mss = 0;
+ ihs = mlx5e_calc_min_inline(sq->min_inline_mode, skb);
num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
- sq->stats.packets++;
+ stats->packets++;
}
- sq->stats.bytes += num_bytes;
- sq->stats.xmit_more += skb->xmit_more;
+ stats->bytes += num_bytes;
+ stats->xmit_more += skb->xmit_more;
+
+ headlen = skb->len - ihs - skb->data_len;
+ ds_cnt += !!headlen;
+ ds_cnt += skb_shinfo(skb)->nr_frags;
+
+ if (ihs) {
+ ds_cnt_inl = DIV_ROUND_UP(ihs - INL_HDR_START_SZ, MLX5_SEND_WQE_DS);
+ ds_cnt += ds_cnt_inl;
+ }
+
+ num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
+ frag_pi = mlx5_wq_cyc_ctr2fragix(wq, sq->pc);
+ if (unlikely(frag_pi + num_wqebbs > mlx5_wq_cyc_get_frag_size(wq))) {
+ pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+ mlx5e_fill_sq_frag_edge(sq, wq, pi, frag_pi);
+ }
+
+ mlx5i_sq_fetch_wqe(sq, &wqe, &pi);
+
+ /* fill wqe */
+ wi = &sq->db.wqe_info[pi];
+ cseg = &wqe->ctrl;
+ datagram = &wqe->datagram;
+ eseg = &wqe->eth;
+ dseg = wqe->data;
+
+ mlx5i_txwqe_build_datagram(av, dqpn, dqkey, datagram);
+
+ mlx5e_txwqe_build_eseg_csum(sq, skb, eseg);
+
+ eseg->mss = mss;
- ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
if (ihs) {
- memcpy(eseg->inline_hdr.start, skb_data, ihs);
- mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs);
+ memcpy(eseg->inline_hdr.start, skb->data, ihs);
eseg->inline_hdr.sz = cpu_to_be16(ihs);
- ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr.start), MLX5_SEND_WQE_DS);
+ dseg += ds_cnt_inl;
}
- headlen = skb_len - skb->data_len;
- num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen,
- (struct mlx5_wqe_data_seg *)cseg + ds_cnt);
+ num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + ihs, headlen, dseg);
if (unlikely(num_dma < 0))
goto err_drop;
- mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt + num_dma,
- num_bytes, num_dma, wi, cseg);
+ mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt, num_wqebbs, num_bytes,
+ num_dma, wi, cseg);
return NETDEV_TX_OK;
err_drop:
- sq->stats.dropped++;
+ stats->dropped++;
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
-
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index f292bb346985..1b17f682693b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -44,6 +44,32 @@ static inline bool mlx5e_channel_no_affinity_change(struct mlx5e_channel *c)
return cpumask_test_cpu(current_cpu, aff);
}
+static void mlx5e_handle_tx_dim(struct mlx5e_txqsq *sq)
+{
+ struct mlx5e_sq_stats *stats = sq->stats;
+ struct net_dim_sample dim_sample;
+
+ if (unlikely(!test_bit(MLX5E_SQ_STATE_AM, &sq->state)))
+ return;
+
+ net_dim_sample(sq->cq.event_ctr, stats->packets, stats->bytes,
+ &dim_sample);
+ net_dim(&sq->dim, dim_sample);
+}
+
+static void mlx5e_handle_rx_dim(struct mlx5e_rq *rq)
+{
+ struct mlx5e_rq_stats *stats = rq->stats;
+ struct net_dim_sample dim_sample;
+
+ if (unlikely(!test_bit(MLX5E_RQ_STATE_AM, &rq->state)))
+ return;
+
+ net_dim_sample(rq->cq.event_ctr, stats->packets, stats->bytes,
+ &dim_sample);
+ net_dim(&rq->dim, dim_sample);
+}
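
Both helpers guard on the adaptive-moderation state bit and hand net_dim() a cumulative snapshot; the DIM library diffs consecutive snapshots to pick a new moderation profile. A simplified standalone stand-in for the snapshot (struct net_dim_sample is the kernel's own; this mock only illustrates the fields involved):

    #include <stdio.h>

    /* Simplified stand-in for struct net_dim_sample */
    struct sample { unsigned short event_ctr; unsigned long pkts, bytes; };

    static void take_sample(unsigned short event_ctr, unsigned long pkts,
                            unsigned long bytes, struct sample *s)
    {
        /* cumulative counters; DIM works on deltas between snapshots */
        s->event_ctr = event_ctr;
        s->pkts = pkts;
        s->bytes = bytes;
    }

    int main(void)
    {
        struct sample s;

        take_sample(1234, 1000000, 900000000, &s);
        printf("events=%u pkts=%lu bytes=%lu\n", s.event_ctr, s.pkts, s.bytes);
        return 0;
    }
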
+
int mlx5e_napi_poll(struct napi_struct *napi, int budget)
{
struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel,
@@ -75,18 +101,13 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
if (unlikely(!napi_complete_done(napi, work_done)))
return work_done;
- for (i = 0; i < c->num_tc; i++)
+ for (i = 0; i < c->num_tc; i++) {
+ mlx5e_handle_tx_dim(&c->sq[i]);
mlx5e_cq_arm(&c->sq[i].cq);
-
- if (MLX5E_TEST_BIT(c->rq.state, MLX5E_RQ_STATE_AM)) {
- struct net_dim_sample dim_sample;
- net_dim_sample(c->rq.cq.event_ctr,
- c->rq.stats.packets,
- c->rq.stats.bytes,
- &dim_sample);
- net_dim(&c->rq.dim, dim_sample);
}
+ mlx5e_handle_rx_dim(&c->rq);
+
mlx5e_cq_arm(&c->rq.cq);
mlx5e_cq_arm(&c->icosq.cq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 1814f803bd2c..406c23862f5f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -144,6 +144,8 @@ static const char *eqe_type_str(u8 type)
return "MLX5_EVENT_TYPE_GPIO_EVENT";
case MLX5_EVENT_TYPE_PORT_MODULE_EVENT:
return "MLX5_EVENT_TYPE_PORT_MODULE_EVENT";
+ case MLX5_EVENT_TYPE_TEMP_WARN_EVENT:
+ return "MLX5_EVENT_TYPE_TEMP_WARN_EVENT";
case MLX5_EVENT_TYPE_REMOTE_CONFIG:
return "MLX5_EVENT_TYPE_REMOTE_CONFIG";
case MLX5_EVENT_TYPE_DB_BF_CONGESTION:
@@ -162,6 +164,8 @@ static const char *eqe_type_str(u8 type)
return "MLX5_EVENT_TYPE_NIC_VPORT_CHANGE";
case MLX5_EVENT_TYPE_FPGA_ERROR:
return "MLX5_EVENT_TYPE_FPGA_ERROR";
+ case MLX5_EVENT_TYPE_FPGA_QP_ERROR:
+ return "MLX5_EVENT_TYPE_FPGA_QP_ERROR";
case MLX5_EVENT_TYPE_GENERAL_EVENT:
return "MLX5_EVENT_TYPE_GENERAL_EVENT";
default:
@@ -396,6 +400,20 @@ static void general_event_handler(struct mlx5_core_dev *dev,
}
}
+static void mlx5_temp_warning_event(struct mlx5_core_dev *dev,
+ struct mlx5_eqe *eqe)
+{
+ u64 value_lsb;
+ u64 value_msb;
+
+ value_lsb = be64_to_cpu(eqe->data.temp_warning.sensor_warning_lsb);
+ value_msb = be64_to_cpu(eqe->data.temp_warning.sensor_warning_msb);
+
+ mlx5_core_warn(dev,
+ "High temperature on sensors with bit set %llx %llx",
+ value_msb, value_lsb);
+}
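
The two 64-bit words form a 128-bit sensor bitmap. A standalone sketch (hypothetical helper, not part of the driver) that turns the words into sensor indices:

    #include <stdio.h>
    #include <stdint.h>

    static void print_hot_sensors(uint64_t lsb, uint64_t msb)
    {
        int i;

        for (i = 0; i < 64; i++)
            if (lsb & (1ULL << i))
                printf("sensor %d over temperature\n", i);
        for (i = 0; i < 64; i++)
            if (msb & (1ULL << i))
                printf("sensor %d over temperature\n", i + 64);
    }

    int main(void)
    {
        /* example: sensors 0 and 65 asserted */
        print_hot_sensors(0x1, 0x2);
        return 0;
    }
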
+
/* caller must eventually call mlx5_cq_put on the returned cq */
static struct mlx5_core_cq *mlx5_eq_cq_get(struct mlx5_eq *eq, u32 cqn)
{
@@ -547,9 +565,14 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
break;
case MLX5_EVENT_TYPE_FPGA_ERROR:
+ case MLX5_EVENT_TYPE_FPGA_QP_ERROR:
mlx5_fpga_event(dev, eqe->type, &eqe->data.raw);
break;
+ case MLX5_EVENT_TYPE_TEMP_WARN_EVENT:
+ mlx5_temp_warning_event(dev, eqe);
+ break;
+
case MLX5_EVENT_TYPE_GENERAL_EVENT:
general_event_handler(dev, eqe);
break;
@@ -822,10 +845,13 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
async_event_mask |= (1ull << MLX5_EVENT_TYPE_PPS_EVENT);
if (MLX5_CAP_GEN(dev, fpga))
- async_event_mask |= (1ull << MLX5_EVENT_TYPE_FPGA_ERROR);
+ async_event_mask |= (1ull << MLX5_EVENT_TYPE_FPGA_ERROR) |
+ (1ull << MLX5_EVENT_TYPE_FPGA_QP_ERROR);
if (MLX5_CAP_GEN_MAX(dev, dct))
async_event_mask |= (1ull << MLX5_EVENT_TYPE_DCT_DRAINED);
+ if (MLX5_CAP_GEN(dev, temp_warn_event))
+ async_event_mask |= (1ull << MLX5_EVENT_TYPE_TEMP_WARN_EVENT);
err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 1352d13eedb3..6cab1dd66d1b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -192,7 +192,7 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
}
dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
- dest.vport_num = vport;
+ dest.vport.num = vport;
esw_debug(esw->dev,
"\tFDB add rule dmac_v(%pM) dmac_c(%pM) -> vport(%d)\n",
@@ -200,7 +200,7 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
spec->match_criteria_enable = match_header;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
flow_rule =
- mlx5_add_flow_rules(esw->fdb_table.fdb, spec,
+ mlx5_add_flow_rules(esw->fdb_table.legacy.fdb, spec,
&flow_act, &dest, 1);
if (IS_ERR(flow_rule)) {
esw_warn(esw->dev,
@@ -282,7 +282,7 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw, int nvports)
esw_warn(dev, "Failed to create FDB Table err %d\n", err);
goto out;
}
- esw->fdb_table.fdb = fdb;
+ esw->fdb_table.legacy.fdb = fdb;
/* Addresses group : Full match unicast/multicast addresses */
MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
@@ -343,9 +343,9 @@ out:
mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp);
esw->fdb_table.legacy.addr_grp = NULL;
}
- if (!IS_ERR_OR_NULL(esw->fdb_table.fdb)) {
- mlx5_destroy_flow_table(esw->fdb_table.fdb);
- esw->fdb_table.fdb = NULL;
+ if (!IS_ERR_OR_NULL(esw->fdb_table.legacy.fdb)) {
+ mlx5_destroy_flow_table(esw->fdb_table.legacy.fdb);
+ esw->fdb_table.legacy.fdb = NULL;
}
}
@@ -355,15 +355,15 @@ out:
static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw)
{
- if (!esw->fdb_table.fdb)
+ if (!esw->fdb_table.legacy.fdb)
return;
esw_debug(esw->dev, "Destroy FDB Table\n");
mlx5_destroy_flow_group(esw->fdb_table.legacy.promisc_grp);
mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp);
mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp);
- mlx5_destroy_flow_table(esw->fdb_table.fdb);
- esw->fdb_table.fdb = NULL;
+ mlx5_destroy_flow_table(esw->fdb_table.legacy.fdb);
+ esw->fdb_table.legacy.fdb = NULL;
esw->fdb_table.legacy.addr_grp = NULL;
esw->fdb_table.legacy.allmulti_grp = NULL;
esw->fdb_table.legacy.promisc_grp = NULL;
@@ -396,7 +396,7 @@ static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
fdb_add:
/* SRIOV is enabled: Forward UC MAC to vport */
- if (esw->fdb_table.fdb && esw->mode == SRIOV_LEGACY)
+ if (esw->fdb_table.legacy.fdb && esw->mode == SRIOV_LEGACY)
vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM fr(%p)\n",
@@ -486,7 +486,7 @@ static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
u8 *mac = vaddr->node.addr;
u32 vport = vaddr->vport;
- if (!esw->fdb_table.fdb)
+ if (!esw->fdb_table.legacy.fdb)
return 0;
esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
@@ -526,7 +526,7 @@ static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
u8 *mac = vaddr->node.addr;
u32 vport = vaddr->vport;
- if (!esw->fdb_table.fdb)
+ if (!esw->fdb_table.legacy.fdb)
return 0;
esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 4cd773fa55e3..b174da2884c5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -55,6 +55,9 @@
#define MLX5_RATE_TO_BW_SHARE(rate, divider, limit) \
min_t(u32, max_t(u32, (rate) / (divider), MLX5_MIN_BW_SHARE), limit)
+#define mlx5_esw_has_fwd_fdb(dev) \
+ MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_to_table)
+
struct vport_ingress {
struct mlx5_flow_table *acl;
struct mlx5_flow_group *allow_untagged_spoofchk_grp;
@@ -117,16 +120,18 @@ struct mlx5_vport {
};
struct mlx5_eswitch_fdb {
- void *fdb;
union {
struct legacy_fdb {
+ struct mlx5_flow_table *fdb;
struct mlx5_flow_group *addr_grp;
struct mlx5_flow_group *allmulti_grp;
struct mlx5_flow_group *promisc_grp;
} legacy;
struct offloads_fdb {
- struct mlx5_flow_table *fdb;
+ struct mlx5_flow_table *fast_fdb;
+ struct mlx5_flow_table *fwd_fdb;
+ struct mlx5_flow_table *slow_fdb;
struct mlx5_flow_group *send_to_vport_grp;
struct mlx5_flow_group *miss_grp;
struct mlx5_flow_handle *miss_rule_uni;
@@ -214,6 +219,10 @@ struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
struct mlx5_esw_flow_attr *attr);
+struct mlx5_flow_handle *
+mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_esw_flow_attr *attr);
void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_handle *rule,
@@ -227,9 +236,24 @@ enum {
SET_VLAN_INSERT = BIT(1)
};
+enum mlx5_flow_match_level {
+ MLX5_MATCH_NONE = MLX5_INLINE_MODE_NONE,
+ MLX5_MATCH_L2 = MLX5_INLINE_MODE_L2,
+ MLX5_MATCH_L3 = MLX5_INLINE_MODE_IP,
+ MLX5_MATCH_L4 = MLX5_INLINE_MODE_TCP_UDP,
+};
+
+/* current maximum for flow based vport multicasting */
+#define MLX5_MAX_FLOW_FWD_VPORTS 2
+
struct mlx5_esw_flow_attr {
struct mlx5_eswitch_rep *in_rep;
- struct mlx5_eswitch_rep *out_rep;
+ struct mlx5_eswitch_rep *out_rep[MLX5_MAX_FLOW_FWD_VPORTS];
+ struct mlx5_core_dev *out_mdev[MLX5_MAX_FLOW_FWD_VPORTS];
+ struct mlx5_core_dev *in_mdev;
+
+ int mirror_count;
+ int out_count;
int action;
__be16 vlan_proto;
@@ -238,6 +262,7 @@ struct mlx5_esw_flow_attr {
bool vlan_handled;
u32 encap_id;
u32 mod_hdr_id;
+ u8 match_level;
struct mlx5e_tc_flow_parse_attr *parse_attr;
};
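
How the new arrays are partitioned, per parse_tc_fdb_actions earlier in this diff: mirror_count is snapshotted from out_count at each non-mirred action, so mirror destinations land in slots [0, mirror_count) and forward destinations in [mirror_count, out_count). A standalone sketch of walking the two ranges (mock types, example vport numbers):

    #include <stdio.h>

    #define MAX_FWD_VPORTS 2

    struct mock_attr {
        int out_vport[MAX_FWD_VPORTS];
        int mirror_count;   /* the first mirror_count slots are mirrors */
        int out_count;      /* total populated slots */
    };

    int main(void)
    {
        struct mock_attr attr = {
            .out_vport = { 3, 7 },  /* mirror to vport 3, forward to 7 */
            .mirror_count = 1,
            .out_count = 2,
        };
        int i;

        for (i = 0; i < attr.mirror_count; i++)
            printf("mirror  -> vport %d\n", attr.out_vport[i]);
        for (i = attr.mirror_count; i < attr.out_count; i++)
            printf("forward -> vport %d\n", attr.out_vport[i]);
        return 0;
    }
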
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 35e256eb2f6e..cecd201f0b73 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -48,16 +48,22 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
struct mlx5_esw_flow_attr *attr)
{
- struct mlx5_flow_destination dest[2] = {};
+ struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
struct mlx5_flow_act flow_act = {0};
+ struct mlx5_flow_table *ft = NULL;
struct mlx5_fc *counter = NULL;
struct mlx5_flow_handle *rule;
+ int j, i = 0;
void *misc;
- int i = 0;
if (esw->mode != SRIOV_OFFLOADS)
return ERR_PTR(-EOPNOTSUPP);
+ if (attr->mirror_count)
+ ft = esw->fdb_table.offloads.fwd_fdb;
+ else
+ ft = esw->fdb_table.offloads.fast_fdb;
+
flow_act.action = attr->action;
/* if per flow vlan pop/push is emulated, don't set that into the firmware */
if (!mlx5_eswitch_vlan_actions_supported(esw->dev))
@@ -70,9 +76,14 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
}
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
- dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
- dest[i].vport_num = attr->out_rep->vport;
- i++;
+ for (j = attr->mirror_count; j < attr->out_count; j++) {
+ dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ dest[i].vport.num = attr->out_rep[j]->vport;
+ dest[i].vport.vhca_id =
+ MLX5_CAP_GEN(attr->out_mdev[j], vhca_id);
+ dest[i].vport.vhca_id_valid = !!MLX5_CAP_ESW(esw->dev, merged_eswitch);
+ i++;
+ }
}
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
counter = mlx5_fc_create(esw->dev, true);
@@ -88,11 +99,23 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);
+ if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
+ MLX5_SET(fte_match_set_misc, misc,
+ source_eswitch_owner_vhca_id,
+ MLX5_CAP_GEN(attr->in_mdev, vhca_id));
+
misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
+ if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
+ MLX5_SET_TO_ONES(fte_match_set_misc, misc,
+ source_eswitch_owner_vhca_id);
+
+ if (attr->match_level == MLX5_MATCH_NONE)
+ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
+ else
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
+ MLX5_MATCH_MISC_PARAMETERS;
- spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
- MLX5_MATCH_MISC_PARAMETERS;
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;
@@ -102,8 +125,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
flow_act.encap_id = attr->encap_id;
- rule = mlx5_add_flow_rules((struct mlx5_flow_table *)esw->fdb_table.fdb,
- spec, &flow_act, dest, i);
+ rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, i);
if (IS_ERR(rule))
goto err_add_rule;
else
@@ -117,6 +139,57 @@ err_counter_alloc:
return rule;
}
+struct mlx5_flow_handle *
+mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_esw_flow_attr *attr)
+{
+ struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
+ struct mlx5_flow_act flow_act = {0};
+ struct mlx5_flow_handle *rule;
+ void *misc;
+ int i;
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ for (i = 0; i < attr->mirror_count; i++) {
+ dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ dest[i].vport.num = attr->out_rep[i]->vport;
+ dest[i].vport.vhca_id =
+ MLX5_CAP_GEN(attr->out_mdev[i], vhca_id);
+ dest[i].vport.vhca_id_valid = !!MLX5_CAP_ESW(esw->dev, merged_eswitch);
+ }
+ dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest[i].ft = esw->fdb_table.offloads.fwd_fdb,
+ i++;
+
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
+ MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);
+
+ if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
+ MLX5_SET(fte_match_set_misc, misc,
+ source_eswitch_owner_vhca_id,
+ MLX5_CAP_GEN(attr->in_mdev, vhca_id));
+
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
+ MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
+ if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
+ MLX5_SET_TO_ONES(fte_match_set_misc, misc,
+ source_eswitch_owner_vhca_id);
+
+ if (attr->match_level == MLX5_MATCH_NONE)
+ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
+ else
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
+ MLX5_MATCH_MISC_PARAMETERS;
+
+ rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fast_fdb, spec, &flow_act, dest, i);
+
+ if (!IS_ERR(rule))
+ esw->offloads.num_flows++;
+
+ return rule;
+}
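
Putting the two functions together — a standalone model of the split rule (mock destination structs, not the mlx5 steering API): the fast-FDB rule built here carries the mirror ports plus a jump to the fwd table, while the rule mlx5_eswitch_add_offloaded_rule installs into the fwd table (when mirror_count is set) carries the remaining forward ports:

    #include <stdio.h>

    enum dest_type { DEST_VPORT, DEST_TABLE };

    struct dest { enum dest_type type; int id; };

    int main(void)
    {
        /* attr: mirror to vport 3 (slot 0), forward to vport 7 (slot 1) */
        int out_vport[2] = { 3, 7 };
        int mirror_count = 1, out_count = 2;
        struct dest fast[3], fwd[2];
        int i, n = 0, m = 0;

        /* stage 1: fast_fdb rule = mirror copies + jump to fwd_fdb */
        for (i = 0; i < mirror_count; i++)
            fast[n++] = (struct dest){ DEST_VPORT, out_vport[i] };
        fast[n++] = (struct dest){ DEST_TABLE, 0 /* fwd_fdb */ };

        /* stage 2: fwd_fdb rule = the remaining forward ports */
        for (i = mirror_count; i < out_count; i++)
            fwd[m++] = (struct dest){ DEST_VPORT, out_vport[i] };

        for (i = 0; i < n; i++)
            printf("fast_fdb dest: %s %d\n",
                   fast[i].type == DEST_VPORT ? "vport" : "table", fast[i].id);
        for (i = 0; i < m; i++)
            printf("fwd_fdb dest: vport %d\n", fwd[i].id);
        return 0;
    }
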
+
void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_handle *rule,
@@ -156,7 +229,7 @@ esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;
in_rep = attr->in_rep;
- out_rep = attr->out_rep;
+ out_rep = attr->out_rep[0];
if (push)
vport = in_rep;
@@ -177,7 +250,7 @@ static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
goto out_notsupp;
in_rep = attr->in_rep;
- out_rep = attr->out_rep;
+ out_rep = attr->out_rep[0];
if (push && in_rep->vport == FDB_UPLINK_VPORT)
goto out_notsupp;
@@ -228,7 +301,7 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
if (!push && !pop && fwd) {
/* tracks VF --> wire rules without vlan push action */
- if (attr->out_rep->vport == FDB_UPLINK_VPORT) {
+ if (attr->out_rep[0]->vport == FDB_UPLINK_VPORT) {
vport->vlan_refcount++;
attr->vlan_handled = true;
}
@@ -288,7 +361,7 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
if (!push && !pop && fwd) {
/* tracks VF --> wire rules without vlan push action */
- if (attr->out_rep->vport == FDB_UPLINK_VPORT)
+ if (attr->out_rep[0]->vport == FDB_UPLINK_VPORT)
vport->vlan_refcount--;
return 0;
@@ -343,10 +416,10 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
- dest.vport_num = vport;
+ dest.vport.num = vport;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
+ flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
&flow_act, &dest, 1);
if (IS_ERR(flow_rule))
esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
@@ -387,10 +460,10 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
dmac_c[0] = 0x01;
dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
- dest.vport_num = 0;
+ dest.vport.num = 0;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
+ flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
&flow_act, &dest, 1);
if (IS_ERR(flow_rule)) {
err = PTR_ERR(flow_rule);
@@ -405,7 +478,7 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
outer_headers.dmac_47_16);
dmac_v[0] = 0x01;
- flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
+ flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
&flow_act, &dest, 1);
if (IS_ERR(flow_rule)) {
err = PTR_ERR(flow_rule);
@@ -437,7 +510,7 @@ static int esw_create_offloads_fast_fdb_table(struct mlx5_eswitch *esw)
if (!root_ns) {
esw_warn(dev, "Failed to get FDB flow namespace\n");
err = -EOPNOTSUPP;
- goto out;
+ goto out_namespace;
}
esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d)*groups(%d))\n",
@@ -447,6 +520,9 @@ static int esw_create_offloads_fast_fdb_table(struct mlx5_eswitch *esw)
esw_size = min_t(int, max_flow_counter * ESW_OFFLOADS_NUM_GROUPS,
1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
+ if (mlx5_esw_has_fwd_fdb(dev))
+ esw_size >>= 1;
+
if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
flags |= MLX5_FLOW_TABLE_TUNNEL_EN;
@@ -457,17 +533,37 @@ static int esw_create_offloads_fast_fdb_table(struct mlx5_eswitch *esw)
if (IS_ERR(fdb)) {
err = PTR_ERR(fdb);
esw_warn(dev, "Failed to create Fast path FDB Table err %d\n", err);
- goto out;
+ goto out_namespace;
}
- esw->fdb_table.fdb = fdb;
+ esw->fdb_table.offloads.fast_fdb = fdb;
-out:
+ if (!mlx5_esw_has_fwd_fdb(dev))
+ goto out_namespace;
+
+ fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH,
+ esw_size,
+ ESW_OFFLOADS_NUM_GROUPS, 1,
+ flags);
+ if (IS_ERR(fdb)) {
+ err = PTR_ERR(fdb);
+ esw_warn(dev, "Failed to create fwd table err %d\n", err);
+ goto out_ft;
+ }
+ esw->fdb_table.offloads.fwd_fdb = fdb;
+
+ return err;
+
+out_ft:
+ mlx5_destroy_flow_table(esw->fdb_table.offloads.fast_fdb);
+out_namespace:
return err;
}
static void esw_destroy_offloads_fast_fdb_table(struct mlx5_eswitch *esw)
{
- mlx5_destroy_flow_table(esw->fdb_table.fdb);
+ if (mlx5_esw_has_fwd_fdb(esw->dev))
+ mlx5_destroy_flow_table(esw->fdb_table.offloads.fwd_fdb);
+ mlx5_destroy_flow_table(esw->fdb_table.offloads.fast_fdb);
}
#define MAX_PF_SQ 256
@@ -513,7 +609,7 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
goto slow_fdb_err;
}
- esw->fdb_table.offloads.fdb = fdb;
+ esw->fdb_table.offloads.slow_fdb = fdb;
/* create send-to-vport group */
memset(flow_group_in, 0, inlen);
@@ -569,9 +665,9 @@ miss_rule_err:
miss_err:
mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
- mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb);
+ mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
slow_fdb_err:
- mlx5_destroy_flow_table(esw->fdb_table.fdb);
+ esw_destroy_offloads_fast_fdb_table(esw);
fast_fdb_err:
ns_err:
kvfree(flow_group_in);
@@ -580,7 +676,7 @@ ns_err:
static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
- if (!esw->fdb_table.fdb)
+ if (!esw->fdb_table.offloads.fast_fdb)
return;
esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
@@ -589,7 +685,7 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
- mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb);
+ mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
esw_destroy_offloads_fast_fdb_table(esw);
}
@@ -663,7 +759,7 @@ static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
esw->offloads.vport_rx_group = g;
out:
- kfree(flow_group_in);
+ kvfree(flow_group_in);
return err;
}
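
The mirroring rule assembled at the start of this hunk packs all of its forwarding targets into one destination array: one vport entry per mirror destination, followed by a single flow-table entry that chains into the second-level (fwd) FDB where the original forward action is applied. A minimal sketch of that layout, using illustrative types rather than the driver's own:

#include <stdio.h>

enum dest_type { DEST_VPORT, DEST_FLOW_TABLE };

struct dest {
	enum dest_type type;
	int vport_num;		/* valid for DEST_VPORT */
	const char *table;	/* valid for DEST_FLOW_TABLE */
};

int main(void)
{
	int mirror_vports[] = { 3, 5 };	/* hypothetical mirror reps */
	int mirror_count = 2;
	struct dest dest[8];
	int i;

	/* one forwarding entry per mirror destination... */
	for (i = 0; i < mirror_count; i++) {
		dest[i].type = DEST_VPORT;
		dest[i].vport_num = mirror_vports[i];
	}
	/* ...then chain to the second-level FDB, where the original
	 * (non-mirrored) forward action is applied
	 */
	dest[i].type = DEST_FLOW_TABLE;
	dest[i].table = "fwd_fdb";
	i++;

	printf("rule carries %d destinations\n", i);
	return 0;
}
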
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h
index d05233c9b4f6..eb8b0fe0b4e1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h
@@ -35,6 +35,13 @@
#include <linux/mlx5/driver.h>
+enum mlx5_fpga_device_id {
+ MLX5_FPGA_DEVICE_UNKNOWN = 0,
+ MLX5_FPGA_DEVICE_KU040 = 1,
+ MLX5_FPGA_DEVICE_KU060 = 2,
+ MLX5_FPGA_DEVICE_KU060_2 = 3,
+};
+
enum mlx5_fpga_image {
MLX5_FPGA_IMAGE_USER = 0,
MLX5_FPGA_IMAGE_FACTORY,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
index de7fe087d6fe..4138a770ed57 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
@@ -181,6 +181,7 @@ int mlx5_fpga_conn_send(struct mlx5_fpga_conn *conn,
if (!conn->qp.active)
return -ENOTCONN;
+ buf->dma_dir = DMA_TO_DEVICE;
err = mlx5_fpga_conn_map_buf(conn, buf);
if (err)
return err;
@@ -255,8 +256,6 @@ static void mlx5_fpga_conn_rq_cqe(struct mlx5_fpga_conn *conn,
ix = be16_to_cpu(cqe->wqe_counter) & (conn->qp.rq.size - 1);
buf = conn->qp.rq.bufs[ix];
conn->qp.rq.bufs[ix] = NULL;
- if (!status)
- buf->sg[0].size = be32_to_cpu(cqe->byte_cnt);
conn->qp.rq.cc++;
if (unlikely(status && (status != MLX5_CQE_SYNDROME_WR_FLUSH_ERR)))
@@ -274,6 +273,7 @@ static void mlx5_fpga_conn_rq_cqe(struct mlx5_fpga_conn *conn,
return;
}
+ buf->sg[0].size = be32_to_cpu(cqe->byte_cnt);
mlx5_fpga_dbg(conn->fdev, "Message with %u bytes received successfully\n",
buf->sg[0].size);
conn->recv_cb(conn->cb_arg, buf);
@@ -454,7 +454,7 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
}
inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
- sizeof(u64) * conn->cq.wq_ctrl.frag_buf.npages;
+ sizeof(u64) * conn->cq.wq_ctrl.buf.npages;
in = kvzalloc(inlen, GFP_KERNEL);
if (!in) {
err = -ENOMEM;
@@ -469,12 +469,12 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
MLX5_SET(cqc, cqc, log_cq_size, ilog2(cq_size));
MLX5_SET(cqc, cqc, c_eqn, eqn);
MLX5_SET(cqc, cqc, uar_page, fdev->conn_res.uar->index);
- MLX5_SET(cqc, cqc, log_page_size, conn->cq.wq_ctrl.frag_buf.page_shift -
+ MLX5_SET(cqc, cqc, log_page_size, conn->cq.wq_ctrl.buf.page_shift -
MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(cqc, cqc, dbr_addr, conn->cq.wq_ctrl.db.dma);
pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas);
- mlx5_fill_page_frag_array(&conn->cq.wq_ctrl.frag_buf, pas);
+ mlx5_fill_page_frag_array(&conn->cq.wq_ctrl.buf, pas);
err = mlx5_core_create_cq(mdev, &conn->cq.mcq, in, inlen);
kvfree(in);
@@ -500,7 +500,7 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
goto out;
err_cqwq:
- mlx5_cqwq_destroy(&conn->cq.wq_ctrl);
+ mlx5_wq_destroy(&conn->cq.wq_ctrl);
out:
return err;
}
@@ -510,7 +510,7 @@ static void mlx5_fpga_conn_destroy_cq(struct mlx5_fpga_conn *conn)
tasklet_disable(&conn->cq.tasklet);
tasklet_kill(&conn->cq.tasklet);
mlx5_core_destroy_cq(conn->fdev->mdev, &conn->cq.mcq);
- mlx5_cqwq_destroy(&conn->cq.wq_ctrl);
+ mlx5_wq_destroy(&conn->cq.wq_ctrl);
}
static int mlx5_fpga_conn_create_wq(struct mlx5_fpga_conn *conn, void *qpc)
@@ -591,8 +591,8 @@ static int mlx5_fpga_conn_create_qp(struct mlx5_fpga_conn *conn,
if (MLX5_CAP_GEN(mdev, cqe_version) == 1)
MLX5_SET(qpc, qpc, user_index, 0xFFFFFF);
- mlx5_fill_page_array(&conn->qp.wq_ctrl.buf,
- (__be64 *)MLX5_ADDR_OF(create_qp_in, in, pas));
+ mlx5_fill_page_frag_array(&conn->qp.wq_ctrl.buf,
+ (__be64 *)MLX5_ADDR_OF(create_qp_in, in, pas));
err = mlx5_core_create_qp(mdev, &conn->qp.mqp, in, inlen);
if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.h
index 44bd9eccc711..634ae10e287b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.h
@@ -54,7 +54,7 @@ struct mlx5_fpga_conn {
/* CQ */
struct {
struct mlx5_cqwq wq;
- struct mlx5_frag_wq_ctrl wq_ctrl;
+ struct mlx5_wq_ctrl wq_ctrl;
struct mlx5_core_cq mcq;
struct tasklet_struct tasklet;
} cq;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
index dc8970346521..436a8136f26f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
@@ -50,6 +50,11 @@ static const char *const mlx5_fpga_error_strings[] = {
"Temperature Critical",
};
+static const char * const mlx5_fpga_qp_error_strings[] = {
+ "Null Syndrome",
+ "Retry Counter Expired",
+ "RNR Expired",
+};
static struct mlx5_fpga_device *mlx5_fpga_device_alloc(void)
{
struct mlx5_fpga_device *fdev = NULL;
@@ -75,6 +80,21 @@ static const char *mlx5_fpga_image_name(enum mlx5_fpga_image image)
}
}
+static const char *mlx5_fpga_device_name(u32 device)
+{
+ switch (device) {
+ case MLX5_FPGA_DEVICE_KU040:
+ return "ku040";
+ case MLX5_FPGA_DEVICE_KU060:
+ return "ku060";
+ case MLX5_FPGA_DEVICE_KU060_2:
+ return "ku060_2";
+ case MLX5_FPGA_DEVICE_UNKNOWN:
+ default:
+ return "unknown";
+ }
+}
+
static int mlx5_fpga_device_load_check(struct mlx5_fpga_device *fdev)
{
struct mlx5_fpga_query query;
@@ -128,8 +148,9 @@ static int mlx5_fpga_device_brb(struct mlx5_fpga_device *fdev)
int mlx5_fpga_device_start(struct mlx5_core_dev *mdev)
{
struct mlx5_fpga_device *fdev = mdev->fpga;
- unsigned long flags;
unsigned int max_num_qps;
+ unsigned long flags;
+ u32 fpga_device_id;
int err;
if (!fdev)
@@ -143,12 +164,23 @@ int mlx5_fpga_device_start(struct mlx5_core_dev *mdev)
if (err)
goto out;
- mlx5_fpga_info(fdev, "device %u; %s image, version %u\n",
- MLX5_CAP_FPGA(fdev->mdev, fpga_device),
+ fpga_device_id = MLX5_CAP_FPGA(fdev->mdev, fpga_device);
+ mlx5_fpga_info(fdev, "%s:%u; %s image, version %u; SBU %06x:%04x version %d\n",
+ mlx5_fpga_device_name(fpga_device_id),
+ fpga_device_id,
mlx5_fpga_image_name(fdev->last_oper_image),
- MLX5_CAP_FPGA(fdev->mdev, image_version));
+ MLX5_CAP_FPGA(fdev->mdev, image_version),
+ MLX5_CAP_FPGA(fdev->mdev, ieee_vendor_id),
+ MLX5_CAP_FPGA(fdev->mdev, sandbox_product_id),
+ MLX5_CAP_FPGA(fdev->mdev, sandbox_product_version));
max_num_qps = MLX5_CAP_FPGA(mdev, shell_caps.max_num_qps);
+ if (!max_num_qps) {
+ mlx5_fpga_err(fdev, "FPGA reports 0 QPs in SHELL_CAPS\n");
+ err = -ENOTSUPP;
+ goto out;
+ }
+
err = mlx5_core_reserve_gids(mdev, max_num_qps);
if (err)
goto out;
@@ -244,23 +276,38 @@ static const char *mlx5_fpga_syndrome_to_string(u8 syndrome)
return "Unknown";
}
+static const char *mlx5_fpga_qp_syndrome_to_string(u8 syndrome)
+{
+ if (syndrome < ARRAY_SIZE(mlx5_fpga_qp_error_strings))
+ return mlx5_fpga_qp_error_strings[syndrome];
+ return "Unknown";
+}
+
void mlx5_fpga_event(struct mlx5_core_dev *mdev, u8 event, void *data)
{
struct mlx5_fpga_device *fdev = mdev->fpga;
const char *event_name;
bool teardown = false;
unsigned long flags;
+ u32 fpga_qpn;
u8 syndrome;
- if (event != MLX5_EVENT_TYPE_FPGA_ERROR) {
+ switch (event) {
+ case MLX5_EVENT_TYPE_FPGA_ERROR:
+ syndrome = MLX5_GET(fpga_error_event, data, syndrome);
+ event_name = mlx5_fpga_syndrome_to_string(syndrome);
+ break;
+ case MLX5_EVENT_TYPE_FPGA_QP_ERROR:
+ syndrome = MLX5_GET(fpga_qp_error_event, data, syndrome);
+ event_name = mlx5_fpga_qp_syndrome_to_string(syndrome);
+ fpga_qpn = MLX5_GET(fpga_qp_error_event, data, fpga_qpn);
+ break;
+ default:
mlx5_fpga_warn_ratelimited(fdev, "Unexpected event %u\n",
event);
return;
}
- syndrome = MLX5_GET(fpga_error_event, data, syndrome);
- event_name = mlx5_fpga_syndrome_to_string(syndrome);
-
spin_lock_irqsave(&fdev->state_lock, flags);
switch (fdev->state) {
case MLX5_FPGA_STATUS_SUCCESS:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h
index 82405ed84725..3e2355c8df3f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h
@@ -53,6 +53,7 @@ struct mlx5_fpga_device {
} conn_res;
struct mlx5_fpga_ipsec *ipsec;
+ struct mlx5_fpga_tls *tls;
};
#define mlx5_fpga_dbg(__adev, format, ...) \
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
index fad8c2e3804e..a0433b48e833 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
@@ -43,9 +43,6 @@
#include "fpga/sdk.h"
#include "fpga/core.h"
-#define SBU_QP_QUEUE_SIZE 8
-#define MLX5_FPGA_IPSEC_CMD_TIMEOUT_MSEC (60 * 1000)
-
enum mlx5_fpga_ipsec_cmd_status {
MLX5_FPGA_IPSEC_CMD_PENDING,
MLX5_FPGA_IPSEC_CMD_SEND_FAIL,
@@ -256,7 +253,7 @@ static int mlx5_fpga_ipsec_cmd_wait(void *ctx)
{
struct mlx5_fpga_ipsec_cmd_context *context = ctx;
unsigned long timeout =
- msecs_to_jiffies(MLX5_FPGA_IPSEC_CMD_TIMEOUT_MSEC);
+ msecs_to_jiffies(MLX5_FPGA_CMD_TIMEOUT_MSEC);
int res;
res = wait_for_completion_timeout(&context->complete, timeout);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.h
index baa537e54a49..656f96be6e20 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.h
@@ -41,9 +41,17 @@
* DOC: Innova SDK
* This header defines the in-kernel API for Innova FPGA client drivers.
*/
+#define SBU_QP_QUEUE_SIZE 8
+#define MLX5_FPGA_CMD_TIMEOUT_MSEC (60 * 1000)
+/**
+ * enum mlx5_fpga_access_type - Enumerates the possible methods for
+ * accessing the device memory address space
+ */
enum mlx5_fpga_access_type {
+ /** Use the slow CX-FPGA I2C bus */
MLX5_FPGA_ACCESS_TYPE_I2C = 0x0,
+ /** Use the fastest available method */
MLX5_FPGA_ACCESS_TYPE_DONTCARE = 0x0,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
new file mode 100644
index 000000000000..c9736238604a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
@@ -0,0 +1,562 @@
+/*
+ * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/mlx5/device.h>
+#include "fpga/tls.h"
+#include "fpga/cmd.h"
+#include "fpga/sdk.h"
+#include "fpga/core.h"
+#include "accel/tls.h"
+
+struct mlx5_fpga_tls_command_context;
+
+typedef void (*mlx5_fpga_tls_command_complete)
+ (struct mlx5_fpga_conn *conn, struct mlx5_fpga_device *fdev,
+ struct mlx5_fpga_tls_command_context *ctx,
+ struct mlx5_fpga_dma_buf *resp);
+
+struct mlx5_fpga_tls_command_context {
+ struct list_head list;
+ /* There is no guarantee on the order between the TX completion
+ * and the command response.
+ * The TX completion is going to touch cmd->buf even in
+ * the case of successful transmission.
+ * So instead of requiring separate allocations for cmd
+ * and cmd->buf, we've decided to use a reference counter.
+ */
+ refcount_t ref;
+ struct mlx5_fpga_dma_buf buf;
+ mlx5_fpga_tls_command_complete complete;
+};
+
+static void
+mlx5_fpga_tls_put_command_ctx(struct mlx5_fpga_tls_command_context *ctx)
+{
+ if (refcount_dec_and_test(&ctx->ref))
+ kfree(ctx);
+}
+
+static void mlx5_fpga_tls_cmd_complete(struct mlx5_fpga_device *fdev,
+ struct mlx5_fpga_dma_buf *resp)
+{
+ struct mlx5_fpga_conn *conn = fdev->tls->conn;
+ struct mlx5_fpga_tls_command_context *ctx;
+ struct mlx5_fpga_tls *tls = fdev->tls;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tls->pending_cmds_lock, flags);
+ ctx = list_first_entry(&tls->pending_cmds,
+ struct mlx5_fpga_tls_command_context, list);
+ list_del(&ctx->list);
+ spin_unlock_irqrestore(&tls->pending_cmds_lock, flags);
+ ctx->complete(conn, fdev, ctx, resp);
+}
+
+static void mlx5_fpga_cmd_send_complete(struct mlx5_fpga_conn *conn,
+ struct mlx5_fpga_device *fdev,
+ struct mlx5_fpga_dma_buf *buf,
+ u8 status)
+{
+ struct mlx5_fpga_tls_command_context *ctx =
+ container_of(buf, struct mlx5_fpga_tls_command_context, buf);
+
+ mlx5_fpga_tls_put_command_ctx(ctx);
+
+ if (unlikely(status))
+ mlx5_fpga_tls_cmd_complete(fdev, NULL);
+}
+
+static void mlx5_fpga_tls_cmd_send(struct mlx5_fpga_device *fdev,
+ struct mlx5_fpga_tls_command_context *cmd,
+ mlx5_fpga_tls_command_complete complete)
+{
+ struct mlx5_fpga_tls *tls = fdev->tls;
+ unsigned long flags;
+ int ret;
+
+ refcount_set(&cmd->ref, 2);
+ cmd->complete = complete;
+ cmd->buf.complete = mlx5_fpga_cmd_send_complete;
+
+ spin_lock_irqsave(&tls->pending_cmds_lock, flags);
+ /* mlx5_fpga_sbu_conn_sendmsg is called under pending_cmds_lock
+ * to make sure commands are inserted to the tls->pending_cmds list
+ * and the command QP in the same order.
+ */
+ ret = mlx5_fpga_sbu_conn_sendmsg(tls->conn, &cmd->buf);
+ if (likely(!ret))
+ list_add_tail(&cmd->list, &tls->pending_cmds);
+ else
+ complete(tls->conn, fdev, cmd, NULL);
+ spin_unlock_irqrestore(&tls->pending_cmds_lock, flags);
+}
+
+/* Start of context identifiers range (inclusive) */
+#define SWID_START 0
+/* End of context identifiers range (exclusive) */
+#define SWID_END BIT(24)
+
+static int mlx5_fpga_tls_alloc_swid(struct idr *idr, spinlock_t *idr_spinlock,
+ void *ptr)
+{
+ int ret;
+
+ /* The TLS metadata format is 1 byte of syndrome followed
+ * by 3 bytes of swid (software ID), so a swid must not
+ * exceed 3 bytes.
+ * See tls_rxtx.c:insert_pet() for details.
+ */
+ BUILD_BUG_ON((SWID_END - 1) & 0xFF000000);
+
+ idr_preload(GFP_KERNEL);
+ spin_lock_irq(idr_spinlock);
+ ret = idr_alloc(idr, ptr, SWID_START, SWID_END, GFP_ATOMIC);
+ spin_unlock_irq(idr_spinlock);
+ idr_preload_end();
+
+ return ret;
+}
+
+static void mlx5_fpga_tls_release_swid(struct idr *idr,
+ spinlock_t *idr_spinlock, u32 swid)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(idr_spinlock, flags);
+ idr_remove(idr, swid);
+ spin_unlock_irqrestore(idr_spinlock, flags);
+}
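
/* A minimal sketch (not part of this patch) of the swid alloc/release
 * round trip above, using the stock IDR API. The BIT(24) ceiling keeps
 * the id encodable in the 3-byte metadata field described earlier;
 * "my_swid_domain" and both helpers are hypothetical names.
 */
#include <linux/bits.h>
#include <linux/gfp.h>
#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_IDR(my_swid_domain);
static DEFINE_SPINLOCK(my_swid_lock);

static int my_swid_get(void *flow)
{
	int swid;

	idr_preload(GFP_KERNEL);	/* preallocate outside the lock */
	spin_lock_irq(&my_swid_lock);
	swid = idr_alloc(&my_swid_domain, flow, 0, BIT(24), GFP_ATOMIC);
	spin_unlock_irq(&my_swid_lock);
	idr_preload_end();

	return swid;	/* id >= 0, or -ENOSPC / -ENOMEM on failure */
}

static void my_swid_put(u32 swid)
{
	unsigned long flags;

	spin_lock_irqsave(&my_swid_lock, flags);
	idr_remove(&my_swid_domain, swid);
	spin_unlock_irqrestore(&my_swid_lock, flags);
}
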
+
+struct mlx5_teardown_stream_context {
+ struct mlx5_fpga_tls_command_context cmd;
+ u32 swid;
+};
+
+static void
+mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn,
+ struct mlx5_fpga_device *fdev,
+ struct mlx5_fpga_tls_command_context *cmd,
+ struct mlx5_fpga_dma_buf *resp)
+{
+ struct mlx5_teardown_stream_context *ctx =
+ container_of(cmd, struct mlx5_teardown_stream_context, cmd);
+
+ if (resp) {
+ u32 syndrome = MLX5_GET(tls_resp, resp->sg[0].data, syndrome);
+
+ if (syndrome)
+ mlx5_fpga_err(fdev,
+ "Teardown stream failed with syndrome = %d",
+ syndrome);
+ else
+ mlx5_fpga_tls_release_swid(&fdev->tls->tx_idr,
+ &fdev->tls->idr_spinlock,
+ ctx->swid);
+ }
+ mlx5_fpga_tls_put_command_ctx(cmd);
+}
+
+static void mlx5_fpga_tls_flow_to_cmd(void *flow, void *cmd)
+{
+ memcpy(MLX5_ADDR_OF(tls_cmd, cmd, src_port), flow,
+ MLX5_BYTE_OFF(tls_flow, ipv6));
+
+ MLX5_SET(tls_cmd, cmd, ipv6, MLX5_GET(tls_flow, flow, ipv6));
+ MLX5_SET(tls_cmd, cmd, direction_sx,
+ MLX5_GET(tls_flow, flow, direction_sx));
+}
+
+static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev,
+ void *flow, u32 swid, gfp_t flags)
+{
+ struct mlx5_teardown_stream_context *ctx;
+ struct mlx5_fpga_dma_buf *buf;
+ void *cmd;
+
+ ctx = kzalloc(sizeof(*ctx) + MLX5_TLS_COMMAND_SIZE, flags);
+ if (!ctx)
+ return;
+
+ buf = &ctx->cmd.buf;
+ cmd = (ctx + 1);
+ MLX5_SET(tls_cmd, cmd, command_type, CMD_TEARDOWN_STREAM);
+ MLX5_SET(tls_cmd, cmd, swid, swid);
+
+ mlx5_fpga_tls_flow_to_cmd(flow, cmd);
+ kfree(flow);
+
+ buf->sg[0].data = cmd;
+ buf->sg[0].size = MLX5_TLS_COMMAND_SIZE;
+
+ ctx->swid = swid;
+ mlx5_fpga_tls_cmd_send(mdev->fpga, &ctx->cmd,
+ mlx5_fpga_tls_teardown_completion);
+}
+
+void mlx5_fpga_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid,
+ gfp_t flags)
+{
+ struct mlx5_fpga_tls *tls = mdev->fpga->tls;
+ void *flow;
+
+ rcu_read_lock();
+ flow = idr_find(&tls->tx_idr, swid);
+ rcu_read_unlock();
+
+ if (!flow) {
+ mlx5_fpga_err(mdev->fpga, "No flow information for swid %u\n",
+ swid);
+ return;
+ }
+
+ mlx5_fpga_tls_send_teardown_cmd(mdev, flow, swid, flags);
+}
+
+enum mlx5_fpga_setup_stream_status {
+ MLX5_FPGA_CMD_PENDING,
+ MLX5_FPGA_CMD_SEND_FAILED,
+ MLX5_FPGA_CMD_RESPONSE_RECEIVED,
+ MLX5_FPGA_CMD_ABANDONED,
+};
+
+struct mlx5_setup_stream_context {
+ struct mlx5_fpga_tls_command_context cmd;
+ atomic_t status;
+ u32 syndrome;
+ struct completion comp;
+};
+
+static void
+mlx5_fpga_tls_setup_completion(struct mlx5_fpga_conn *conn,
+ struct mlx5_fpga_device *fdev,
+ struct mlx5_fpga_tls_command_context *cmd,
+ struct mlx5_fpga_dma_buf *resp)
+{
+ struct mlx5_setup_stream_context *ctx =
+ container_of(cmd, struct mlx5_setup_stream_context, cmd);
+ int status = MLX5_FPGA_CMD_SEND_FAILED;
+ void *tls_cmd = ctx + 1;
+
+ /* If we failed to send the command, resp == NULL */
+ if (resp) {
+ ctx->syndrome = MLX5_GET(tls_resp, resp->sg[0].data, syndrome);
+ status = MLX5_FPGA_CMD_RESPONSE_RECEIVED;
+ }
+
+ status = atomic_xchg_release(&ctx->status, status);
+ if (likely(status != MLX5_FPGA_CMD_ABANDONED)) {
+ complete(&ctx->comp);
+ return;
+ }
+
+ mlx5_fpga_err(fdev, "Command was abandoned, syndrome = %u\n",
+ ctx->syndrome);
+
+ if (!ctx->syndrome) {
+ /* The process was killed while waiting for the context to be
+ * added, and the add completed successfully.
+ * We need to destroy the HW context, and we can't reuse
+ * the command context because we might not have received
+ * the tx completion yet.
+ */
+ mlx5_fpga_tls_del_tx_flow(fdev->mdev,
+ MLX5_GET(tls_cmd, tls_cmd, swid),
+ GFP_ATOMIC);
+ }
+
+ mlx5_fpga_tls_put_command_ctx(cmd);
+}
+
+static int mlx5_fpga_tls_setup_stream_cmd(struct mlx5_core_dev *mdev,
+ struct mlx5_setup_stream_context *ctx)
+{
+ struct mlx5_fpga_dma_buf *buf;
+ void *cmd = ctx + 1;
+ int status, ret = 0;
+
+ buf = &ctx->cmd.buf;
+ buf->sg[0].data = cmd;
+ buf->sg[0].size = MLX5_TLS_COMMAND_SIZE;
+ MLX5_SET(tls_cmd, cmd, command_type, CMD_SETUP_STREAM);
+
+ init_completion(&ctx->comp);
+ atomic_set(&ctx->status, MLX5_FPGA_CMD_PENDING);
+ ctx->syndrome = -1;
+
+ mlx5_fpga_tls_cmd_send(mdev->fpga, &ctx->cmd,
+ mlx5_fpga_tls_setup_completion);
+ wait_for_completion_killable(&ctx->comp);
+
+ status = atomic_xchg_acquire(&ctx->status, MLX5_FPGA_CMD_ABANDONED);
+ if (unlikely(status == MLX5_FPGA_CMD_PENDING))
+ /* ctx is going to be released in mlx5_fpga_tls_setup_completion */
+ return -EINTR;
+
+ if (unlikely(ctx->syndrome))
+ ret = -ENOMEM;
+
+ mlx5_fpga_tls_put_command_ctx(&ctx->cmd);
+ return ret;
+}
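
/* A plain-C11 model (a sketch, not the driver's API) of the ownership
 * handoff implemented above: each side performs exactly one atomic
 * exchange on the status word, and whichever side observes the other's
 * mark takes over freeing the context. All names are illustrative.
 */
#include <stdatomic.h>
#include <stdlib.h>

enum { ST_PENDING, ST_DONE, ST_ABANDONED };

struct cmd_ctx { _Atomic int status; };

static void on_complete(struct cmd_ctx *ctx)	/* response/error path */
{
	int old = atomic_exchange_explicit(&ctx->status, ST_DONE,
					   memory_order_release);
	if (old == ST_ABANDONED)
		free(ctx);	/* waiter was killed; we own the context */
	/* else the waiter is still around and will free it */
}

static int waiter(struct cmd_ctx *ctx)		/* killable wait path */
{
	/* ...wait for completion, possibly interrupted by a signal... */
	int old = atomic_exchange_explicit(&ctx->status, ST_ABANDONED,
					   memory_order_acquire);
	if (old == ST_PENDING)
		return -1;	/* -EINTR in the driver; completion frees */
	free(ctx);		/* completion already ran; we free */
	return 0;
}
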
+
+static void mlx5_fpga_tls_hw_qp_recv_cb(void *cb_arg,
+ struct mlx5_fpga_dma_buf *buf)
+{
+ struct mlx5_fpga_device *fdev = (struct mlx5_fpga_device *)cb_arg;
+
+ mlx5_fpga_tls_cmd_complete(fdev, buf);
+}
+
+bool mlx5_fpga_is_tls_device(struct mlx5_core_dev *mdev)
+{
+ if (!mdev->fpga || !MLX5_CAP_GEN(mdev, fpga))
+ return false;
+
+ if (MLX5_CAP_FPGA(mdev, ieee_vendor_id) !=
+ MLX5_FPGA_CAP_SANDBOX_VENDOR_ID_MLNX)
+ return false;
+
+ if (MLX5_CAP_FPGA(mdev, sandbox_product_id) !=
+ MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_TLS)
+ return false;
+
+ if (MLX5_CAP_FPGA(mdev, sandbox_product_version) != 0)
+ return false;
+
+ return true;
+}
+
+static int mlx5_fpga_tls_get_caps(struct mlx5_fpga_device *fdev,
+ u32 *p_caps)
+{
+ int err, cap_size = MLX5_ST_SZ_BYTES(tls_extended_cap);
+ u32 caps = 0;
+ void *buf;
+
+ buf = kzalloc(cap_size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ err = mlx5_fpga_get_sbu_caps(fdev, cap_size, buf);
+ if (err)
+ goto out;
+
+ if (MLX5_GET(tls_extended_cap, buf, tx))
+ caps |= MLX5_ACCEL_TLS_TX;
+ if (MLX5_GET(tls_extended_cap, buf, rx))
+ caps |= MLX5_ACCEL_TLS_RX;
+ if (MLX5_GET(tls_extended_cap, buf, tls_v12))
+ caps |= MLX5_ACCEL_TLS_V12;
+ if (MLX5_GET(tls_extended_cap, buf, tls_v13))
+ caps |= MLX5_ACCEL_TLS_V13;
+ if (MLX5_GET(tls_extended_cap, buf, lro))
+ caps |= MLX5_ACCEL_TLS_LRO;
+ if (MLX5_GET(tls_extended_cap, buf, ipv6))
+ caps |= MLX5_ACCEL_TLS_IPV6;
+
+ if (MLX5_GET(tls_extended_cap, buf, aes_gcm_128))
+ caps |= MLX5_ACCEL_TLS_AES_GCM128;
+ if (MLX5_GET(tls_extended_cap, buf, aes_gcm_256))
+ caps |= MLX5_ACCEL_TLS_AES_GCM256;
+
+ *p_caps = caps;
+ err = 0;
+out:
+ kfree(buf);
+ return err;
+}
+
+int mlx5_fpga_tls_init(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_fpga_device *fdev = mdev->fpga;
+ struct mlx5_fpga_conn_attr init_attr = {0};
+ struct mlx5_fpga_conn *conn;
+ struct mlx5_fpga_tls *tls;
+ int err = 0;
+
+ if (!mlx5_fpga_is_tls_device(mdev) || !fdev)
+ return 0;
+
+ tls = kzalloc(sizeof(*tls), GFP_KERNEL);
+ if (!tls)
+ return -ENOMEM;
+
+ err = mlx5_fpga_tls_get_caps(fdev, &tls->caps);
+ if (err)
+ goto error;
+
+ if (!(tls->caps & (MLX5_ACCEL_TLS_TX | MLX5_ACCEL_TLS_V12 |
+ MLX5_ACCEL_TLS_AES_GCM128))) {
+ err = -ENOTSUPP;
+ goto error;
+ }
+
+ init_attr.rx_size = SBU_QP_QUEUE_SIZE;
+ init_attr.tx_size = SBU_QP_QUEUE_SIZE;
+ init_attr.recv_cb = mlx5_fpga_tls_hw_qp_recv_cb;
+ init_attr.cb_arg = fdev;
+ conn = mlx5_fpga_sbu_conn_create(fdev, &init_attr);
+ if (IS_ERR(conn)) {
+ err = PTR_ERR(conn);
+ mlx5_fpga_err(fdev, "Error creating TLS command connection %d\n",
+ err);
+ goto error;
+ }
+
+ tls->conn = conn;
+ spin_lock_init(&tls->pending_cmds_lock);
+ INIT_LIST_HEAD(&tls->pending_cmds);
+
+ idr_init(&tls->tx_idr);
+ spin_lock_init(&tls->idr_spinlock);
+ fdev->tls = tls;
+ return 0;
+
+error:
+ kfree(tls);
+ return err;
+}
+
+void mlx5_fpga_tls_cleanup(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_fpga_device *fdev = mdev->fpga;
+
+ if (!fdev || !fdev->tls)
+ return;
+
+ mlx5_fpga_sbu_conn_destroy(fdev->tls->conn);
+ kfree(fdev->tls);
+ fdev->tls = NULL;
+}
+
+static void mlx5_fpga_tls_set_aes_gcm128_ctx(void *cmd,
+ struct tls_crypto_info *info,
+ __be64 *rcd_sn)
+{
+ struct tls12_crypto_info_aes_gcm_128 *crypto_info =
+ (struct tls12_crypto_info_aes_gcm_128 *)info;
+
+ memcpy(MLX5_ADDR_OF(tls_cmd, cmd, tls_rcd_sn), crypto_info->rec_seq,
+ TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
+
+ memcpy(MLX5_ADDR_OF(tls_cmd, cmd, tls_implicit_iv),
+ crypto_info->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
+ memcpy(MLX5_ADDR_OF(tls_cmd, cmd, encryption_key),
+ crypto_info->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);
+
+ /* in AES-GCM 128 we need to write the key twice */
+ memcpy(MLX5_ADDR_OF(tls_cmd, cmd, encryption_key) +
+ TLS_CIPHER_AES_GCM_128_KEY_SIZE,
+ crypto_info->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);
+
+ MLX5_SET(tls_cmd, cmd, alg, MLX5_TLS_ALG_AES_GCM_128);
+}
+
+static int mlx5_fpga_tls_set_key_material(void *cmd, u32 caps,
+ struct tls_crypto_info *crypto_info)
+{
+ __be64 rcd_sn;
+
+ switch (crypto_info->cipher_type) {
+ case TLS_CIPHER_AES_GCM_128:
+ if (!(caps & MLX5_ACCEL_TLS_AES_GCM128))
+ return -EINVAL;
+ mlx5_fpga_tls_set_aes_gcm128_ctx(cmd, crypto_info, &rcd_sn);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
+ struct tls_crypto_info *crypto_info, u32 swid,
+ u32 tcp_sn)
+{
+ u32 caps = mlx5_fpga_tls_device_caps(mdev);
+ struct mlx5_setup_stream_context *ctx;
+ int ret = -ENOMEM;
+ size_t cmd_size;
+ void *cmd;
+
+ cmd_size = MLX5_TLS_COMMAND_SIZE + sizeof(*ctx);
+ ctx = kzalloc(cmd_size, GFP_KERNEL);
+ if (!ctx)
+ goto out;
+
+ cmd = ctx + 1;
+ ret = mlx5_fpga_tls_set_key_material(cmd, caps, crypto_info);
+ if (ret)
+ goto free_ctx;
+
+ mlx5_fpga_tls_flow_to_cmd(flow, cmd);
+
+ MLX5_SET(tls_cmd, cmd, swid, swid);
+ MLX5_SET(tls_cmd, cmd, tcp_sn, tcp_sn);
+
+ return mlx5_fpga_tls_setup_stream_cmd(mdev, ctx);
+
+free_ctx:
+ kfree(ctx);
+out:
+ return ret;
+}
+
+int mlx5_fpga_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow,
+ struct tls_crypto_info *crypto_info,
+ u32 start_offload_tcp_sn, u32 *p_swid)
+{
+ struct mlx5_fpga_tls *tls = mdev->fpga->tls;
+ int ret = -ENOMEM;
+ u32 swid;
+
+ ret = mlx5_fpga_tls_alloc_swid(&tls->tx_idr, &tls->idr_spinlock, flow);
+ if (ret < 0)
+ return ret;
+
+ swid = ret;
+ MLX5_SET(tls_flow, flow, direction_sx, 1);
+
+ ret = mlx5_fpga_tls_add_flow(mdev, flow, crypto_info, swid,
+ start_offload_tcp_sn);
+ if (ret && ret != -EINTR)
+ goto free_swid;
+
+ *p_swid = swid;
+ return 0;
+free_swid:
+ mlx5_fpga_tls_release_swid(&tls->tx_idr, &tls->idr_spinlock, swid);
+
+ return ret;
+}
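
The command context in this file starts life with a reference count of two because two asynchronous events — the TX completion, which touches cmd->buf even on success, and the command response — can arrive in either order, and whichever fires last must free the allocation. A compact C11 model of that lifetime (illustrative names only, not the driver's API):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct cmd {
	atomic_int ref;
	/* command payload and DMA buffer would live here */
};

static void cmd_put(struct cmd *c)
{
	/* atomic_fetch_sub returns the old value: the last dropper frees */
	if (atomic_fetch_sub(&c->ref, 1) == 1)
		free(c);
}

int main(void)
{
	struct cmd *c = malloc(sizeof(*c));

	if (!c)
		return 1;
	atomic_init(&c->ref, 2);	/* one reference per pending event */
	cmd_put(c);			/* TX completion fires */
	cmd_put(c);			/* response arrives; frees here */
	puts("both events dropped their reference");
	return 0;
}
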
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h
new file mode 100644
index 000000000000..800a214e4e49
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef __MLX5_FPGA_TLS_H__
+#define __MLX5_FPGA_TLS_H__
+
+#include <linux/mlx5/driver.h>
+
+#include <net/tls.h>
+#include "fpga/core.h"
+
+struct mlx5_fpga_tls {
+ struct list_head pending_cmds;
+ spinlock_t pending_cmds_lock; /* Protects pending_cmds */
+ u32 caps;
+ struct mlx5_fpga_conn *conn;
+
+ struct idr tx_idr;
+ spinlock_t idr_spinlock; /* protects the IDR */
+};
+
+int mlx5_fpga_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow,
+ struct tls_crypto_info *crypto_info,
+ u32 start_offload_tcp_sn, u32 *p_swid);
+
+void mlx5_fpga_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid,
+ gfp_t flags);
+
+bool mlx5_fpga_is_tls_device(struct mlx5_core_dev *mdev);
+int mlx5_fpga_tls_init(struct mlx5_core_dev *mdev);
+void mlx5_fpga_tls_cleanup(struct mlx5_core_dev *mdev);
+
+static inline u32 mlx5_fpga_tls_device_caps(struct mlx5_core_dev *mdev)
+{
+ return mdev->fpga->tls->caps;
+}
+
+#endif /* __MLX5_FPGA_TLS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index ef5afd7c9325..5a00deff5457 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -372,6 +372,15 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
if (dst->dest_attr.type ==
MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) {
id = dst->dest_attr.ft->id;
+ } else if (dst->dest_attr.type ==
+ MLX5_FLOW_DESTINATION_TYPE_VPORT) {
+ id = dst->dest_attr.vport.num;
+ MLX5_SET(dest_format_struct, in_dests,
+ destination_eswitch_owner_vhca_id_valid,
+ dst->dest_attr.vport.vhca_id_valid);
+ MLX5_SET(dest_format_struct, in_dests,
+ destination_eswitch_owner_vhca_id,
+ dst->dest_attr.vport.vhca_id);
} else {
id = dst->dest_attr.tir_num;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 56e275199256..e1b609c61d59 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1373,6 +1373,8 @@ static int create_auto_flow_group(struct mlx5_flow_table *ft,
struct mlx5_core_dev *dev = get_dev(&ft->node);
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
void *match_criteria_addr;
+ u8 src_esw_owner_mask_on;
+ void *misc;
int err;
u32 *in;
@@ -1385,6 +1387,14 @@ static int create_auto_flow_group(struct mlx5_flow_table *ft,
MLX5_SET(create_flow_group_in, in, start_flow_index, fg->start_index);
MLX5_SET(create_flow_group_in, in, end_flow_index, fg->start_index +
fg->max_ftes - 1);
+
+ misc = MLX5_ADDR_OF(fte_match_param, fg->mask.match_criteria,
+ misc_parameters);
+ src_esw_owner_mask_on = !!MLX5_GET(fte_match_set_misc, misc,
+ source_eswitch_owner_vhca_id);
+ MLX5_SET(create_flow_group_in, in,
+ source_eswitch_owner_vhca_id_valid, src_esw_owner_mask_on);
+
match_criteria_addr = MLX5_ADDR_OF(create_flow_group_in,
in, match_criteria);
memcpy(match_criteria_addr, fg->mask.match_criteria,
@@ -1405,7 +1415,7 @@ static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
{
if (d1->type == d2->type) {
if ((d1->type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
- d1->vport_num == d2->vport_num) ||
+ d1->vport.num == d2->vport.num) ||
(d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
d1->ft == d2->ft) ||
(d1->type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
@@ -2484,7 +2494,7 @@ static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
if (!steering->fdb_root_ns)
return -ENOMEM;
- prio = fs_create_prio(&steering->fdb_root_ns->ns, 0, 1);
+ prio = fs_create_prio(&steering->fdb_root_ns->ns, 0, 2);
if (IS_ERR(prio))
goto out_err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
index 6d9053bcbe95..08eac92fc26c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
@@ -33,6 +33,8 @@
#ifndef __MLX5E_IPOB_H__
#define __MLX5E_IPOB_H__
+#ifdef CONFIG_MLX5_CORE_IPOIB
+
#include <linux/mlx5/fs.h>
#include "en.h"
@@ -93,8 +95,32 @@ const struct mlx5e_profile *mlx5i_pkey_get_profile(void);
/* Extract mlx5e_priv from IPoIB netdev */
#define mlx5i_epriv(netdev) ((void *)(((struct mlx5i_priv *)netdev_priv(netdev))->mlx5e_priv))
+struct mlx5_wqe_eth_pad {
+ u8 rsvd0[16];
+};
+
+struct mlx5i_tx_wqe {
+ struct mlx5_wqe_ctrl_seg ctrl;
+ struct mlx5_wqe_datagram_seg datagram;
+ struct mlx5_wqe_eth_pad pad;
+ struct mlx5_wqe_eth_seg eth;
+ struct mlx5_wqe_data_seg data[0];
+};
+
+static inline void mlx5i_sq_fetch_wqe(struct mlx5e_txqsq *sq,
+ struct mlx5i_tx_wqe **wqe,
+ u16 *pi)
+{
+ struct mlx5_wq_cyc *wq = &sq->wq;
+
+ *pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+ *wqe = mlx5_wq_cyc_get_wqe(wq, *pi);
+ memset(*wqe, 0, sizeof(**wqe));
+}
+
netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5_av *av, u32 dqpn, u32 dqkey);
void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
+#endif /* CONFIG_MLX5_CORE_IPOIB */
#endif /* __MLX5E_IPOB_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index e2c465b0b3f8..615005e63819 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -60,6 +60,7 @@
#include "fpga/core.h"
#include "fpga/ipsec.h"
#include "accel/ipsec.h"
+#include "accel/tls.h"
#include "lib/clock.h"
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
@@ -1190,6 +1191,12 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
goto err_ipsec_start;
}
+ err = mlx5_accel_tls_init(dev);
+ if (err) {
+ dev_err(&pdev->dev, "TLS device start failed %d\n", err);
+ goto err_tls_start;
+ }
+
err = mlx5_init_fs(dev);
if (err) {
dev_err(&pdev->dev, "Failed to init flow steering\n");
@@ -1231,6 +1238,9 @@ err_sriov:
mlx5_cleanup_fs(dev);
err_fs:
+ mlx5_accel_tls_cleanup(dev);
+
+err_tls_start:
mlx5_accel_ipsec_cleanup(dev);
err_ipsec_start:
@@ -1306,6 +1316,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
mlx5_sriov_detach(dev);
mlx5_cleanup_fs(dev);
mlx5_accel_ipsec_cleanup(dev);
+ mlx5_accel_tls_cleanup(dev);
mlx5_fpga_device_stop(dev);
mlx5_irq_clear_affinity_hints(dev);
free_comp_eqs(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
index b9736f505bdf..f4f02f775c93 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
@@ -123,8 +123,8 @@ int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
deleted_mkey = radix_tree_delete(&table->tree, mlx5_base_mkey(mkey->key));
write_unlock_irqrestore(&table->lock, flags);
if (!deleted_mkey) {
- mlx5_core_warn(dev, "failed radix tree delete of mkey 0x%x\n",
- mlx5_base_mkey(mkey->key));
+ mlx5_core_dbg(dev, "failed radix tree delete of mkey 0x%x\n",
+ mlx5_base_mkey(mkey->key));
return -ENOENT;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index 02d6c5b5d502..4ca07bfb6b14 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -407,21 +407,21 @@ static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
case MLX5_CMD_OP_RST2INIT_QP:
if (MBOX_ALLOC(mbox, rst2init_qp))
return -ENOMEM;
- MOD_QP_IN_SET_QPC(rst2init_qp, mbox->in, opcode, qpn,
- opt_param_mask, qpc);
- break;
+ MOD_QP_IN_SET_QPC(rst2init_qp, mbox->in, opcode, qpn,
+ opt_param_mask, qpc);
+ break;
case MLX5_CMD_OP_INIT2RTR_QP:
if (MBOX_ALLOC(mbox, init2rtr_qp))
return -ENOMEM;
- MOD_QP_IN_SET_QPC(init2rtr_qp, mbox->in, opcode, qpn,
- opt_param_mask, qpc);
- break;
+ MOD_QP_IN_SET_QPC(init2rtr_qp, mbox->in, opcode, qpn,
+ opt_param_mask, qpc);
+ break;
case MLX5_CMD_OP_RTR2RTS_QP:
if (MBOX_ALLOC(mbox, rtr2rts_qp))
return -ENOMEM;
- MOD_QP_IN_SET_QPC(rtr2rts_qp, mbox->in, opcode, qpn,
- opt_param_mask, qpc);
- break;
+ MOD_QP_IN_SET_QPC(rtr2rts_qp, mbox->in, opcode, qpn,
+ opt_param_mask, qpc);
+ break;
case MLX5_CMD_OP_RTS2RTS_QP:
if (MBOX_ALLOC(mbox, rts2rts_qp))
return -ENOMEM;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 177e076b8d17..719cecb182c6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -511,7 +511,7 @@ int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
*system_image_guid = MLX5_GET64(query_nic_vport_context_out, out,
nic_vport_context.system_image_guid);
- kfree(out);
+ kvfree(out);
return 0;
}
@@ -531,7 +531,7 @@ int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
*node_guid = MLX5_GET64(query_nic_vport_context_out, out,
nic_vport_context.node_guid);
- kfree(out);
+ kvfree(out);
return 0;
}
@@ -587,7 +587,7 @@ int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
*qkey_viol_cntr = MLX5_GET(query_nic_vport_context_out, out,
nic_vport_context.qkey_violation_counter);
- kfree(out);
+ kvfree(out);
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
index ea66448ba365..b97bb72b4db4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
@@ -36,7 +36,12 @@
u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq)
{
- return (u32)wq->sz_m1 + 1;
+ return (u32)wq->fbc.sz_m1 + 1;
+}
+
+u32 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq)
+{
+ return (u32)wq->fbc.frag_sz_m1 + 1;
}
u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq)
@@ -46,12 +51,12 @@ u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq)
u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq)
{
- return (u32)wq->sz_m1 + 1;
+ return (u32)wq->fbc.sz_m1 + 1;
}
static u32 mlx5_wq_cyc_get_byte_size(struct mlx5_wq_cyc *wq)
{
- return mlx5_wq_cyc_get_size(wq) << wq->log_stride;
+ return mlx5_wq_cyc_get_size(wq) << wq->fbc.log_stride;
}
static u32 mlx5_wq_qp_get_byte_size(struct mlx5_wq_qp *wq)
@@ -67,17 +72,20 @@ static u32 mlx5_cqwq_get_byte_size(struct mlx5_cqwq *wq)
static u32 mlx5_wq_ll_get_byte_size(struct mlx5_wq_ll *wq)
{
- return mlx5_wq_ll_get_size(wq) << wq->log_stride;
+ return mlx5_wq_ll_get_size(wq) << wq->fbc.log_stride;
}
int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *wqc, struct mlx5_wq_cyc *wq,
struct mlx5_wq_ctrl *wq_ctrl)
{
+ struct mlx5_frag_buf_ctrl *fbc = &wq->fbc;
int err;
- wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride);
- wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1;
+ mlx5_fill_fbc(MLX5_GET(wq, wqc, log_wq_stride),
+ MLX5_GET(wq, wqc, log_wq_sz),
+ fbc);
+ wq->sz = wq->fbc.sz_m1 + 1;
err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
if (err) {
@@ -85,14 +93,14 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
return err;
}
- err = mlx5_buf_alloc_node(mdev, mlx5_wq_cyc_get_byte_size(wq),
- &wq_ctrl->buf, param->buf_numa_node);
+ err = mlx5_frag_buf_alloc_node(mdev, mlx5_wq_cyc_get_byte_size(wq),
+ &wq_ctrl->buf, param->buf_numa_node);
if (err) {
- mlx5_core_warn(mdev, "mlx5_buf_alloc_node() failed, %d\n", err);
+ mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n", err);
goto err_db_free;
}
- wq->buf = wq_ctrl->buf.frags->buf;
+ fbc->frag_buf = wq_ctrl->buf;
wq->db = wq_ctrl->db.db;
wq_ctrl->mdev = mdev;
@@ -105,17 +113,35 @@ err_db_free:
return err;
}
+static void mlx5e_qp_set_frag_buf(struct mlx5_frag_buf *buf,
+ struct mlx5_wq_qp *qp)
+{
+ struct mlx5_frag_buf *rqb, *sqb;
+
+ rqb = &qp->rq.fbc.frag_buf;
+ *rqb = *buf;
+ rqb->size = mlx5_wq_cyc_get_byte_size(&qp->rq);
+ rqb->npages = 1 << get_order(rqb->size);
+
+ sqb = &qp->sq.fbc.frag_buf;
+ *sqb = *buf;
+ sqb->size = mlx5_wq_cyc_get_byte_size(&qp->sq);
+ sqb->npages = 1 << get_order(sqb->size);
+ sqb->frags += rqb->npages; /* first part is for the rq */
+}
+
int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *qpc, struct mlx5_wq_qp *wq,
struct mlx5_wq_ctrl *wq_ctrl)
{
int err;
- wq->rq.log_stride = MLX5_GET(qpc, qpc, log_rq_stride) + 4;
- wq->rq.sz_m1 = (1 << MLX5_GET(qpc, qpc, log_rq_size)) - 1;
-
- wq->sq.log_stride = ilog2(MLX5_SEND_WQE_BB);
- wq->sq.sz_m1 = (1 << MLX5_GET(qpc, qpc, log_sq_size)) - 1;
+ mlx5_fill_fbc(MLX5_GET(qpc, qpc, log_rq_stride) + 4,
+ MLX5_GET(qpc, qpc, log_rq_size),
+ &wq->rq.fbc);
+ mlx5_fill_fbc(ilog2(MLX5_SEND_WQE_BB),
+ MLX5_GET(qpc, qpc, log_sq_size),
+ &wq->sq.fbc);
err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
if (err) {
@@ -123,15 +149,15 @@ int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
return err;
}
- err = mlx5_buf_alloc_node(mdev, mlx5_wq_qp_get_byte_size(wq),
- &wq_ctrl->buf, param->buf_numa_node);
+ err = mlx5_frag_buf_alloc_node(mdev, mlx5_wq_qp_get_byte_size(wq),
+ &wq_ctrl->buf, param->buf_numa_node);
if (err) {
- mlx5_core_warn(mdev, "mlx5_buf_alloc_node() failed, %d\n", err);
+ mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n", err);
goto err_db_free;
}
- wq->rq.buf = wq_ctrl->buf.frags->buf;
- wq->sq.buf = wq->rq.buf + mlx5_wq_cyc_get_byte_size(&wq->rq);
+ mlx5e_qp_set_frag_buf(&wq_ctrl->buf, wq);
+
wq->rq.db = &wq_ctrl->db.db[MLX5_RCV_DBR];
wq->sq.db = &wq_ctrl->db.db[MLX5_SND_DBR];
@@ -147,7 +173,7 @@ err_db_free:
int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *cqc, struct mlx5_cqwq *wq,
- struct mlx5_frag_wq_ctrl *wq_ctrl)
+ struct mlx5_wq_ctrl *wq_ctrl)
{
int err;
@@ -160,7 +186,7 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
}
err = mlx5_frag_buf_alloc_node(mdev, mlx5_cqwq_get_byte_size(wq),
- &wq_ctrl->frag_buf,
+ &wq_ctrl->buf,
param->buf_numa_node);
if (err) {
mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n",
@@ -168,7 +194,7 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
goto err_db_free;
}
- wq->fbc.frag_buf = wq_ctrl->frag_buf;
+ wq->fbc.frag_buf = wq_ctrl->buf;
wq->db = wq_ctrl->db.db;
wq_ctrl->mdev = mdev;
@@ -185,12 +211,14 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *wqc, struct mlx5_wq_ll *wq,
struct mlx5_wq_ctrl *wq_ctrl)
{
+ struct mlx5_frag_buf_ctrl *fbc = &wq->fbc;
struct mlx5_wqe_srq_next_seg *next_seg;
int err;
int i;
- wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride);
- wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1;
+ mlx5_fill_fbc(MLX5_GET(wq, wqc, log_wq_stride),
+ MLX5_GET(wq, wqc, log_wq_sz),
+ fbc);
err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
if (err) {
@@ -198,17 +226,17 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
return err;
}
- err = mlx5_buf_alloc_node(mdev, mlx5_wq_ll_get_byte_size(wq),
- &wq_ctrl->buf, param->buf_numa_node);
+ err = mlx5_frag_buf_alloc_node(mdev, mlx5_wq_ll_get_byte_size(wq),
+ &wq_ctrl->buf, param->buf_numa_node);
if (err) {
- mlx5_core_warn(mdev, "mlx5_buf_alloc_node() failed, %d\n", err);
+ mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n", err);
goto err_db_free;
}
- wq->buf = wq_ctrl->buf.frags->buf;
+ wq->fbc.frag_buf = wq_ctrl->buf;
wq->db = wq_ctrl->db.db;
- for (i = 0; i < wq->sz_m1; i++) {
+ for (i = 0; i < fbc->sz_m1; i++) {
next_seg = mlx5_wq_ll_get_wqe(wq, i);
next_seg->next_wqe_index = cpu_to_be16(i + 1);
}
@@ -227,12 +255,7 @@ err_db_free:
void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl)
{
- mlx5_buf_free(wq_ctrl->mdev, &wq_ctrl->buf);
+ mlx5_frag_buf_free(wq_ctrl->mdev, &wq_ctrl->buf);
mlx5_db_free(wq_ctrl->mdev, &wq_ctrl->db);
}
-void mlx5_cqwq_destroy(struct mlx5_frag_wq_ctrl *wq_ctrl)
-{
- mlx5_frag_buf_free(wq_ctrl->mdev, &wq_ctrl->frag_buf);
- mlx5_db_free(wq_ctrl->mdev, &wq_ctrl->db);
-}
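
mlx5e_qp_set_frag_buf() above carves a single fragmented allocation into two views: the RQ owns the first rqb->npages fragments, and the SQ view simply starts where the RQ ends, via the offset frags pointer. The pointer arithmetic in isolation (illustrative types, not the driver's structs):

#include <stdio.h>

struct frag { void *page; };

struct view {
	struct frag *frags;	/* first fragment of this view */
	int npages;
};

int main(void)
{
	struct frag pool[8];	/* one shared allocation for RQ + SQ */
	struct view rq = { .frags = pool, .npages = 3 };
	struct view sq = { .frags = pool + rq.npages, .npages = 5 };

	/* the SQ view simply starts where the RQ view ends */
	printf("rq at +0, sq at +%d pages\n", (int)(sq.frags - pool));
	return 0;
}
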
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
index fca90b94596d..0b47126815b6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
@@ -38,7 +38,6 @@
#include <linux/mlx5/qp.h>
struct mlx5_wq_param {
- int linear;
int buf_numa_node;
int db_numa_node;
};
@@ -49,17 +48,12 @@ struct mlx5_wq_ctrl {
struct mlx5_db db;
};
-struct mlx5_frag_wq_ctrl {
- struct mlx5_core_dev *mdev;
- struct mlx5_frag_buf frag_buf;
- struct mlx5_db db;
-};
-
struct mlx5_wq_cyc {
- void *buf;
+ struct mlx5_frag_buf_ctrl fbc;
__be32 *db;
- u16 sz_m1;
- u8 log_stride;
+ u16 sz;
+ u16 wqe_ctr;
+ u16 cur_sz;
};
struct mlx5_wq_qp {
@@ -74,20 +68,19 @@ struct mlx5_cqwq {
};
struct mlx5_wq_ll {
- void *buf;
+ struct mlx5_frag_buf_ctrl fbc;
__be32 *db;
__be16 *tail_next;
- u16 sz_m1;
u16 head;
u16 wqe_ctr;
u16 cur_sz;
- u8 log_stride;
};
int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *wqc, struct mlx5_wq_cyc *wq,
struct mlx5_wq_ctrl *wq_ctrl);
u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq);
+u32 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq);
int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *qpc, struct mlx5_wq_qp *wq,
@@ -95,7 +88,7 @@ int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *cqc, struct mlx5_cqwq *wq,
- struct mlx5_frag_wq_ctrl *wq_ctrl);
+ struct mlx5_wq_ctrl *wq_ctrl);
u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq);
int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
@@ -104,16 +97,67 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq);
void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl);
-void mlx5_cqwq_destroy(struct mlx5_frag_wq_ctrl *wq_ctrl);
+
+static inline int mlx5_wq_cyc_is_full(struct mlx5_wq_cyc *wq)
+{
+ return wq->cur_sz == wq->sz;
+}
+
+static inline int mlx5_wq_cyc_missing(struct mlx5_wq_cyc *wq)
+{
+ return wq->sz - wq->cur_sz;
+}
+
+static inline int mlx5_wq_cyc_is_empty(struct mlx5_wq_cyc *wq)
+{
+ return !wq->cur_sz;
+}
+
+static inline void mlx5_wq_cyc_push(struct mlx5_wq_cyc *wq)
+{
+ wq->wqe_ctr++;
+ wq->cur_sz++;
+}
+
+static inline void mlx5_wq_cyc_push_n(struct mlx5_wq_cyc *wq, u8 n)
+{
+ wq->wqe_ctr += n;
+ wq->cur_sz += n;
+}
+
+static inline void mlx5_wq_cyc_pop(struct mlx5_wq_cyc *wq)
+{
+ wq->cur_sz--;
+}
+
+static inline void mlx5_wq_cyc_update_db_record(struct mlx5_wq_cyc *wq)
+{
+ *wq->db = cpu_to_be32(wq->wqe_ctr);
+}
static inline u16 mlx5_wq_cyc_ctr2ix(struct mlx5_wq_cyc *wq, u16 ctr)
{
- return ctr & wq->sz_m1;
+ return ctr & wq->fbc.sz_m1;
+}
+
+static inline u16 mlx5_wq_cyc_ctr2fragix(struct mlx5_wq_cyc *wq, u16 ctr)
+{
+ return ctr & wq->fbc.frag_sz_m1;
+}
+
+static inline u16 mlx5_wq_cyc_get_head(struct mlx5_wq_cyc *wq)
+{
+ return mlx5_wq_cyc_ctr2ix(wq, wq->wqe_ctr);
+}
+
+static inline u16 mlx5_wq_cyc_get_tail(struct mlx5_wq_cyc *wq)
+{
+ return mlx5_wq_cyc_ctr2ix(wq, wq->wqe_ctr - wq->cur_sz);
}
static inline void *mlx5_wq_cyc_get_wqe(struct mlx5_wq_cyc *wq, u16 ix)
{
- return wq->buf + (ix << wq->log_stride);
+ return mlx5_frag_buf_get_wqe(&wq->fbc, ix);
}
static inline int mlx5_wq_cyc_cc_bigger(u16 cc1, u16 cc2)
@@ -124,9 +168,14 @@ static inline int mlx5_wq_cyc_cc_bigger(u16 cc1, u16 cc2)
return !equal && !smaller;
}
+static inline u32 mlx5_cqwq_ctr2ix(struct mlx5_cqwq *wq, u32 ctr)
+{
+ return ctr & wq->fbc.sz_m1;
+}
+
static inline u32 mlx5_cqwq_get_ci(struct mlx5_cqwq *wq)
{
- return wq->cc & wq->fbc.sz_m1;
+ return mlx5_cqwq_ctr2ix(wq, wq->cc);
}
static inline void *mlx5_cqwq_get_wqe(struct mlx5_cqwq *wq, u32 ix)
@@ -134,9 +183,14 @@ static inline void *mlx5_cqwq_get_wqe(struct mlx5_cqwq *wq, u32 ix)
return mlx5_frag_buf_get_wqe(&wq->fbc, ix);
}
+static inline u32 mlx5_cqwq_get_ctr_wrap_cnt(struct mlx5_cqwq *wq, u32 ctr)
+{
+ return ctr >> wq->fbc.log_sz;
+}
+
static inline u32 mlx5_cqwq_get_wrap_cnt(struct mlx5_cqwq *wq)
{
- return wq->cc >> wq->fbc.log_sz;
+ return mlx5_cqwq_get_ctr_wrap_cnt(wq, wq->cc);
}
static inline void mlx5_cqwq_pop(struct mlx5_cqwq *wq)
@@ -167,7 +221,7 @@ static inline struct mlx5_cqe64 *mlx5_cqwq_get_cqe(struct mlx5_cqwq *wq)
static inline int mlx5_wq_ll_is_full(struct mlx5_wq_ll *wq)
{
- return wq->cur_sz == wq->sz_m1;
+ return wq->cur_sz == wq->fbc.sz_m1;
}
static inline int mlx5_wq_ll_is_empty(struct mlx5_wq_ll *wq)
@@ -177,7 +231,7 @@ static inline int mlx5_wq_ll_is_empty(struct mlx5_wq_ll *wq)
static inline void *mlx5_wq_ll_get_wqe(struct mlx5_wq_ll *wq, u16 ix)
{
- return wq->buf + (ix << wq->log_stride);
+ return mlx5_frag_buf_get_wqe(&wq->fbc, ix);
}
static inline void mlx5_wq_ll_push(struct mlx5_wq_ll *wq, u16 head_next)
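
The cyclic-WQ helpers added above depend on the ring size being a power of two: a free-running 16-bit counter maps to a slot with ctr & sz_m1, the head comes straight from wqe_ctr, and the tail is wqe_ctr - cur_sz, with u16 wraparound handled for free by the mask. A standalone demo of that arithmetic:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint16_t sz_m1 = 8 - 1;	/* ring of 8 entries */
	uint16_t wqe_ctr = 65534;	/* about to wrap the u16 counter */
	uint16_t cur_sz = 3;		/* entries currently posted */

	for (int i = 0; i < 4; i++, wqe_ctr++) {
		uint16_t head = wqe_ctr & sz_m1;
		uint16_t tail = (uint16_t)(wqe_ctr - cur_sz) & sz_m1;

		/* head/tail stay consistent across the counter wrap */
		printf("ctr=%5u head=%u tail=%u\n", wqe_ctr, head, tail);
	}
	return 0;
}
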
diff --git a/drivers/net/ethernet/mellanox/mlxsw/cmd.h b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
index 479511cf79bc..2bc48054b685 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/cmd.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
@@ -58,7 +58,7 @@ static inline void mlxsw_cmd_mbox_zero(char *mbox)
struct mlxsw_core;
int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
- u32 in_mod, bool out_mbox_direct,
+ u32 in_mod, bool out_mbox_direct, bool reset_ok,
char *in_mbox, size_t in_mbox_size,
char *out_mbox, size_t out_mbox_size);
@@ -67,7 +67,7 @@ static inline int mlxsw_cmd_exec_in(struct mlxsw_core *mlxsw_core, u16 opcode,
size_t in_mbox_size)
{
return mlxsw_cmd_exec(mlxsw_core, opcode, opcode_mod, in_mod, false,
- in_mbox, in_mbox_size, NULL, 0);
+ false, in_mbox, in_mbox_size, NULL, 0);
}
static inline int mlxsw_cmd_exec_out(struct mlxsw_core *mlxsw_core, u16 opcode,
@@ -76,7 +76,7 @@ static inline int mlxsw_cmd_exec_out(struct mlxsw_core *mlxsw_core, u16 opcode,
char *out_mbox, size_t out_mbox_size)
{
return mlxsw_cmd_exec(mlxsw_core, opcode, opcode_mod, in_mod,
- out_mbox_direct, NULL, 0,
+ out_mbox_direct, false, NULL, 0,
out_mbox, out_mbox_size);
}
@@ -84,7 +84,7 @@ static inline int mlxsw_cmd_exec_none(struct mlxsw_core *mlxsw_core, u16 opcode,
u8 opcode_mod, u32 in_mod)
{
return mlxsw_cmd_exec(mlxsw_core, opcode, opcode_mod, in_mod, false,
- NULL, 0, NULL, 0);
+ false, NULL, 0, NULL, 0);
}
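
/* Sketch of the reset_ok idea threaded through the wrappers above:
 * a caller issuing a reset command tells the executor that one
 * specific failure is expected and should be treated as success.
 * Names are illustrative, not the mlxsw API.
 */
#include <stdio.h>

enum { CMD_OK = 0, CMD_ERR_RUNNING_RESET = 0x26, CMD_ERR_OTHER = 1 };

static int cmd_exec(int hw_status, int reset_ok)
{
	if (hw_status == CMD_OK)
		return 0;
	/* a reset command kills the command interface under our feet,
	 * so tolerate this one status when the caller opted in
	 */
	if (reset_ok && hw_status == CMD_ERR_RUNNING_RESET)
		return 0;
	return -1;
}

int main(void)
{
	printf("%d %d\n", cmd_exec(CMD_ERR_RUNNING_RESET, 1),
	       cmd_exec(CMD_ERR_RUNNING_RESET, 0));	/* prints: 0 -1 */
	return 0;
}
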
enum mlxsw_cmd_opcode {
@@ -179,6 +179,8 @@ enum mlxsw_cmd_status {
MLXSW_CMD_STATUS_BAD_INDEX = 0x0A,
/* NVMEM checksum/CRC failed. */
MLXSW_CMD_STATUS_BAD_NVMEM = 0x0B,
+ /* Device is currently running a reset */
+ MLXSW_CMD_STATUS_RUNNING_RESET = 0x26,
/* Bad management packet (silently discarded). */
MLXSW_CMD_STATUS_BAD_PKT = 0x30,
};
@@ -208,6 +210,8 @@ static inline const char *mlxsw_cmd_status_str(u8 status)
return "BAD_INDEX";
case MLXSW_CMD_STATUS_BAD_NVMEM:
return "BAD_NVMEM";
+ case MLXSW_CMD_STATUS_RUNNING_RESET:
+ return "RUNNING_RESET";
case MLXSW_CMD_STATUS_BAD_PKT:
return "BAD_PKT";
default:
@@ -424,10 +428,15 @@ MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_rdq_sz, 0x04, 24, 8);
MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_num_rdqs, 0x04, 0, 8);
/* cmd_mbox_query_aq_cap_log_max_cq_sz
- * Log (base 2) of max CQEs allowed on CQ.
+ * Log (base 2) of the maximum number of CQEs allowed in a CQ for CQEv0 and CQEv1.
*/
MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_cq_sz, 0x08, 24, 8);
+/* cmd_mbox_query_aq_cap_log_max_cqv2_sz
+ * Log (base 2) of the maximum number of CQEs allowed in a CQ for CQEv2.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_cqv2_sz, 0x08, 16, 8);
+
/* cmd_mbox_query_aq_cap_max_num_cqs
* Maximum number of CQs.
*/
@@ -662,6 +671,12 @@ MLXSW_ITEM32(cmd_mbox, config_profile, set_kvd_hash_single_size, 0x0C, 25, 1);
*/
MLXSW_ITEM32(cmd_mbox, config_profile, set_kvd_hash_double_size, 0x0C, 26, 1);
+/* cmd_mbox_config_profile_set_cqe_version
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_cqe_version, 0x08, 0, 1);
+
/* cmd_mbox_config_profile_max_vepa_channels
* Maximum number of VEPA channels per port (0 through 16)
* 0 - multi-channel VEPA is disabled
@@ -841,6 +856,14 @@ MLXSW_ITEM32_INDEXED(cmd_mbox, config_profile, swid_config_type,
MLXSW_ITEM32_INDEXED(cmd_mbox, config_profile, swid_config_properties,
0x60, 0, 8, 0x08, 0x00, false);
+/* cmd_mbox_config_profile_cqe_version
+ * CQE version:
+ * 0: CQE version is 0
+ * 1: CQE version is either 1 or 2
+ * CQE ver 1 or 2 is configured by Completion Queue Context field cqe_ver.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, cqe_version, 0xB0, 0, 8);
+
/* ACCESS_REG - Access EMAD Supported Register
* ----------------------------------
* OpMod == 0 (N/A), INMmod == 0 (N/A)
@@ -850,10 +873,12 @@ MLXSW_ITEM32_INDEXED(cmd_mbox, config_profile, swid_config_properties,
*/
static inline int mlxsw_cmd_access_reg(struct mlxsw_core *mlxsw_core,
+ bool reset_ok,
char *in_mbox, char *out_mbox)
{
return mlxsw_cmd_exec(mlxsw_core, MLXSW_CMD_OPCODE_ACCESS_REG,
- 0, 0, false, in_mbox, MLXSW_CMD_MBOX_SIZE,
+ 0, 0, false, reset_ok,
+ in_mbox, MLXSW_CMD_MBOX_SIZE,
out_mbox, MLXSW_CMD_MBOX_SIZE);
}
@@ -1032,11 +1057,15 @@ static inline int mlxsw_cmd_sw2hw_cq(struct mlxsw_core *mlxsw_core,
0, cq_number, in_mbox, MLXSW_CMD_MBOX_SIZE);
}
-/* cmd_mbox_sw2hw_cq_cv
+enum mlxsw_cmd_mbox_sw2hw_cq_cqe_ver {
+ MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_1,
+ MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_2,
+};
+
+/* cmd_mbox_sw2hw_cq_cqe_ver
* CQE Version.
- * 0 - CQE Version 0, 1 - CQE Version 1
*/
-MLXSW_ITEM32(cmd_mbox, sw2hw_cq, cv, 0x00, 28, 4);
+MLXSW_ITEM32(cmd_mbox, sw2hw_cq, cqe_ver, 0x00, 28, 4);
/* cmd_mbox_sw2hw_cq_c_eqn
* Event Queue this CQ reports completion events to.
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index e13ac3b8dff7..f9c724752a32 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -770,27 +770,35 @@ static void mlxsw_core_driver_put(const char *kind)
static int mlxsw_devlink_port_split(struct devlink *devlink,
unsigned int port_index,
- unsigned int count)
+ unsigned int count,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
- if (port_index >= mlxsw_core->max_ports)
+ if (port_index >= mlxsw_core->max_ports) {
+ NL_SET_ERR_MSG_MOD(extack, "Port index exceeds maximum number of ports");
return -EINVAL;
+ }
if (!mlxsw_core->driver->port_split)
return -EOPNOTSUPP;
- return mlxsw_core->driver->port_split(mlxsw_core, port_index, count);
+ return mlxsw_core->driver->port_split(mlxsw_core, port_index, count,
+ extack);
}
static int mlxsw_devlink_port_unsplit(struct devlink *devlink,
- unsigned int port_index)
+ unsigned int port_index,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
- if (port_index >= mlxsw_core->max_ports)
+ if (port_index >= mlxsw_core->max_ports) {
+ NL_SET_ERR_MSG_MOD(extack, "Port index exceeds maximum number of ports");
return -EINVAL;
+ }
if (!mlxsw_core->driver->port_unsplit)
return -EOPNOTSUPP;
- return mlxsw_core->driver->port_unsplit(mlxsw_core, port_index);
+ return mlxsw_core->driver->port_unsplit(mlxsw_core, port_index,
+ extack);
}
static int
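The two callbacks above establish the pattern the rest of the series follows: validate, attach a human-readable reason through extack, and return a plain errno, which devlink then surfaces to userspace alongside the error. A minimal sketch (the function name and message are illustrative, not from the patch):

static int example_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
			      unsigned int count,
			      struct netlink_ext_ack *extack)
{
	if (count != 2 && count != 4) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported split count");
		return -EINVAL;
	}
	return 0;
}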
@@ -963,17 +971,16 @@ mlxsw_devlink_sb_occ_tc_port_bind_get(struct devlink_port *devlink_port,
pool_type, p_cur, p_max);
}
-static int mlxsw_devlink_core_bus_device_reload(struct devlink *devlink)
+static int mlxsw_devlink_core_bus_device_reload(struct devlink *devlink,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
- const struct mlxsw_bus *mlxsw_bus = mlxsw_core->bus;
int err;
- if (!mlxsw_bus->reset)
+ if (!(mlxsw_core->bus->features & MLXSW_BUS_F_RESET))
return -EOPNOTSUPP;
mlxsw_core_bus_device_unregister(mlxsw_core, true);
- mlxsw_bus->reset(mlxsw_core->bus_priv);
err = mlxsw_core_bus_device_register(mlxsw_core->bus_info,
mlxsw_core->bus,
mlxsw_core->bus_priv, true,
@@ -1480,6 +1487,7 @@ static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
{
enum mlxsw_emad_op_tlv_status status;
int err, n_retry;
+ bool reset_ok;
char *in_mbox, *out_mbox, *tmp;
dev_dbg(mlxsw_core->bus_info->dev, "Reg cmd access (reg_id=%x(%s),type=%s)\n",
@@ -1501,9 +1509,16 @@ static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
tmp = in_mbox + MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);
mlxsw_emad_pack_reg_tlv(tmp, reg, payload);
+ /* The MRSR (reset) register requires special treatment: the
+ * command interface will return an error once the command is
+ * executed, so tell the lower layer to expect it and cope
+ * accordingly.
+ */
+ reset_ok = reg->id == MLXSW_REG_MRSR_ID;
+
n_retry = 0;
retry:
- err = mlxsw_cmd_access_reg(mlxsw_core, in_mbox, out_mbox);
+ err = mlxsw_cmd_access_reg(mlxsw_core, reset_ok, in_mbox, out_mbox);
if (!err) {
err = mlxsw_emad_process_status(out_mbox, &status);
if (err) {
@@ -1714,15 +1729,16 @@ EXPORT_SYMBOL(mlxsw_core_port_fini);
void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u8 local_port,
void *port_driver_priv, struct net_device *dev,
- bool split, u32 split_group)
+ u32 port_number, bool split,
+ u32 split_port_subnumber)
{
struct mlxsw_core_port *mlxsw_core_port =
&mlxsw_core->ports[local_port];
struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
mlxsw_core_port->port_driver_priv = port_driver_priv;
- if (split)
- devlink_port_split_set(devlink_port, split_group);
+ devlink_port_attrs_set(devlink_port, DEVLINK_PORT_FLAVOUR_PHYSICAL,
+ port_number, split, split_port_subnumber);
devlink_port_type_eth_set(devlink_port, dev);
}
EXPORT_SYMBOL(mlxsw_core_port_eth_set);
@@ -1762,6 +1778,17 @@ enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core,
}
EXPORT_SYMBOL(mlxsw_core_port_type_get);
+int mlxsw_core_port_get_phys_port_name(struct mlxsw_core *mlxsw_core,
+ u8 local_port, char *name, size_t len)
+{
+ struct mlxsw_core_port *mlxsw_core_port =
+ &mlxsw_core->ports[local_port];
+ struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
+
+ return devlink_port_get_phys_port_name(devlink_port, name, len);
+}
+EXPORT_SYMBOL(mlxsw_core_port_get_phys_port_name);
+
static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
const char *buf, size_t size)
{
@@ -1781,7 +1808,7 @@ static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
}
int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
- u32 in_mod, bool out_mbox_direct,
+ u32 in_mod, bool out_mbox_direct, bool reset_ok,
char *in_mbox, size_t in_mbox_size,
char *out_mbox, size_t out_mbox_size)
{
@@ -1804,7 +1831,15 @@ int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
in_mbox, in_mbox_size,
out_mbox, out_mbox_size, &status);
- if (err == -EIO && status != MLXSW_CMD_STATUS_OK) {
+ if (!err && out_mbox) {
+ dev_dbg(mlxsw_core->bus_info->dev, "Output mailbox:\n");
+ mlxsw_core_buf_dump_dbg(mlxsw_core, out_mbox, out_mbox_size);
+ }
+
+ if (reset_ok && err == -EIO &&
+ status == MLXSW_CMD_STATUS_RUNNING_RESET) {
+ err = 0;
+ } else if (err == -EIO && status != MLXSW_CMD_STATUS_OK) {
dev_err(mlxsw_core->bus_info->dev, "Cmd exec failed (opcode=%x(%s),opcode_mod=%x,in_mod=%x,status=%x(%s))\n",
opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
in_mod, status, mlxsw_cmd_status_str(status));
@@ -1814,10 +1849,6 @@ int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
in_mod);
}
- if (!err && out_mbox) {
- dev_dbg(mlxsw_core->bus_info->dev, "Output mailbox:\n");
- mlxsw_core_buf_dump_dbg(mlxsw_core, out_mbox, out_mbox_size);
- }
return err;
}
EXPORT_SYMBOL(mlxsw_cmd_exec);
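The reordering above also matters: the output mailbox is now dumped before the status checks, so a RUNNING_RESET reply is still visible in the debug log. The resulting success predicate, as a standalone sketch (hypothetical helper; status is the u8 the bus layer returns):

static bool cmd_reply_ok(int err, u8 status, bool reset_ok)
{
	if (!err)
		return true;
	/* A reset command is expected to kill the command interface;
	 * treat the firmware's RUNNING_RESET indication as success.
	 */
	return reset_ok && err == -EIO &&
	       status == MLXSW_CMD_STATUS_RUNNING_RESET;
}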
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 092d39399f3c..552cfa29c2f7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -201,13 +201,16 @@ int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port);
void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port);
void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u8 local_port,
void *port_driver_priv, struct net_device *dev,
- bool split, u32 split_group);
+ u32 port_number, bool split,
+ u32 split_port_subnumber);
void mlxsw_core_port_ib_set(struct mlxsw_core *mlxsw_core, u8 local_port,
void *port_driver_priv);
void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u8 local_port,
void *port_driver_priv);
enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core,
u8 local_port);
+int mlxsw_core_port_get_phys_port_name(struct mlxsw_core *mlxsw_core,
+ u8 local_port, char *name, size_t len);
int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay);
bool mlxsw_core_schedule_work(struct work_struct *work);
@@ -271,8 +274,9 @@ struct mlxsw_driver {
int (*port_type_set)(struct mlxsw_core *mlxsw_core, u8 local_port,
enum devlink_port_type new_type);
int (*port_split)(struct mlxsw_core *mlxsw_core, u8 local_port,
- unsigned int count);
- int (*port_unsplit)(struct mlxsw_core *mlxsw_core, u8 local_port);
+ unsigned int count, struct netlink_ext_ack *extack);
+ int (*port_unsplit)(struct mlxsw_core *mlxsw_core, u8 local_port,
+ struct netlink_ext_ack *extack);
int (*sb_pool_get)(struct mlxsw_core *mlxsw_core,
unsigned int sb_index, u16 pool_index,
struct devlink_sb_pool_info *pool_info);
@@ -334,6 +338,7 @@ u64 mlxsw_core_res_get(struct mlxsw_core *mlxsw_core,
mlxsw_core_res_get(mlxsw_core, MLXSW_RES_ID_##short_res_id)
#define MLXSW_BUS_F_TXRX BIT(0)
+#define MLXSW_BUS_F_RESET BIT(1)
struct mlxsw_bus {
const char *kind;
@@ -341,7 +346,6 @@ struct mlxsw_bus {
const struct mlxsw_config_profile *profile,
struct mlxsw_res *res);
void (*fini)(void *bus_priv);
- void (*reset)(void *bus_priv);
bool (*skb_transmit_busy)(void *bus_priv,
const struct mlxsw_tx_info *tx_info);
int (*skb_transmit)(void *bus_priv, struct sk_buff *skb,
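With the ->reset op gone, the contract is carried by the feature bit instead: a bus that supports devlink reload advertises MLXSW_BUS_F_RESET and performs the actual reset at the start of its ->init(), as the PCI bus does later in this patch. A sketch of such a bus definition (fields elided, names illustrative):

static const struct mlxsw_bus example_bus = {
	.kind	  = "example",
	/* ->init() is expected to reset the device before using it */
	.features = MLXSW_BUS_F_TXRX | MLXSW_BUS_F_RESET,
	/* .init, .fini, .cmd_exec, ... omitted in this sketch */
};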
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 3a9381977d6d..fc4557245ff4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -117,6 +117,7 @@ struct mlxsw_pci_queue {
struct {
u32 comp_sdq_count;
u32 comp_rdq_count;
+ enum mlxsw_pci_cqe_v v;
} cq;
struct {
u32 ev_cmd_count;
@@ -155,6 +156,8 @@ struct mlxsw_pci {
} cmd;
struct mlxsw_bus_info bus_info;
const struct pci_device_id *id;
+ enum mlxsw_pci_cqe_v max_cqe_ver; /* Maximum supported CQE version */
+ u8 num_sdq_cqs; /* Number of CQs used for SDQs */
};
static void mlxsw_pci_queue_tasklet_schedule(struct mlxsw_pci_queue *q)
@@ -202,24 +205,6 @@ static bool mlxsw_pci_elem_hw_owned(struct mlxsw_pci_queue *q, bool owner_bit)
return owner_bit != !!(q->consumer_counter & q->count);
}
-static char *
-mlxsw_pci_queue_sw_elem_get(struct mlxsw_pci_queue *q,
- u32 (*get_elem_owner_func)(const char *))
-{
- struct mlxsw_pci_queue_elem_info *elem_info;
- char *elem;
- bool owner_bit;
-
- elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
- elem = elem_info->elem;
- owner_bit = get_elem_owner_func(elem);
- if (mlxsw_pci_elem_hw_owned(q, owner_bit))
- return NULL;
- q->consumer_counter++;
- rmb(); /* make sure we read owned bit before the rest of elem */
- return elem;
-}
-
static struct mlxsw_pci_queue_type_group *
mlxsw_pci_queue_type_group_get(struct mlxsw_pci *mlxsw_pci,
enum mlxsw_pci_queue_type q_type)
@@ -494,6 +479,17 @@ static void mlxsw_pci_rdq_fini(struct mlxsw_pci *mlxsw_pci,
}
}
+static void mlxsw_pci_cq_pre_init(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue *q)
+{
+ q->u.cq.v = mlxsw_pci->max_cqe_ver;
+
+ /* For SDQ it is pointless to use CQEv2, so use CQEv1 instead */
+ if (q->u.cq.v == MLXSW_PCI_CQE_V2 &&
+ q->num < mlxsw_pci->num_sdq_cqs)
+ q->u.cq.v = MLXSW_PCI_CQE_V1;
+}
+
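Why the pre-init hook can key off q->num alone: CQs are created in order, so the first num_sdq_cqs of them serve SDQs (num_sdq_cqs is recorded in mlxsw_pci_aqs_init() below). A worked example, with illustrative numbers only:

/* Illustrative values: with num_sdqs == 24 on a CQEv2-capable device,
 * CQs 0..23 (SDQ completions) are demoted to CQEv1, while CQs 24 and
 * up (RDQ completions) keep CQEv2 and its larger element format.
 */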
static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
struct mlxsw_pci_queue *q)
{
@@ -505,10 +501,16 @@ static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
for (i = 0; i < q->count; i++) {
char *elem = mlxsw_pci_queue_elem_get(q, i);
- mlxsw_pci_cqe_owner_set(elem, 1);
+ mlxsw_pci_cqe_owner_set(q->u.cq.v, elem, 1);
}
- mlxsw_cmd_mbox_sw2hw_cq_cv_set(mbox, 0); /* CQE ver 0 */
+ if (q->u.cq.v == MLXSW_PCI_CQE_V1)
+ mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox,
+ MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_1);
+ else if (q->u.cq.v == MLXSW_PCI_CQE_V2)
+ mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox,
+ MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_2);
+
mlxsw_cmd_mbox_sw2hw_cq_c_eqn_set(mbox, MLXSW_PCI_EQ_COMP_NUM);
mlxsw_cmd_mbox_sw2hw_cq_st_set(mbox, 0);
mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(mbox, ilog2(q->count));
@@ -559,7 +561,7 @@ static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
struct mlxsw_pci_queue *q,
u16 consumer_counter_limit,
- char *cqe)
+ enum mlxsw_pci_cqe_v cqe_v, char *cqe)
{
struct pci_dev *pdev = mlxsw_pci->pdev;
struct mlxsw_pci_queue_elem_info *elem_info;
@@ -579,10 +581,11 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
if (q->consumer_counter++ != consumer_counter_limit)
dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n");
- if (mlxsw_pci_cqe_lag_get(cqe)) {
+ if (mlxsw_pci_cqe_lag_get(cqe_v, cqe)) {
rx_info.is_lag = true;
- rx_info.u.lag_id = mlxsw_pci_cqe_lag_id_get(cqe);
- rx_info.lag_port_index = mlxsw_pci_cqe_lag_port_index_get(cqe);
+ rx_info.u.lag_id = mlxsw_pci_cqe_lag_id_get(cqe_v, cqe);
+ rx_info.lag_port_index =
+ mlxsw_pci_cqe_lag_subport_get(cqe_v, cqe);
} else {
rx_info.is_lag = false;
rx_info.u.sys_port = mlxsw_pci_cqe_system_port_get(cqe);
@@ -591,7 +594,7 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
rx_info.trap_id = mlxsw_pci_cqe_trap_id_get(cqe);
byte_count = mlxsw_pci_cqe_byte_count_get(cqe);
- if (mlxsw_pci_cqe_crc_get(cqe))
+ if (mlxsw_pci_cqe_crc_get(cqe_v, cqe))
byte_count -= ETH_FCS_LEN;
skb_put(skb, byte_count);
mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info);
@@ -608,7 +611,18 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
static char *mlxsw_pci_cq_sw_cqe_get(struct mlxsw_pci_queue *q)
{
- return mlxsw_pci_queue_sw_elem_get(q, mlxsw_pci_cqe_owner_get);
+ struct mlxsw_pci_queue_elem_info *elem_info;
+ char *elem;
+ bool owner_bit;
+
+ elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
+ elem = elem_info->elem;
+ owner_bit = mlxsw_pci_cqe_owner_get(q->u.cq.v, elem);
+ if (mlxsw_pci_elem_hw_owned(q, owner_bit))
+ return NULL;
+ q->consumer_counter++;
+ rmb(); /* make sure we read owned bit before the rest of elem */
+ return elem;
}
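The open-coded getter repeats the ownership handshake from mlxsw_pci_elem_hw_owned(): with a power-of-two element count, the expected owner bit flips on every full pass of the consumer counter. The predicate as a sketch (hypothetical helper, same logic as above):

static bool example_sw_owned(u16 consumer_counter, u16 count, bool owner_bit)
{
	/* count is a power of two, so this isolates the pass parity */
	bool pass_parity = consumer_counter & count;

	return owner_bit == pass_parity; /* hw-owned when they differ */
}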
static void mlxsw_pci_cq_tasklet(unsigned long data)
@@ -621,8 +635,8 @@ static void mlxsw_pci_cq_tasklet(unsigned long data)
while ((cqe = mlxsw_pci_cq_sw_cqe_get(q))) {
u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe);
- u8 sendq = mlxsw_pci_cqe_sr_get(cqe);
- u8 dqn = mlxsw_pci_cqe_dqn_get(cqe);
+ u8 sendq = mlxsw_pci_cqe_sr_get(q->u.cq.v, cqe);
+ u8 dqn = mlxsw_pci_cqe_dqn_get(q->u.cq.v, cqe);
if (sendq) {
struct mlxsw_pci_queue *sdq;
@@ -636,7 +650,7 @@ static void mlxsw_pci_cq_tasklet(unsigned long data)
rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn);
mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq,
- wqe_counter, cqe);
+ wqe_counter, q->u.cq.v, cqe);
q->u.cq.comp_rdq_count++;
}
if (++items == credits)
@@ -648,6 +662,18 @@ static void mlxsw_pci_cq_tasklet(unsigned long data)
}
}
+static u16 mlxsw_pci_cq_elem_count(const struct mlxsw_pci_queue *q)
+{
+ return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_COUNT :
+ MLXSW_PCI_CQE01_COUNT;
+}
+
+static u8 mlxsw_pci_cq_elem_size(const struct mlxsw_pci_queue *q)
+{
+ return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_SIZE :
+ MLXSW_PCI_CQE01_SIZE;
+}
+
static int mlxsw_pci_eq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
struct mlxsw_pci_queue *q)
{
@@ -696,7 +722,18 @@ static void mlxsw_pci_eq_cmd_event(struct mlxsw_pci *mlxsw_pci, char *eqe)
static char *mlxsw_pci_eq_sw_eqe_get(struct mlxsw_pci_queue *q)
{
- return mlxsw_pci_queue_sw_elem_get(q, mlxsw_pci_eqe_owner_get);
+ struct mlxsw_pci_queue_elem_info *elem_info;
+ char *elem;
+ bool owner_bit;
+
+ elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
+ elem = elem_info->elem;
+ owner_bit = mlxsw_pci_eqe_owner_get(elem);
+ if (mlxsw_pci_elem_hw_owned(q, owner_bit))
+ return NULL;
+ q->consumer_counter++;
+ rmb(); /* make sure we read owned bit before the rest of elem */
+ return elem;
}
static void mlxsw_pci_eq_tasklet(unsigned long data)
@@ -749,11 +786,15 @@ static void mlxsw_pci_eq_tasklet(unsigned long data)
struct mlxsw_pci_queue_ops {
const char *name;
enum mlxsw_pci_queue_type type;
+ void (*pre_init)(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue *q);
int (*init)(struct mlxsw_pci *mlxsw_pci, char *mbox,
struct mlxsw_pci_queue *q);
void (*fini)(struct mlxsw_pci *mlxsw_pci,
struct mlxsw_pci_queue *q);
void (*tasklet)(unsigned long data);
+ u16 (*elem_count_f)(const struct mlxsw_pci_queue *q);
+ u8 (*elem_size_f)(const struct mlxsw_pci_queue *q);
u16 elem_count;
u8 elem_size;
};
@@ -776,11 +817,12 @@ static const struct mlxsw_pci_queue_ops mlxsw_pci_rdq_ops = {
static const struct mlxsw_pci_queue_ops mlxsw_pci_cq_ops = {
.type = MLXSW_PCI_QUEUE_TYPE_CQ,
+ .pre_init = mlxsw_pci_cq_pre_init,
.init = mlxsw_pci_cq_init,
.fini = mlxsw_pci_cq_fini,
.tasklet = mlxsw_pci_cq_tasklet,
- .elem_count = MLXSW_PCI_CQE_COUNT,
- .elem_size = MLXSW_PCI_CQE_SIZE
+ .elem_count_f = mlxsw_pci_cq_elem_count,
+ .elem_size_f = mlxsw_pci_cq_elem_size
};
static const struct mlxsw_pci_queue_ops mlxsw_pci_eq_ops = {
@@ -800,10 +842,15 @@ static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
int i;
int err;
- spin_lock_init(&q->lock);
q->num = q_num;
- q->count = q_ops->elem_count;
- q->elem_size = q_ops->elem_size;
+ if (q_ops->pre_init)
+ q_ops->pre_init(mlxsw_pci, q);
+
+ spin_lock_init(&q->lock);
+ q->count = q_ops->elem_count_f ? q_ops->elem_count_f(q) :
+ q_ops->elem_count;
+ q->elem_size = q_ops->elem_size_f ? q_ops->elem_size_f(q) :
+ q_ops->elem_size;
q->type = q_ops->type;
q->pci = mlxsw_pci;
@@ -832,7 +879,7 @@ static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
elem_info = mlxsw_pci_queue_elem_info_get(q, i);
elem_info->elem =
- __mlxsw_pci_queue_elem_get(q, q_ops->elem_size, i);
+ __mlxsw_pci_queue_elem_get(q, q->elem_size, i);
}
mlxsw_cmd_mbox_zero(mbox);
@@ -912,6 +959,7 @@ static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox)
u8 rdq_log2sz;
u8 num_cqs;
u8 cq_log2sz;
+ u8 cqv2_log2sz;
u8 num_eqs;
u8 eq_log2sz;
int err;
@@ -927,6 +975,7 @@ static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox)
rdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_rdq_sz_get(mbox);
num_cqs = mlxsw_cmd_mbox_query_aq_cap_max_num_cqs_get(mbox);
cq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_cq_sz_get(mbox);
+ cqv2_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_cqv2_sz_get(mbox);
num_eqs = mlxsw_cmd_mbox_query_aq_cap_max_num_eqs_get(mbox);
eq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_eq_sz_get(mbox);
@@ -938,12 +987,16 @@ static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox)
if ((1 << sdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
(1 << rdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
- (1 << cq_log2sz != MLXSW_PCI_CQE_COUNT) ||
+ (1 << cq_log2sz != MLXSW_PCI_CQE01_COUNT) ||
+ (mlxsw_pci->max_cqe_ver == MLXSW_PCI_CQE_V2 &&
+ (1 << cqv2_log2sz != MLXSW_PCI_CQE2_COUNT)) ||
(1 << eq_log2sz != MLXSW_PCI_EQE_COUNT)) {
dev_err(&pdev->dev, "Unsupported number of async queue descriptors\n");
return -EINVAL;
}
+ mlxsw_pci->num_sdq_cqs = num_sdqs;
+
err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_eq_ops,
num_eqs);
if (err) {
@@ -1184,6 +1237,11 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
mlxsw_pci_config_profile_swid_config(mlxsw_pci, mbox, i,
&profile->swid_config[i]);
+ if (mlxsw_pci->max_cqe_ver > MLXSW_PCI_CQE_V0) {
+ mlxsw_cmd_mbox_config_profile_set_cqe_version_set(mbox, 1);
+ mlxsw_cmd_mbox_config_profile_cqe_version_set(mbox, 1);
+ }
+
return mlxsw_cmd_config_profile_set(mlxsw_pci->core, mbox);
}
@@ -1313,6 +1371,51 @@ static void mlxsw_pci_mbox_free(struct mlxsw_pci *mlxsw_pci,
mbox->mapaddr);
}
+static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
+ const struct pci_device_id *id)
+{
+ unsigned long end;
+ char mrsr_pl[MLXSW_REG_MRSR_LEN];
+ int err;
+
+ mlxsw_reg_mrsr_pack(mrsr_pl);
+ err = mlxsw_reg_write(mlxsw_pci->core, MLXSW_REG(mrsr), mrsr_pl);
+ if (err)
+ return err;
+ if (id->device == PCI_DEVICE_ID_MELLANOX_SWITCHX2) {
+ msleep(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
+ return 0;
+ }
+
+ /* We must wait for the HW to become responsive once again. */
+ msleep(MLXSW_PCI_SW_RESET_WAIT_MSECS);
+
+ end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
+ do {
+ u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY);
+
+ if ((val & MLXSW_PCI_FW_READY_MASK) == MLXSW_PCI_FW_READY_MAGIC)
+ break;
+ cond_resched();
+ } while (time_before(jiffies, end));
+ return 0;
+}
+
+static int mlxsw_pci_alloc_irq_vectors(struct mlxsw_pci *mlxsw_pci)
+{
+ int err;
+
+ err = pci_alloc_irq_vectors(mlxsw_pci->pdev, 1, 1, PCI_IRQ_MSIX);
+ if (err < 0)
+ dev_err(&mlxsw_pci->pdev->dev, "MSI-X init failed\n");
+ return err;
+}
+
+static void mlxsw_pci_free_irq_vectors(struct mlxsw_pci *mlxsw_pci)
+{
+ pci_free_irq_vectors(mlxsw_pci->pdev);
+}
+
static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
const struct mlxsw_config_profile *profile,
struct mlxsw_res *res)
@@ -1340,6 +1443,16 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
if (err)
goto err_out_mbox_alloc;
+ err = mlxsw_pci_sw_reset(mlxsw_pci, mlxsw_pci->id);
+ if (err)
+ goto err_sw_reset;
+
+ err = mlxsw_pci_alloc_irq_vectors(mlxsw_pci);
+ if (err < 0) {
+ dev_err(&pdev->dev, "MSI-X init failed\n");
+ goto err_alloc_irq;
+ }
+
err = mlxsw_cmd_query_fw(mlxsw_core, mbox);
if (err)
goto err_query_fw;
@@ -1378,6 +1491,21 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
if (err)
goto err_query_resources;
+ if (MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V2) &&
+ MLXSW_CORE_RES_GET(mlxsw_core, CQE_V2))
+ mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V2;
+ else if (MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V1) &&
+ MLXSW_CORE_RES_GET(mlxsw_core, CQE_V1))
+ mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V1;
+ else if ((MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V0) &&
+ MLXSW_CORE_RES_GET(mlxsw_core, CQE_V0)) ||
+ !MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V0)) {
+ mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V0;
+ } else {
+ dev_err(&pdev->dev, "Invalid supported CQE version combination reported\n");
+ goto err_cqe_v_check;
+ }
+
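The cascade above reduces to the following decision function (a sketch; the booleans stand in for the MLXSW_CORE_RES_VALID/RES_GET pairs used above):

static int example_pick_cqe_ver(bool v0_valid, bool v0, bool v1, bool v2,
				enum mlxsw_pci_cqe_v *ver)
{
	if (v2)
		*ver = MLXSW_PCI_CQE_V2;
	else if (v1)
		*ver = MLXSW_PCI_CQE_V1;
	else if (v0 || !v0_valid)
		*ver = MLXSW_PCI_CQE_V0; /* older FW reports no CQE caps */
	else
		return -EINVAL; /* inconsistent capability report */
	return 0;
}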
err = mlxsw_pci_config_profile(mlxsw_pci, mbox, profile, res);
if (err)
goto err_config_profile;
@@ -1400,6 +1528,7 @@ err_request_eq_irq:
mlxsw_pci_aqs_fini(mlxsw_pci);
err_aqs_init:
err_config_profile:
+err_cqe_v_check:
err_query_resources:
err_boardinfo:
mlxsw_pci_fw_area_fini(mlxsw_pci);
@@ -1407,6 +1536,9 @@ err_fw_area_init:
err_doorbell_page_bar:
err_iface_rev:
err_query_fw:
+ mlxsw_pci_free_irq_vectors(mlxsw_pci);
+err_alloc_irq:
+err_sw_reset:
mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
err_out_mbox_alloc:
mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
@@ -1422,6 +1554,7 @@ static void mlxsw_pci_fini(void *bus_priv)
free_irq(pci_irq_vector(mlxsw_pci->pdev, 0), mlxsw_pci);
mlxsw_pci_aqs_fini(mlxsw_pci);
mlxsw_pci_fw_area_fini(mlxsw_pci);
+ mlxsw_pci_free_irq_vectors(mlxsw_pci);
mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
}
@@ -1603,58 +1736,6 @@ static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
return err;
}
-static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
- const struct pci_device_id *id)
-{
- unsigned long end;
-
- mlxsw_pci_write32(mlxsw_pci, SW_RESET, MLXSW_PCI_SW_RESET_RST_BIT);
- if (id->device == PCI_DEVICE_ID_MELLANOX_SWITCHX2) {
- msleep(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
- return 0;
- }
-
- /* Reset needs to be written before we read control register, and
- * we must wait for the HW to become responsive once again
- */
- wmb();
- msleep(MLXSW_PCI_SW_RESET_WAIT_MSECS);
-
- end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
- do {
- u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY);
-
- if ((val & MLXSW_PCI_FW_READY_MASK) == MLXSW_PCI_FW_READY_MAGIC)
- break;
- cond_resched();
- } while (time_before(jiffies, end));
- return 0;
-}
-
-static void mlxsw_pci_free_irq_vectors(struct mlxsw_pci *mlxsw_pci)
-{
- pci_free_irq_vectors(mlxsw_pci->pdev);
-}
-
-static int mlxsw_pci_alloc_irq_vectors(struct mlxsw_pci *mlxsw_pci)
-{
- int err;
-
- err = pci_alloc_irq_vectors(mlxsw_pci->pdev, 1, 1, PCI_IRQ_MSIX);
- if (err < 0)
- dev_err(&mlxsw_pci->pdev->dev, "MSI-X init failed\n");
- return err;
-}
-
-static void mlxsw_pci_reset(void *bus_priv)
-{
- struct mlxsw_pci *mlxsw_pci = bus_priv;
-
- mlxsw_pci_free_irq_vectors(mlxsw_pci);
- mlxsw_pci_sw_reset(mlxsw_pci, mlxsw_pci->id);
- mlxsw_pci_alloc_irq_vectors(mlxsw_pci);
-}
-
static const struct mlxsw_bus mlxsw_pci_bus = {
.kind = "pci",
.init = mlxsw_pci_init,
@@ -1662,8 +1743,7 @@ static const struct mlxsw_bus mlxsw_pci_bus = {
.skb_transmit_busy = mlxsw_pci_skb_transmit_busy,
.skb_transmit = mlxsw_pci_skb_transmit,
.cmd_exec = mlxsw_pci_cmd_exec,
- .features = MLXSW_BUS_F_TXRX,
- .reset = mlxsw_pci_reset,
+ .features = MLXSW_BUS_F_TXRX | MLXSW_BUS_F_RESET,
};
static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -1721,18 +1801,6 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mlxsw_pci->pdev = pdev;
pci_set_drvdata(pdev, mlxsw_pci);
- err = mlxsw_pci_sw_reset(mlxsw_pci, id);
- if (err) {
- dev_err(&pdev->dev, "Software reset failed\n");
- goto err_sw_reset;
- }
-
- err = mlxsw_pci_alloc_irq_vectors(mlxsw_pci);
- if (err < 0) {
- dev_err(&pdev->dev, "MSI-X init failed\n");
- goto err_msix_init;
- }
-
mlxsw_pci->bus_info.device_kind = driver_name;
mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev);
mlxsw_pci->bus_info.dev = &pdev->dev;
@@ -1749,9 +1817,6 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
err_bus_device_register:
- mlxsw_pci_free_irq_vectors(mlxsw_pci);
-err_msix_init:
-err_sw_reset:
iounmap(mlxsw_pci->hw_addr);
err_ioremap:
err_pci_resource_len_check:
@@ -1769,7 +1834,6 @@ static void mlxsw_pci_remove(struct pci_dev *pdev)
struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev);
mlxsw_core_bus_device_unregister(mlxsw_pci->core, false);
- mlxsw_pci_free_irq_vectors(mlxsw_pci);
iounmap(mlxsw_pci->hw_addr);
pci_release_regions(mlxsw_pci->pdev);
pci_disable_device(mlxsw_pci->pdev);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
index fb082ad21b00..963155f6a17a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
@@ -82,10 +82,12 @@
#define MLXSW_PCI_AQ_PAGES 8
#define MLXSW_PCI_AQ_SIZE (MLXSW_PCI_PAGE_SIZE * MLXSW_PCI_AQ_PAGES)
#define MLXSW_PCI_WQE_SIZE 32 /* 32 bytes per element */
-#define MLXSW_PCI_CQE_SIZE 16 /* 16 bytes per element */
+#define MLXSW_PCI_CQE01_SIZE 16 /* 16 bytes per element */
+#define MLXSW_PCI_CQE2_SIZE 32 /* 32 bytes per element */
#define MLXSW_PCI_EQE_SIZE 16 /* 16 bytes per element */
#define MLXSW_PCI_WQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_WQE_SIZE)
-#define MLXSW_PCI_CQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_CQE_SIZE)
+#define MLXSW_PCI_CQE01_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_CQE01_SIZE)
+#define MLXSW_PCI_CQE2_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_CQE2_SIZE)
#define MLXSW_PCI_EQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_EQE_SIZE)
#define MLXSW_PCI_EQE_UPDATE_COUNT 0x80
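The new counts follow directly from the fixed async-queue area, assuming MLXSW_PCI_PAGE_SIZE is 4096 as defined elsewhere in this header:

/* AQ area = 4096 * 8 = 32768 bytes, hence:
 *   MLXSW_PCI_CQE01_COUNT = 32768 / 16 = 2048 elements
 *   MLXSW_PCI_CQE2_COUNT  = 32768 / 32 = 1024 elements
 * i.e. a CQEv2 queue holds half as many completions per queue.
 */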
@@ -126,10 +128,48 @@ MLXSW_ITEM16_INDEXED(pci, wqe, byte_count, 0x02, 0, 14, 0x02, 0x00, false);
*/
MLXSW_ITEM64_INDEXED(pci, wqe, address, 0x08, 0, 64, 0x8, 0x0, false);
+enum mlxsw_pci_cqe_v {
+ MLXSW_PCI_CQE_V0,
+ MLXSW_PCI_CQE_V1,
+ MLXSW_PCI_CQE_V2,
+};
+
+#define mlxsw_pci_cqe_item_helpers(name, v0, v1, v2) \
+static inline u32 mlxsw_pci_cqe_##name##_get(enum mlxsw_pci_cqe_v v, char *cqe) \
+{ \
+ switch (v) { \
+ default: \
+ case MLXSW_PCI_CQE_V0: \
+ return mlxsw_pci_cqe##v0##_##name##_get(cqe); \
+ case MLXSW_PCI_CQE_V1: \
+ return mlxsw_pci_cqe##v1##_##name##_get(cqe); \
+ case MLXSW_PCI_CQE_V2: \
+ return mlxsw_pci_cqe##v2##_##name##_get(cqe); \
+ } \
+} \
+static inline void mlxsw_pci_cqe_##name##_set(enum mlxsw_pci_cqe_v v, \
+ char *cqe, u32 val) \
+{ \
+ switch (v) { \
+ default: \
+ case MLXSW_PCI_CQE_V0: \
+ mlxsw_pci_cqe##v0##_##name##_set(cqe, val); \
+ break; \
+ case MLXSW_PCI_CQE_V1: \
+ mlxsw_pci_cqe##v1##_##name##_set(cqe, val); \
+ break; \
+ case MLXSW_PCI_CQE_V2: \
+ mlxsw_pci_cqe##v2##_##name##_set(cqe, val); \
+ break; \
+ } \
+}
+
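A usage sketch for the macro above: mlxsw_pci_cqe_item_helpers(lag, 0, 12, 12) expands into run-time dispatchers, so callers pass the CQE version instead of choosing a fixed-layout accessor by hand (the wrapper function here is hypothetical):

static u32 example_read_lag(enum mlxsw_pci_cqe_v v, char *cqe)
{
	/* resolves to mlxsw_pci_cqe0_lag_get() for V0 and to
	 * mlxsw_pci_cqe12_lag_get() for both V1 and V2
	 */
	return mlxsw_pci_cqe_lag_get(v, cqe);
}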
/* pci_cqe_lag
* Packet arrives from a port which is a LAG
*/
-MLXSW_ITEM32(pci, cqe, lag, 0x00, 23, 1);
+MLXSW_ITEM32(pci, cqe0, lag, 0x00, 23, 1);
+MLXSW_ITEM32(pci, cqe12, lag, 0x00, 24, 1);
+mlxsw_pci_cqe_item_helpers(lag, 0, 12, 12);
/* pci_cqe_system_port/lag_id
* When lag=0: System port on which the packet was received
@@ -138,8 +178,12 @@ MLXSW_ITEM32(pci, cqe, lag, 0x00, 23, 1);
* bits [3:0] sub_port on which the packet was received
*/
MLXSW_ITEM32(pci, cqe, system_port, 0x00, 0, 16);
-MLXSW_ITEM32(pci, cqe, lag_id, 0x00, 4, 12);
-MLXSW_ITEM32(pci, cqe, lag_port_index, 0x00, 0, 4);
+MLXSW_ITEM32(pci, cqe0, lag_id, 0x00, 4, 12);
+MLXSW_ITEM32(pci, cqe12, lag_id, 0x00, 0, 16);
+mlxsw_pci_cqe_item_helpers(lag_id, 0, 12, 12);
+MLXSW_ITEM32(pci, cqe0, lag_subport, 0x00, 0, 4);
+MLXSW_ITEM32(pci, cqe12, lag_subport, 0x00, 16, 8);
+mlxsw_pci_cqe_item_helpers(lag_subport, 0, 12, 12);
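For reference, the MLXSW_ITEM32 tuples read (byte offset, LSB position, width) over big-endian 32-bit words, so the V0 lag bit above is equivalent to this by-hand extraction (a sketch, not driver code):

static u32 example_cqe0_lag(const char *cqe)
{
	u32 word = be32_to_cpu(*(const __be32 *)(cqe + 0x00));

	return (word >> 23) & 0x1; /* bit 23, width 1 */
}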
/* pci_cqe_wqe_counter
* WQE count of the WQEs completed on the associated dqn
@@ -162,28 +206,38 @@ MLXSW_ITEM32(pci, cqe, trap_id, 0x08, 0, 9);
* Length includes CRC. Indicates that the length field includes
* the packet's CRC.
*/
-MLXSW_ITEM32(pci, cqe, crc, 0x0C, 8, 1);
+MLXSW_ITEM32(pci, cqe0, crc, 0x0C, 8, 1);
+MLXSW_ITEM32(pci, cqe12, crc, 0x0C, 9, 1);
+mlxsw_pci_cqe_item_helpers(crc, 0, 12, 12);
/* pci_cqe_e
* CQE with Error.
*/
-MLXSW_ITEM32(pci, cqe, e, 0x0C, 7, 1);
+MLXSW_ITEM32(pci, cqe0, e, 0x0C, 7, 1);
+MLXSW_ITEM32(pci, cqe12, e, 0x00, 27, 1);
+mlxsw_pci_cqe_item_helpers(e, 0, 12, 12);
/* pci_cqe_sr
* 1 - Send Queue
* 0 - Receive Queue
*/
-MLXSW_ITEM32(pci, cqe, sr, 0x0C, 6, 1);
+MLXSW_ITEM32(pci, cqe0, sr, 0x0C, 6, 1);
+MLXSW_ITEM32(pci, cqe12, sr, 0x00, 26, 1);
+mlxsw_pci_cqe_item_helpers(sr, 0, 12, 12);
/* pci_cqe_dqn
* Descriptor Queue (DQ) Number.
*/
-MLXSW_ITEM32(pci, cqe, dqn, 0x0C, 1, 5);
+MLXSW_ITEM32(pci, cqe0, dqn, 0x0C, 1, 5);
+MLXSW_ITEM32(pci, cqe12, dqn, 0x0C, 1, 6);
+mlxsw_pci_cqe_item_helpers(dqn, 0, 12, 12);
/* pci_cqe_owner
* Ownership bit.
*/
-MLXSW_ITEM32(pci, cqe, owner, 0x0C, 0, 1);
+MLXSW_ITEM32(pci, cqe01, owner, 0x0C, 0, 1);
+MLXSW_ITEM32(pci, cqe2, owner, 0x1C, 0, 1);
+mlxsw_pci_cqe_item_helpers(owner, 01, 01, 2);
/* pci_eqe_event_type
* Event type.
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 6218231e379e..1877d9f8a11a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -6833,6 +6833,12 @@ enum mlxsw_reg_mpat_span_type {
*/
MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH = 0x0,
+ /* Remote SPAN Ethernet VLAN.
+ * The packet is forwarded to the monitoring port on the monitoring
+ * VLAN.
+ */
+ MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH = 0x1,
+
/* Encapsulated Remote SPAN Ethernet L3 GRE.
* The packet is encapsulated with GRE header.
*/
@@ -7028,6 +7034,30 @@ static inline void mlxsw_reg_mpar_pack(char *payload, u8 local_port,
mlxsw_reg_mpar_pa_id_set(payload, pa_id);
}
+/* MRSR - Management Reset and Shutdown Register
+ * ---------------------------------------------
+ * The MRSR register is used to reset or shut down the switch or
+ * the entire system (when applicable).
+ */
+#define MLXSW_REG_MRSR_ID 0x9023
+#define MLXSW_REG_MRSR_LEN 0x08
+
+MLXSW_REG_DEFINE(mrsr, MLXSW_REG_MRSR_ID, MLXSW_REG_MRSR_LEN);
+
+/* reg_mrsr_command
+ * Reset/shutdown command
+ * 0 - do nothing
+ * 1 - software reset
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, mrsr, command, 0x00, 0, 4);
+
+static inline void mlxsw_reg_mrsr_pack(char *payload)
+{
+ MLXSW_REG_ZERO(mrsr, payload);
+ mlxsw_reg_mrsr_command_set(payload, 1);
+}
+
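Issuing the reset then reduces to a pack-and-write, mirroring mlxsw_pci_sw_reset() earlier in this patch (a sketch; the function name is illustrative):

static int example_issue_sw_reset(struct mlxsw_core *mlxsw_core)
{
	char mrsr_pl[MLXSW_REG_MRSR_LEN];

	mlxsw_reg_mrsr_pack(mrsr_pl); /* command = 1: software reset */
	/* the core layer flags MRSR writes with reset_ok, so the
	 * firmware's RUNNING_RESET status is treated as success
	 */
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mrsr), mrsr_pl);
}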
/* MLCR - Management LED Control Register
* --------------------------------------
* Controls the system LEDs.
@@ -7892,6 +7922,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(mcia),
MLXSW_REG(mpat),
MLXSW_REG(mpar),
+ MLXSW_REG(mrsr),
MLXSW_REG(mlcr),
MLXSW_REG(mpsc),
MLXSW_REG(mcqi),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h
index 087aad52c195..fd9299ccec72 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/resources.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h
@@ -43,6 +43,9 @@ enum mlxsw_res_id {
MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE,
MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE,
MLXSW_RES_ID_MAX_TRAP_GROUPS,
+ MLXSW_RES_ID_CQE_V0,
+ MLXSW_RES_ID_CQE_V1,
+ MLXSW_RES_ID_CQE_V2,
MLXSW_RES_ID_COUNTER_POOL_SIZE,
MLXSW_RES_ID_MAX_SPAN,
MLXSW_RES_ID_COUNTER_SIZE_PACKETS_BYTES,
@@ -81,6 +84,9 @@ static u16 mlxsw_res_ids[] = {
[MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE] = 0x1002,
[MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE] = 0x1003,
[MLXSW_RES_ID_MAX_TRAP_GROUPS] = 0x2201,
+ [MLXSW_RES_ID_CQE_V0] = 0x2210,
+ [MLXSW_RES_ID_CQE_V1] = 0x2211,
+ [MLXSW_RES_ID_CQE_V2] = 0x2212,
[MLXSW_RES_ID_COUNTER_POOL_SIZE] = 0x2410,
[MLXSW_RES_ID_MAX_SPAN] = 0x2420,
[MLXSW_RES_ID_COUNTER_SIZE_PACKETS_BYTES] = 0x2443,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index adc6ab2cf429..968b88af2ef5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -441,29 +441,29 @@ static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}
-int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
- u8 state)
+enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- enum mlxsw_reg_spms_state spms_state;
- char *spms_pl;
- int err;
-
switch (state) {
case BR_STATE_FORWARDING:
- spms_state = MLXSW_REG_SPMS_STATE_FORWARDING;
- break;
+ return MLXSW_REG_SPMS_STATE_FORWARDING;
case BR_STATE_LEARNING:
- spms_state = MLXSW_REG_SPMS_STATE_LEARNING;
- break;
+ return MLXSW_REG_SPMS_STATE_LEARNING;
case BR_STATE_LISTENING: /* fall-through */
case BR_STATE_DISABLED: /* fall-through */
case BR_STATE_BLOCKING:
- spms_state = MLXSW_REG_SPMS_STATE_DISCARDING;
- break;
+ return MLXSW_REG_SPMS_STATE_DISCARDING;
default:
BUG();
}
+}
+
+int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
+ u8 state)
+{
+ enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char *spms_pl;
+ int err;
spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
if (!spms_pl)
@@ -1238,21 +1238,10 @@ static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
size_t len)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
- u8 module = mlxsw_sp_port->mapping.module;
- u8 width = mlxsw_sp_port->mapping.width;
- u8 lane = mlxsw_sp_port->mapping.lane;
- int err;
-
- if (!mlxsw_sp_port->split)
- err = snprintf(name, len, "p%d", module + 1);
- else
- err = snprintf(name, len, "p%ds%d", module + 1,
- lane / width);
-
- if (err >= len)
- return -EINVAL;
- return 0;
+ return mlxsw_core_port_get_phys_port_name(mlxsw_sp_port->mlxsw_sp->core,
+ mlxsw_sp_port->local_port,
+ name, len);
}
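The delegation works because devlink derives the name from the attributes registered in mlxsw_core_port_eth_set(): for the PHYSICAL flavour it formats "p<number>" and, for split ports, "p<number>s<subnumber>", matching what the removed snprintf() code produced from module + 1 and lane / width. A sketch of that derivation (not the devlink implementation itself):

static int example_phys_port_name(char *name, size_t len, u32 port_number,
				  bool split, u32 subnumber)
{
	int n;

	if (split)
		n = snprintf(name, len, "p%us%u", port_number, subnumber);
	else
		n = snprintf(name, len, "p%u", port_number);

	return (n < 0 || (size_t)n >= len) ? -EINVAL : 0;
}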
static struct mlxsw_sp_port_mall_tc_entry *
@@ -2927,8 +2916,8 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
}
mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
- mlxsw_sp_port, dev, mlxsw_sp_port->split,
- module);
+ mlxsw_sp_port, dev, module + 1,
+ mlxsw_sp_port->split, lane / width);
mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0);
return 0;
@@ -3103,7 +3092,8 @@ static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
}
static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
- unsigned int count)
+ unsigned int count,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
struct mlxsw_sp_port *mlxsw_sp_port;
@@ -3115,6 +3105,7 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
if (!mlxsw_sp_port) {
dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
local_port);
+ NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
return -EINVAL;
}
@@ -3123,11 +3114,13 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
if (count != 2 && count != 4) {
netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
+ NL_SET_ERR_MSG_MOD(extack, "Port can only be split into 2 or 4 ports");
return -EINVAL;
}
if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
+ NL_SET_ERR_MSG_MOD(extack, "Port cannot be split further");
return -EINVAL;
}
@@ -3136,6 +3129,7 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
base_port = local_port;
if (mlxsw_sp->ports[base_port + 1]) {
netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
+ NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration");
return -EINVAL;
}
} else {
@@ -3143,6 +3137,7 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
if (mlxsw_sp->ports[base_port + 1] ||
mlxsw_sp->ports[base_port + 3]) {
netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
+ NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration");
return -EINVAL;
}
}
@@ -3164,7 +3159,8 @@ err_port_split_create:
return err;
}
-static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
+static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
struct mlxsw_sp_port *mlxsw_sp_port;
@@ -3176,11 +3172,13 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
if (!mlxsw_sp_port) {
dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
local_port);
+ NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
return -EINVAL;
}
if (!mlxsw_sp_port->split) {
- netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n");
+ netdev_err(mlxsw_sp_port->dev, "Port was not split\n");
+ NL_SET_ERR_MSG_MOD(extack, "Port was not split");
return -EINVAL;
}
@@ -3666,6 +3664,15 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
goto err_lag_init;
}
+ /* Initialize SPAN before router and switchdev, so that those components
+ * can call mlxsw_sp_span_respin().
+ */
+ err = mlxsw_sp_span_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
+ goto err_span_init;
+ }
+
err = mlxsw_sp_switchdev_init(mlxsw_sp);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
@@ -3684,15 +3691,6 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
goto err_afa_init;
}
- err = mlxsw_sp_span_init(mlxsw_sp);
- if (err) {
- dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
- goto err_span_init;
- }
-
- /* Initialize router after SPAN is initialized, so that the FIB and
- * neighbor event handlers can issue SPAN respin.
- */
err = mlxsw_sp_router_init(mlxsw_sp);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
@@ -3739,14 +3737,14 @@ err_acl_init:
err_netdev_notifier:
mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
- mlxsw_sp_span_fini(mlxsw_sp);
-err_span_init:
mlxsw_sp_afa_fini(mlxsw_sp);
err_afa_init:
mlxsw_sp_counter_pool_fini(mlxsw_sp);
err_counter_pool_init:
mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
+ mlxsw_sp_span_fini(mlxsw_sp);
+err_span_init:
mlxsw_sp_lag_fini(mlxsw_sp);
err_lag_init:
mlxsw_sp_buffers_fini(mlxsw_sp);
@@ -3768,10 +3766,10 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
mlxsw_sp_acl_fini(mlxsw_sp);
unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb);
mlxsw_sp_router_fini(mlxsw_sp);
- mlxsw_sp_span_fini(mlxsw_sp);
mlxsw_sp_afa_fini(mlxsw_sp);
mlxsw_sp_counter_pool_fini(mlxsw_sp);
mlxsw_sp_switchdev_fini(mlxsw_sp);
+ mlxsw_sp_span_fini(mlxsw_sp);
mlxsw_sp_lag_fini(mlxsw_sp);
mlxsw_sp_buffers_fini(mlxsw_sp);
mlxsw_sp_traps_fini(mlxsw_sp);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 804d4d2c8031..4a519d8edec8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -364,6 +364,7 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
enum mlxsw_reg_qeec_hr hr, u8 index,
u8 next_index, u32 maxrate);
+enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 stp_state);
int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
u8 state);
int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 1904c0323d39..77b2adb29341 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -442,7 +442,7 @@ struct mlxsw_sp_fib6_entry {
struct mlxsw_sp_rt6 {
struct list_head list;
- struct rt6_info *rt;
+ struct fib6_info *rt;
};
struct mlxsw_sp_lpm_tree {
@@ -2770,9 +2770,9 @@ mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp,
struct in6_addr *gw;
int ifindex, weight;
- ifindex = mlxsw_sp_rt6->rt->dst.dev->ifindex;
- weight = mlxsw_sp_rt6->rt->rt6i_nh_weight;
- gw = &mlxsw_sp_rt6->rt->rt6i_gateway;
+ ifindex = mlxsw_sp_rt6->rt->fib6_nh.nh_dev->ifindex;
+ weight = mlxsw_sp_rt6->rt->fib6_nh.nh_weight;
+ gw = &mlxsw_sp_rt6->rt->fib6_nh.nh_gw;
if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex,
weight))
return false;
@@ -2838,7 +2838,7 @@ mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed)
struct net_device *dev;
list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
- dev = mlxsw_sp_rt6->rt->dst.dev;
+ dev = mlxsw_sp_rt6->rt->fib6_nh.nh_dev;
val ^= dev->ifindex;
}
@@ -3834,11 +3834,11 @@ mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
for (i = 0; i < nh_grp->count; i++) {
struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
- struct rt6_info *rt = mlxsw_sp_rt6->rt;
+ struct fib6_info *rt = mlxsw_sp_rt6->rt;
- if (nh->rif && nh->rif->dev == rt->dst.dev &&
+ if (nh->rif && nh->rif->dev == rt->fib6_nh.nh_dev &&
ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr,
- &rt->rt6i_gateway))
+ &rt->fib6_nh.nh_gw))
return nh;
continue;
}
@@ -3895,7 +3895,7 @@ mlxsw_sp_fib6_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL) {
list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
- list)->rt->rt6i_nh_flags |= RTNH_F_OFFLOAD;
+ list)->rt->fib6_nh.nh_flags |= RTNH_F_OFFLOAD;
return;
}
@@ -3905,9 +3905,9 @@ mlxsw_sp_fib6_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6);
if (nh && nh->offloaded)
- mlxsw_sp_rt6->rt->rt6i_nh_flags |= RTNH_F_OFFLOAD;
+ mlxsw_sp_rt6->rt->fib6_nh.nh_flags |= RTNH_F_OFFLOAD;
else
- mlxsw_sp_rt6->rt->rt6i_nh_flags &= ~RTNH_F_OFFLOAD;
+ mlxsw_sp_rt6->rt->fib6_nh.nh_flags &= ~RTNH_F_OFFLOAD;
}
}
@@ -3920,9 +3920,9 @@ mlxsw_sp_fib6_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
common);
list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
- struct rt6_info *rt = mlxsw_sp_rt6->rt;
+ struct fib6_info *rt = mlxsw_sp_rt6->rt;
- rt->rt6i_nh_flags &= ~RTNH_F_OFFLOAD;
+ rt->fib6_nh.nh_flags &= ~RTNH_F_OFFLOAD;
}
}
@@ -4699,29 +4699,29 @@ static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
}
-static bool mlxsw_sp_fib6_rt_should_ignore(const struct rt6_info *rt)
+static bool mlxsw_sp_fib6_rt_should_ignore(const struct fib6_info *rt)
{
/* Packets with link-local destination IP arriving to the router
* are trapped to the CPU, so no need to program specific routes
* for them.
*/
- if (ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_LINKLOCAL)
+ if (ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_LINKLOCAL)
return true;
/* Multicast routes aren't supported, so ignore them. Neighbour
* Discovery packets are specifically trapped.
*/
- if (ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_MULTICAST)
+ if (ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_MULTICAST)
return true;
/* Cloned routes are irrelevant in the forwarding path. */
- if (rt->rt6i_flags & RTF_CACHE)
+ if (rt->fib6_flags & RTF_CACHE)
return true;
return false;
}
-static struct mlxsw_sp_rt6 *mlxsw_sp_rt6_create(struct rt6_info *rt)
+static struct mlxsw_sp_rt6 *mlxsw_sp_rt6_create(struct fib6_info *rt)
{
struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
@@ -4734,18 +4734,18 @@ static struct mlxsw_sp_rt6 *mlxsw_sp_rt6_create(struct rt6_info *rt)
* memory.
*/
mlxsw_sp_rt6->rt = rt;
- rt6_hold(rt);
+ fib6_info_hold(rt);
return mlxsw_sp_rt6;
}
#if IS_ENABLED(CONFIG_IPV6)
-static void mlxsw_sp_rt6_release(struct rt6_info *rt)
+static void mlxsw_sp_rt6_release(struct fib6_info *rt)
{
- rt6_release(rt);
+ fib6_info_release(rt);
}
#else
-static void mlxsw_sp_rt6_release(struct rt6_info *rt)
+static void mlxsw_sp_rt6_release(struct fib6_info *rt)
{
}
#endif
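For readers tracking the rt6_info to fib6_info conversion, the field mapping applied mechanically throughout this file is summarized below (compiled from the hunks themselves):

/* rt6_info (old)             -> fib6_info (new)
 * rt->dst.dev                -> rt->fib6_nh.nh_dev
 * rt->rt6i_gateway           -> rt->fib6_nh.nh_gw
 * rt->rt6i_nh_weight         -> rt->fib6_nh.nh_weight
 * rt->rt6i_nh_flags          -> rt->fib6_nh.nh_flags
 * rt->rt6i_flags             -> rt->fib6_flags
 * rt->rt6i_dst, rt->rt6i_src -> rt->fib6_dst, rt->fib6_src
 * rt->rt6i_table             -> rt->fib6_table
 * rt->rt6i_metric            -> rt->fib6_metric
 * rt6_hold(), rt6_release()  -> fib6_info_hold(), fib6_info_release()
 */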
@@ -4756,13 +4756,13 @@ static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
kfree(mlxsw_sp_rt6);
}
-static bool mlxsw_sp_fib6_rt_can_mp(const struct rt6_info *rt)
+static bool mlxsw_sp_fib6_rt_can_mp(const struct fib6_info *rt)
{
/* RTF_CACHE routes are ignored */
- return (rt->rt6i_flags & (RTF_GATEWAY | RTF_ADDRCONF)) == RTF_GATEWAY;
+ return (rt->fib6_flags & (RTF_GATEWAY | RTF_ADDRCONF)) == RTF_GATEWAY;
}
-static struct rt6_info *
+static struct fib6_info *
mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
{
return list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
@@ -4771,7 +4771,7 @@ mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
static struct mlxsw_sp_fib6_entry *
mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node,
- const struct rt6_info *nrt, bool replace)
+ const struct fib6_info *nrt, bool replace)
{
struct mlxsw_sp_fib6_entry *fib6_entry;
@@ -4779,21 +4779,21 @@ mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node,
return NULL;
list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
- struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
+ struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
/* RT6_TABLE_LOCAL and RT6_TABLE_MAIN share the same
* virtual router.
*/
- if (rt->rt6i_table->tb6_id > nrt->rt6i_table->tb6_id)
+ if (rt->fib6_table->tb6_id > nrt->fib6_table->tb6_id)
continue;
- if (rt->rt6i_table->tb6_id != nrt->rt6i_table->tb6_id)
+ if (rt->fib6_table->tb6_id != nrt->fib6_table->tb6_id)
break;
- if (rt->rt6i_metric < nrt->rt6i_metric)
+ if (rt->fib6_metric < nrt->fib6_metric)
continue;
- if (rt->rt6i_metric == nrt->rt6i_metric &&
+ if (rt->fib6_metric == nrt->fib6_metric &&
mlxsw_sp_fib6_rt_can_mp(rt))
return fib6_entry;
- if (rt->rt6i_metric > nrt->rt6i_metric)
+ if (rt->fib6_metric > nrt->fib6_metric)
break;
}
@@ -4802,7 +4802,7 @@ mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node,
static struct mlxsw_sp_rt6 *
mlxsw_sp_fib6_entry_rt_find(const struct mlxsw_sp_fib6_entry *fib6_entry,
- const struct rt6_info *rt)
+ const struct fib6_info *rt)
{
struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
@@ -4815,21 +4815,21 @@ mlxsw_sp_fib6_entry_rt_find(const struct mlxsw_sp_fib6_entry *fib6_entry,
}
static bool mlxsw_sp_nexthop6_ipip_type(const struct mlxsw_sp *mlxsw_sp,
- const struct rt6_info *rt,
+ const struct fib6_info *rt,
enum mlxsw_sp_ipip_type *ret)
{
- return rt->dst.dev &&
- mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->dst.dev, ret);
+ return rt->fib6_nh.nh_dev &&
+ mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->fib6_nh.nh_dev, ret);
}
static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_group *nh_grp,
struct mlxsw_sp_nexthop *nh,
- const struct rt6_info *rt)
+ const struct fib6_info *rt)
{
const struct mlxsw_sp_ipip_ops *ipip_ops;
struct mlxsw_sp_ipip_entry *ipip_entry;
- struct net_device *dev = rt->dst.dev;
+ struct net_device *dev = rt->fib6_nh.nh_dev;
struct mlxsw_sp_rif *rif;
int err;
@@ -4870,13 +4870,13 @@ static void mlxsw_sp_nexthop6_type_fini(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_group *nh_grp,
struct mlxsw_sp_nexthop *nh,
- const struct rt6_info *rt)
+ const struct fib6_info *rt)
{
- struct net_device *dev = rt->dst.dev;
+ struct net_device *dev = rt->fib6_nh.nh_dev;
nh->nh_grp = nh_grp;
- nh->nh_weight = rt->rt6i_nh_weight;
- memcpy(&nh->gw_addr, &rt->rt6i_gateway, sizeof(nh->gw_addr));
+ nh->nh_weight = rt->fib6_nh.nh_weight;
+ memcpy(&nh->gw_addr, &rt->fib6_nh.nh_gw, sizeof(nh->gw_addr));
mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
@@ -4897,9 +4897,9 @@ static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
}
static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp,
- const struct rt6_info *rt)
+ const struct fib6_info *rt)
{
- return rt->rt6i_flags & RTF_GATEWAY ||
+ return rt->fib6_flags & RTF_GATEWAY ||
mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, NULL);
}
@@ -4928,7 +4928,7 @@ mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp,
nh_grp->gateway = mlxsw_sp_rt6_is_gateway(mlxsw_sp, mlxsw_sp_rt6->rt);
nh_grp->count = fib6_entry->nrt6;
for (i = 0; i < nh_grp->count; i++) {
- struct rt6_info *rt = mlxsw_sp_rt6->rt;
+ struct fib6_info *rt = mlxsw_sp_rt6->rt;
nh = &nh_grp->nexthops[i];
err = mlxsw_sp_nexthop6_init(mlxsw_sp, nh_grp, nh, rt);
@@ -5040,7 +5040,7 @@ err_nexthop6_group_get:
static int
mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib6_entry *fib6_entry,
- struct rt6_info *rt)
+ struct fib6_info *rt)
{
struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
int err;
@@ -5068,7 +5068,7 @@ err_nexthop6_group_update:
static void
mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib6_entry *fib6_entry,
- struct rt6_info *rt)
+ struct fib6_info *rt)
{
struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
@@ -5084,7 +5084,7 @@ mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry,
- const struct rt6_info *rt)
+ const struct fib6_info *rt)
{
/* Packets hitting RTF_REJECT routes need to be discarded by the
* stack. We can rely on their destination device not having a
@@ -5092,9 +5092,9 @@ static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
* local, which will cause them to be trapped with a lower
* priority than packets that need to be locally received.
*/
- if (rt->rt6i_flags & (RTF_LOCAL | RTF_ANYCAST))
+ if (rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST))
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
- else if (rt->rt6i_flags & RTF_REJECT)
+ else if (rt->fib6_flags & RTF_REJECT)
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
else if (mlxsw_sp_rt6_is_gateway(mlxsw_sp, rt))
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
@@ -5118,7 +5118,7 @@ mlxsw_sp_fib6_entry_rt_destroy_all(struct mlxsw_sp_fib6_entry *fib6_entry)
static struct mlxsw_sp_fib6_entry *
mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_node *fib_node,
- struct rt6_info *rt)
+ struct fib6_info *rt)
{
struct mlxsw_sp_fib6_entry *fib6_entry;
struct mlxsw_sp_fib_entry *fib_entry;
@@ -5168,25 +5168,25 @@ static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp,
static struct mlxsw_sp_fib6_entry *
mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
- const struct rt6_info *nrt, bool replace)
+ const struct fib6_info *nrt, bool replace)
{
struct mlxsw_sp_fib6_entry *fib6_entry, *fallback = NULL;
list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
- struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
+ struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
- if (rt->rt6i_table->tb6_id > nrt->rt6i_table->tb6_id)
+ if (rt->fib6_table->tb6_id > nrt->fib6_table->tb6_id)
continue;
- if (rt->rt6i_table->tb6_id != nrt->rt6i_table->tb6_id)
+ if (rt->fib6_table->tb6_id != nrt->fib6_table->tb6_id)
break;
- if (replace && rt->rt6i_metric == nrt->rt6i_metric) {
+ if (replace && rt->fib6_metric == nrt->fib6_metric) {
if (mlxsw_sp_fib6_rt_can_mp(rt) ==
mlxsw_sp_fib6_rt_can_mp(nrt))
return fib6_entry;
if (mlxsw_sp_fib6_rt_can_mp(nrt))
fallback = fallback ?: fib6_entry;
}
- if (rt->rt6i_metric > nrt->rt6i_metric)
+ if (rt->fib6_metric > nrt->fib6_metric)
return fallback ?: fib6_entry;
}
@@ -5198,7 +5198,7 @@ mlxsw_sp_fib6_node_list_insert(struct mlxsw_sp_fib6_entry *new6_entry,
bool replace)
{
struct mlxsw_sp_fib_node *fib_node = new6_entry->common.fib_node;
- struct rt6_info *nrt = mlxsw_sp_fib6_entry_rt(new6_entry);
+ struct fib6_info *nrt = mlxsw_sp_fib6_entry_rt(new6_entry);
struct mlxsw_sp_fib6_entry *fib6_entry;
fib6_entry = mlxsw_sp_fib6_node_entry_find(fib_node, nrt, replace);
@@ -5213,9 +5213,9 @@ mlxsw_sp_fib6_node_list_insert(struct mlxsw_sp_fib6_entry *new6_entry,
struct mlxsw_sp_fib6_entry *last;
list_for_each_entry(last, &fib_node->entry_list, common.list) {
- struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(last);
+ struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(last);
- if (nrt->rt6i_table->tb6_id > rt->rt6i_table->tb6_id)
+ if (nrt->fib6_table->tb6_id > rt->fib6_table->tb6_id)
break;
fib6_entry = last;
}
@@ -5268,29 +5268,29 @@ mlxsw_sp_fib6_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
static struct mlxsw_sp_fib6_entry *
mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp,
- const struct rt6_info *rt)
+ const struct fib6_info *rt)
{
struct mlxsw_sp_fib6_entry *fib6_entry;
struct mlxsw_sp_fib_node *fib_node;
struct mlxsw_sp_fib *fib;
struct mlxsw_sp_vr *vr;
- vr = mlxsw_sp_vr_find(mlxsw_sp, rt->rt6i_table->tb6_id);
+ vr = mlxsw_sp_vr_find(mlxsw_sp, rt->fib6_table->tb6_id);
if (!vr)
return NULL;
fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV6);
- fib_node = mlxsw_sp_fib_node_lookup(fib, &rt->rt6i_dst.addr,
- sizeof(rt->rt6i_dst.addr),
- rt->rt6i_dst.plen);
+ fib_node = mlxsw_sp_fib_node_lookup(fib, &rt->fib6_dst.addr,
+ sizeof(rt->fib6_dst.addr),
+ rt->fib6_dst.plen);
if (!fib_node)
return NULL;
list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
- struct rt6_info *iter_rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
+ struct fib6_info *iter_rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
- if (rt->rt6i_table->tb6_id == iter_rt->rt6i_table->tb6_id &&
- rt->rt6i_metric == iter_rt->rt6i_metric &&
+ if (rt->fib6_table->tb6_id == iter_rt->fib6_table->tb6_id &&
+ rt->fib6_metric == iter_rt->fib6_metric &&
mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt))
return fib6_entry;
}
@@ -5316,7 +5316,7 @@ static void mlxsw_sp_fib6_entry_replace(struct mlxsw_sp *mlxsw_sp,
}
static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
- struct rt6_info *rt, bool replace)
+ struct fib6_info *rt, bool replace)
{
struct mlxsw_sp_fib6_entry *fib6_entry;
struct mlxsw_sp_fib_node *fib_node;
@@ -5325,16 +5325,16 @@ static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
if (mlxsw_sp->router->aborted)
return 0;
- if (rt->rt6i_src.plen)
+ if (rt->fib6_src.plen)
return -EINVAL;
if (mlxsw_sp_fib6_rt_should_ignore(rt))
return 0;
- fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->rt6i_table->tb6_id,
- &rt->rt6i_dst.addr,
- sizeof(rt->rt6i_dst.addr),
- rt->rt6i_dst.plen,
+ fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id,
+ &rt->fib6_dst.addr,
+ sizeof(rt->fib6_dst.addr),
+ rt->fib6_dst.plen,
MLXSW_SP_L3_PROTO_IPV6);
if (IS_ERR(fib_node))
return PTR_ERR(fib_node);
@@ -5373,7 +5373,7 @@ err_fib6_entry_nexthop_add:
}
static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
- struct rt6_info *rt)
+ struct fib6_info *rt)
{
struct mlxsw_sp_fib6_entry *fib6_entry;
struct mlxsw_sp_fib_node *fib_node;
@@ -5725,6 +5725,7 @@ static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
switch (fib_work->event) {
case FIB_EVENT_ENTRY_REPLACE: /* fall through */
+ case FIB_EVENT_ENTRY_APPEND: /* fall through */
case FIB_EVENT_ENTRY_ADD:
replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
err = mlxsw_sp_router_fib6_add(mlxsw_sp,
@@ -5831,12 +5832,13 @@ static void mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work,
switch (fib_work->event) {
case FIB_EVENT_ENTRY_REPLACE: /* fall through */
+ case FIB_EVENT_ENTRY_APPEND: /* fall through */
case FIB_EVENT_ENTRY_ADD: /* fall through */
case FIB_EVENT_ENTRY_DEL:
fen6_info = container_of(info, struct fib6_entry_notifier_info,
info);
fib_work->fen6_info = *fen6_info;
- rt6_hold(fib_work->fen6_info.rt);
+ fib6_info_hold(fib_work->fen6_info.rt);
break;
}
}
@@ -5882,24 +5884,24 @@ static int mlxsw_sp_router_fib_rule_event(unsigned long event,
switch (info->family) {
case AF_INET:
if (!fib4_rule_default(rule) && !rule->l3mdev)
- err = -1;
+ err = -EOPNOTSUPP;
break;
case AF_INET6:
if (!fib6_rule_default(rule) && !rule->l3mdev)
- err = -1;
+ err = -EOPNOTSUPP;
break;
case RTNL_FAMILY_IPMR:
if (!ipmr_rule_default(rule) && !rule->l3mdev)
- err = -1;
+ err = -EOPNOTSUPP;
break;
case RTNL_FAMILY_IP6MR:
if (!ip6mr_rule_default(rule) && !rule->l3mdev)
- err = -1;
+ err = -EOPNOTSUPP;
break;
}
if (err < 0)
- NL_SET_ERR_MSG_MOD(extack, "FIB rules not supported. Aborting offload");
+ NL_SET_ERR_MSG_MOD(extack, "FIB rules not supported");
return err;
}
@@ -5926,8 +5928,15 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
case FIB_EVENT_RULE_DEL:
err = mlxsw_sp_router_fib_rule_event(event, info,
router->mlxsw_sp);
- if (!err)
- return NOTIFY_DONE;
+ if (!err || info->extack)
+ return notifier_from_errno(err);
+ break;
+ case FIB_EVENT_ENTRY_ADD:
+ if (router->aborted) {
+ NL_SET_ERR_MSG_MOD(info->extack, "FIB offload was aborted. Not configuring route");
+ return notifier_from_errno(-EINVAL);
+ }
+ break;
}
fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
index 65a77708ff61..3d187d88cc7c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -32,6 +32,7 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
+#include <linux/if_bridge.h>
#include <linux/list.h>
#include <net/arp.h>
#include <net/gre.h>
@@ -39,8 +40,9 @@
#include <net/ip6_tunnel.h>
#include "spectrum.h"
-#include "spectrum_span.h"
#include "spectrum_ipip.h"
+#include "spectrum_span.h"
+#include "spectrum_switchdev.h"
int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
{
@@ -135,14 +137,14 @@ struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_phys = {
static int mlxsw_sp_span_dmac(struct neigh_table *tbl,
const void *pkey,
- struct net_device *l3edev,
+ struct net_device *dev,
unsigned char dmac[ETH_ALEN])
{
- struct neighbour *neigh = neigh_lookup(tbl, pkey, l3edev);
+ struct neighbour *neigh = neigh_lookup(tbl, pkey, dev);
int err = 0;
if (!neigh) {
- neigh = neigh_create(tbl, pkey, l3edev);
+ neigh = neigh_create(tbl, pkey, dev);
if (IS_ERR(neigh))
return PTR_ERR(neigh);
}
@@ -167,8 +169,99 @@ mlxsw_sp_span_entry_unoffloadable(struct mlxsw_sp_span_parms *sparmsp)
return 0;
}
+static struct net_device *
+mlxsw_sp_span_entry_bridge_8021q(const struct net_device *br_dev,
+ unsigned char *dmac,
+ u16 *p_vid)
+{
+ struct bridge_vlan_info vinfo;
+ struct net_device *edev;
+ u16 vid = *p_vid;
+
+ if (!vid && WARN_ON(br_vlan_get_pvid(br_dev, &vid)))
+ return NULL;
+ if (!vid ||
+ br_vlan_get_info(br_dev, vid, &vinfo) ||
+ !(vinfo.flags & BRIDGE_VLAN_INFO_BRENTRY))
+ return NULL;
+
+ edev = br_fdb_find_port(br_dev, dmac, vid);
+ if (!edev)
+ return NULL;
+
+ if (br_vlan_get_info(edev, vid, &vinfo))
+ return NULL;
+ if (vinfo.flags & BRIDGE_VLAN_INFO_UNTAGGED)
+ *p_vid = 0;
+ else
+ *p_vid = vid;
+ return edev;
+}
+
+static struct net_device *
+mlxsw_sp_span_entry_bridge_8021d(const struct net_device *br_dev,
+ unsigned char *dmac)
+{
+ return br_fdb_find_port(br_dev, dmac, 0);
+}
+
+static struct net_device *
+mlxsw_sp_span_entry_bridge(const struct net_device *br_dev,
+ unsigned char dmac[ETH_ALEN],
+ u16 *p_vid)
+{
+ struct mlxsw_sp_bridge_port *bridge_port;
+ enum mlxsw_reg_spms_state spms_state;
+ struct net_device *dev = NULL;
+ struct mlxsw_sp_port *port;
+ u8 stp_state;
+
+ if (br_vlan_enabled(br_dev))
+ dev = mlxsw_sp_span_entry_bridge_8021q(br_dev, dmac, p_vid);
+ else if (!*p_vid)
+ dev = mlxsw_sp_span_entry_bridge_8021d(br_dev, dmac);
+ if (!dev)
+ return NULL;
+
+ port = mlxsw_sp_port_dev_lower_find(dev);
+ if (!port)
+ return NULL;
+
+ bridge_port = mlxsw_sp_bridge_port_find(port->mlxsw_sp->bridge, dev);
+ if (!bridge_port)
+ return NULL;
+
+ stp_state = mlxsw_sp_bridge_port_stp_state(bridge_port);
+ spms_state = mlxsw_sp_stp_spms_state(stp_state);
+ if (spms_state != MLXSW_REG_SPMS_STATE_FORWARDING)
+ return NULL;
+
+ return dev;
+}
+
+static struct net_device *
+mlxsw_sp_span_entry_vlan(const struct net_device *vlan_dev,
+ u16 *p_vid)
+{
+ *p_vid = vlan_dev_vlan_id(vlan_dev);
+ return vlan_dev_real_dev(vlan_dev);
+}
+
+static struct net_device *
+mlxsw_sp_span_entry_lag(struct net_device *lag_dev)
+{
+ struct net_device *dev;
+ struct list_head *iter;
+
+ netdev_for_each_lower_dev(lag_dev, dev, iter)
+ if ((dev->flags & IFF_UP) && mlxsw_sp_port_dev_check(dev))
+ return dev;
+
+ return NULL;
+}
+
static __maybe_unused int
-mlxsw_sp_span_entry_tunnel_parms_common(struct net_device *l3edev,
+mlxsw_sp_span_entry_tunnel_parms_common(struct net_device *edev,
union mlxsw_sp_l3addr saddr,
union mlxsw_sp_l3addr daddr,
union mlxsw_sp_l3addr gw,
@@ -177,21 +270,51 @@ mlxsw_sp_span_entry_tunnel_parms_common(struct net_device *l3edev,
struct mlxsw_sp_span_parms *sparmsp)
{
unsigned char dmac[ETH_ALEN];
+ u16 vid = 0;
if (mlxsw_sp_l3addr_is_zero(gw))
gw = daddr;
- if (!l3edev || !mlxsw_sp_port_dev_check(l3edev) ||
- mlxsw_sp_span_dmac(tbl, &gw, l3edev, dmac))
- return mlxsw_sp_span_entry_unoffloadable(sparmsp);
+ if (!edev || mlxsw_sp_span_dmac(tbl, &gw, edev, dmac))
+ goto unoffloadable;
- sparmsp->dest_port = netdev_priv(l3edev);
+ if (is_vlan_dev(edev))
+ edev = mlxsw_sp_span_entry_vlan(edev, &vid);
+
+ if (netif_is_bridge_master(edev)) {
+ edev = mlxsw_sp_span_entry_bridge(edev, dmac, &vid);
+ if (!edev)
+ goto unoffloadable;
+ }
+
+ if (is_vlan_dev(edev)) {
+ if (vid || !(edev->flags & IFF_UP))
+ goto unoffloadable;
+ edev = mlxsw_sp_span_entry_vlan(edev, &vid);
+ }
+
+ if (netif_is_lag_master(edev)) {
+ if (!(edev->flags & IFF_UP))
+ goto unoffloadable;
+ edev = mlxsw_sp_span_entry_lag(edev);
+ if (!edev)
+ goto unoffloadable;
+ }
+
+ if (!mlxsw_sp_port_dev_check(edev))
+ goto unoffloadable;
+
+ sparmsp->dest_port = netdev_priv(edev);
sparmsp->ttl = ttl;
memcpy(sparmsp->dmac, dmac, ETH_ALEN);
- memcpy(sparmsp->smac, l3edev->dev_addr, ETH_ALEN);
+ memcpy(sparmsp->smac, edev->dev_addr, ETH_ALEN);
sparmsp->saddr = saddr;
sparmsp->daddr = daddr;
+ sparmsp->vid = vid;
return 0;
+
+unoffloadable:
+ return mlxsw_sp_span_entry_unoffloadable(sparmsp);
}
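The rewrite above resolves the mirror target from the route's egress device down to a physical port: strip an 802.1q upper, resolve a bridge through its FDB, strip a VLAN upper found behind the bridge, pick an active LAG slave, and only then require an mlxsw port. A stand-alone sketch of that order follows; the types, names and predicates here are invented stand-ins for illustration, not the kernel helpers, and the up/VID checks and LAG slave selection are omitted.

#include <stdio.h>

enum dev_kind { DEV_VLAN, DEV_BRIDGE, DEV_LAG, DEV_PORT, DEV_OTHER };

struct dev {
	enum dev_kind kind;
	struct dev *lower;	/* stand-in for the real lower-device lookup */
	const char *name;
};

static struct dev *resolve(struct dev *d)
{
	if (d && d->kind == DEV_VLAN)	/* strip an 802.1q upper */
		d = d->lower;
	if (d && d->kind == DEV_BRIDGE)	/* bridge: the FDB names the egress port */
		d = d->lower;
	if (d && d->kind == DEV_VLAN)	/* VLAN upper found behind the bridge */
		d = d->lower;
	if (d && d->kind == DEV_LAG)	/* LAG: pick an active slave */
		d = d->lower;
	return (d && d->kind == DEV_PORT) ? d : NULL; /* must end on a port */
}

int main(void)
{
	struct dev port = { DEV_PORT, NULL, "swp1" };
	struct dev lag  = { DEV_LAG, &port, "bond0" };
	struct dev br   = { DEV_BRIDGE, &lag, "br0" };
	struct dev vlan = { DEV_VLAN, &br, "br0.10" };
	struct dev *d = resolve(&vlan);

	printf("egress: %s\n", d ? d->name : "unoffloadable");
	return 0;
}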
#if IS_ENABLED(CONFIG_NET_IPGRE)
@@ -268,9 +391,10 @@ mlxsw_sp_span_entry_gretap4_configure(struct mlxsw_sp_span_entry *span_entry,
/* Create a new port analyzer entry for local_port. */
mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
+ mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);
mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl,
MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER,
- sparms.dmac, false);
+ sparms.dmac, !!sparms.vid);
mlxsw_reg_mpat_eth_rspan_l3_ipv4_pack(mpat_pl,
sparms.ttl, sparms.smac,
be32_to_cpu(sparms.saddr.addr4),
@@ -368,9 +492,10 @@ mlxsw_sp_span_entry_gretap6_configure(struct mlxsw_sp_span_entry *span_entry,
/* Create a new port analyzer entry for local_port. */
mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
+ mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);
mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl,
MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER,
- sparms.dmac, false);
+ sparms.dmac, !!sparms.vid);
mlxsw_reg_mpat_eth_rspan_l3_ipv6_pack(mpat_pl, sparms.ttl, sparms.smac,
sparms.saddr.addr6,
sparms.daddr.addr6);
@@ -394,6 +519,61 @@ struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap6 = {
};
#endif
+static bool
+mlxsw_sp_span_vlan_can_handle(const struct net_device *dev)
+{
+ return is_vlan_dev(dev) &&
+ mlxsw_sp_port_dev_check(vlan_dev_real_dev(dev));
+}
+
+static int
+mlxsw_sp_span_entry_vlan_parms(const struct net_device *to_dev,
+ struct mlxsw_sp_span_parms *sparmsp)
+{
+ struct net_device *real_dev;
+ u16 vid;
+
+ if (!(to_dev->flags & IFF_UP))
+ return mlxsw_sp_span_entry_unoffloadable(sparmsp);
+
+ real_dev = mlxsw_sp_span_entry_vlan(to_dev, &vid);
+ sparmsp->dest_port = netdev_priv(real_dev);
+ sparmsp->vid = vid;
+ return 0;
+}
+
+static int
+mlxsw_sp_span_entry_vlan_configure(struct mlxsw_sp_span_entry *span_entry,
+ struct mlxsw_sp_span_parms sparms)
+{
+ struct mlxsw_sp_port *dest_port = sparms.dest_port;
+ struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
+ u8 local_port = dest_port->local_port;
+ char mpat_pl[MLXSW_REG_MPAT_LEN];
+ int pa_id = span_entry->id;
+
+ mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
+ MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH);
+ mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
+}
+
+static void
+mlxsw_sp_span_entry_vlan_deconfigure(struct mlxsw_sp_span_entry *span_entry)
+{
+ mlxsw_sp_span_entry_deconfigure_common(span_entry,
+ MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH);
+}
+
+static const
+struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_vlan = {
+ .can_handle = mlxsw_sp_span_vlan_can_handle,
+ .parms = mlxsw_sp_span_entry_vlan_parms,
+ .configure = mlxsw_sp_span_entry_vlan_configure,
+ .deconfigure = mlxsw_sp_span_entry_vlan_deconfigure,
+};
+
static const
struct mlxsw_sp_span_entry_ops *const mlxsw_sp_span_entry_types[] = {
&mlxsw_sp_span_entry_ops_phys,
@@ -403,6 +583,7 @@ struct mlxsw_sp_span_entry_ops *const mlxsw_sp_span_entry_types[] = {
#if IS_ENABLED(CONFIG_IPV6_GRE)
&mlxsw_sp_span_entry_ops_gretap6,
#endif
+ &mlxsw_sp_span_entry_ops_vlan,
};
static int
@@ -766,7 +947,7 @@ int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
span_entry = mlxsw_sp_span_entry_get(mlxsw_sp, to_dev, ops, sparms);
if (!span_entry)
- return -ENOENT;
+ return -ENOBUFS;
netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n",
span_entry->id);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
index 4b87ec20e658..14a6de904db1 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
@@ -63,6 +63,7 @@ struct mlxsw_sp_span_parms {
unsigned char smac[ETH_ALEN];
union mlxsw_sp_l3addr daddr;
union mlxsw_sp_l3addr saddr;
+ u16 vid;
};
struct mlxsw_sp_span_entry_ops;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 4ed01182a82c..e97652c40d13 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -49,7 +49,9 @@
#include <linux/netlink.h>
#include <net/switchdev.h>
+#include "spectrum_span.h"
#include "spectrum_router.h"
+#include "spectrum_switchdev.h"
#include "spectrum.h"
#include "core.h"
#include "reg.h"
@@ -239,7 +241,7 @@ __mlxsw_sp_bridge_port_find(const struct mlxsw_sp_bridge_device *bridge_device,
return NULL;
}
-static struct mlxsw_sp_bridge_port *
+struct mlxsw_sp_bridge_port *
mlxsw_sp_bridge_port_find(struct mlxsw_sp_bridge *bridge,
struct net_device *brport_dev)
{
@@ -922,6 +924,9 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev,
break;
}
+ if (switchdev_trans_ph_commit(trans))
+ mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
+
return err;
}
@@ -1139,6 +1144,9 @@ static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_bridge_port *bridge_port;
u16 vid;
+ if (netif_is_bridge_master(orig_dev))
+ return -EOPNOTSUPP;
+
if (switchdev_trans_ph_prepare(trans))
return 0;
@@ -1646,18 +1654,57 @@ mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
}
}
+struct mlxsw_sp_span_respin_work {
+ struct work_struct work;
+ struct mlxsw_sp *mlxsw_sp;
+};
+
+static void mlxsw_sp_span_respin_work(struct work_struct *work)
+{
+ struct mlxsw_sp_span_respin_work *respin_work =
+ container_of(work, struct mlxsw_sp_span_respin_work, work);
+
+ rtnl_lock();
+ mlxsw_sp_span_respin(respin_work->mlxsw_sp);
+ rtnl_unlock();
+ kfree(respin_work);
+}
+
+static void mlxsw_sp_span_respin_schedule(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_span_respin_work *respin_work;
+
+ respin_work = kzalloc(sizeof(*respin_work), GFP_ATOMIC);
+ if (!respin_work)
+ return;
+
+ INIT_WORK(&respin_work->work, mlxsw_sp_span_respin_work);
+ respin_work->mlxsw_sp = mlxsw_sp;
+
+ mlxsw_core_schedule_work(&respin_work->work);
+}
+
static int mlxsw_sp_port_obj_add(struct net_device *dev,
const struct switchdev_obj *obj,
struct switchdev_trans *trans)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ const struct switchdev_obj_port_vlan *vlan;
int err = 0;
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
- err = mlxsw_sp_port_vlans_add(mlxsw_sp_port,
- SWITCHDEV_OBJ_PORT_VLAN(obj),
- trans);
+ vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
+ err = mlxsw_sp_port_vlans_add(mlxsw_sp_port, vlan, trans);
+
+ if (switchdev_trans_ph_prepare(trans)) {
+ /* The event is emitted before the changes are actually
+ * applied to the bridge. Therefore schedule the respin
+ * call for later, so that the respin logic sees the
+ * updated bridge state.
+ */
+ mlxsw_sp_span_respin_schedule(mlxsw_sp_port->mlxsw_sp);
+ }
break;
case SWITCHDEV_OBJ_ID_PORT_MDB:
err = mlxsw_sp_port_mdb_add(mlxsw_sp_port,
@@ -1697,6 +1744,9 @@ static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_bridge_port *bridge_port;
u16 vid;
+ if (netif_is_bridge_master(orig_dev))
+ return -EOPNOTSUPP;
+
bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
if (WARN_ON(!bridge_port))
return -EINVAL;
@@ -1806,6 +1856,8 @@ static int mlxsw_sp_port_obj_del(struct net_device *dev,
break;
}
+ mlxsw_sp_span_respin_schedule(mlxsw_sp_port->mlxsw_sp);
+
return err;
}
@@ -2222,6 +2274,8 @@ static void mlxsw_sp_switchdev_event_work(struct work_struct *work)
switch (switchdev_work->event) {
case SWITCHDEV_FDB_ADD_TO_DEVICE:
fdb_info = &switchdev_work->fdb_info;
+ if (!fdb_info->added_by_user)
+ break;
err = mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, true);
if (err)
break;
@@ -2231,10 +2285,20 @@ static void mlxsw_sp_switchdev_event_work(struct work_struct *work)
break;
case SWITCHDEV_FDB_DEL_TO_DEVICE:
fdb_info = &switchdev_work->fdb_info;
+ if (!fdb_info->added_by_user)
+ break;
mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, false);
break;
+ case SWITCHDEV_FDB_ADD_TO_BRIDGE: /* fall through */
+ case SWITCHDEV_FDB_DEL_TO_BRIDGE:
+ /* These events are only used to potentially update an existing
+ * SPAN mirror.
+ */
+ break;
}
+ mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
+
out:
rtnl_unlock();
kfree(switchdev_work->fdb_info.addr);
@@ -2263,7 +2327,9 @@ static int mlxsw_sp_switchdev_event(struct notifier_block *unused,
switch (event) {
case SWITCHDEV_FDB_ADD_TO_DEVICE: /* fall through */
- case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ case SWITCHDEV_FDB_DEL_TO_DEVICE: /* fall through */
+ case SWITCHDEV_FDB_ADD_TO_BRIDGE: /* fall through */
+ case SWITCHDEV_FDB_DEL_TO_BRIDGE:
memcpy(&switchdev_work->fdb_info, ptr,
sizeof(switchdev_work->fdb_info));
switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
@@ -2295,6 +2361,12 @@ static struct notifier_block mlxsw_sp_switchdev_notifier = {
.notifier_call = mlxsw_sp_switchdev_event,
};
+u8
+mlxsw_sp_bridge_port_stp_state(struct mlxsw_sp_bridge_port *bridge_port)
+{
+ return bridge_port->stp_state;
+}
+
static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.h
new file mode 100644
index 000000000000..bc44d5effc28
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.h
+ * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/netdevice.h>
+
+struct mlxsw_sp_bridge;
+struct mlxsw_sp_bridge_port;
+
+struct mlxsw_sp_bridge_port *
+mlxsw_sp_bridge_port_find(struct mlxsw_sp_bridge *bridge,
+ struct net_device *brport_dev);
+
+u8 mlxsw_sp_bridge_port_stp_state(struct mlxsw_sp_bridge_port *bridge_port);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index a655c5850aa6..3922c1cfe5f5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -417,13 +417,10 @@ static int mlxsw_sx_port_get_phys_port_name(struct net_device *dev, char *name,
size_t len)
{
struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
- int err;
-
- err = snprintf(name, len, "p%d", mlxsw_sx_port->mapping.module + 1);
- if (err >= len)
- return -EINVAL;
- return 0;
+ return mlxsw_core_port_get_phys_port_name(mlxsw_sx_port->mlxsw_sx->core,
+ mlxsw_sx_port->local_port,
+ name, len);
}
static const struct net_device_ops mlxsw_sx_port_netdev_ops = {
@@ -1149,7 +1146,7 @@ static int __mlxsw_sx_port_eth_create(struct mlxsw_sx *mlxsw_sx, u8 local_port,
}
mlxsw_core_port_eth_set(mlxsw_sx->core, mlxsw_sx_port->local_port,
- mlxsw_sx_port, dev, false, 0);
+ mlxsw_sx_port, dev, module + 1, false, 0);
mlxsw_sx->ports[local_port] = mlxsw_sx_port;
return 0;
diff --git a/drivers/net/ethernet/mscc/Kconfig b/drivers/net/ethernet/mscc/Kconfig
new file mode 100644
index 000000000000..36c84625d54e
--- /dev/null
+++ b/drivers/net/ethernet/mscc/Kconfig
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: (GPL-2.0 OR MIT)
+config NET_VENDOR_MICROSEMI
+ bool "Microsemi devices"
+ default y
+ help
+ If you have a network (Ethernet) card belonging to this class, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Microsemi devices.
+
+if NET_VENDOR_MICROSEMI
+
+config MSCC_OCELOT_SWITCH
+ tristate "Ocelot switch driver"
+ depends on NET_SWITCHDEV
+ depends on HAS_IOMEM
+ select PHYLIB
+ select REGMAP_MMIO
+ help
+ This driver supports the Ocelot network switch device.
+
+config MSCC_OCELOT_SWITCH_OCELOT
+ tristate "Ocelot switch driver on Ocelot"
+ depends on MSCC_OCELOT_SWITCH
+ help
+ This driver supports the Ocelot network switch device as present on
+ the Ocelot SoCs.
+
+endif # NET_VENDOR_MICROSEMI
diff --git a/drivers/net/ethernet/mscc/Makefile b/drivers/net/ethernet/mscc/Makefile
new file mode 100644
index 000000000000..cb52a3b128ae
--- /dev/null
+++ b/drivers/net/ethernet/mscc/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: (GPL-2.0 OR MIT)
+obj-$(CONFIG_MSCC_OCELOT_SWITCH) += mscc_ocelot_common.o
+mscc_ocelot_common-y := ocelot.o ocelot_io.o
+mscc_ocelot_common-y += ocelot_regs.o
+obj-$(CONFIG_MSCC_OCELOT_SWITCH_OCELOT) += ocelot_board.o
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
new file mode 100644
index 000000000000..c8c74aa548d9
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -0,0 +1,1333 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Microsemi Ocelot Switch driver
+ *
+ * Copyright (c) 2017 Microsemi Corporation
+ */
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/if_bridge.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/skbuff.h>
+#include <net/arp.h>
+#include <net/netevent.h>
+#include <net/rtnetlink.h>
+#include <net/switchdev.h>
+
+#include "ocelot.h"
+
+/* MAC table entry types.
+ * ENTRYTYPE_NORMAL is subject to aging.
+ * ENTRYTYPE_LOCKED is not subject to aging.
+ * ENTRYTYPE_MACv4 is not subject to aging. For IPv4 multicast.
+ * ENTRYTYPE_MACv6 is not subject to aging. For IPv6 multicast.
+ */
+enum macaccess_entry_type {
+ ENTRYTYPE_NORMAL = 0,
+ ENTRYTYPE_LOCKED,
+ ENTRYTYPE_MACv4,
+ ENTRYTYPE_MACv6,
+};
+
+struct ocelot_mact_entry {
+ u8 mac[ETH_ALEN];
+ u16 vid;
+ enum macaccess_entry_type type;
+};
+
+static inline int ocelot_mact_wait_for_completion(struct ocelot *ocelot)
+{
+ unsigned int val, timeout = 10;
+
+ /* Wait for the issued mac table command to be completed, or timeout.
+ * When the command read from ANA_TABLES_MACACCESS is
+ * MACACCESS_CMD_IDLE, the issued command completed successfully.
+ */
+ do {
+ val = ocelot_read(ocelot, ANA_TABLES_MACACCESS);
+ val &= ANA_TABLES_MACACCESS_MAC_TABLE_CMD_M;
+ } while (val != MACACCESS_CMD_IDLE && --timeout);
+
+ if (!timeout)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static void ocelot_mact_select(struct ocelot *ocelot,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid)
+{
+ u32 macl = 0, mach = 0;
+
+ /* Set the MAC address to handle and the associated VLAN, in a format
+ * understood by the hardware.
+ */
+ mach |= vid << 16;
+ mach |= mac[0] << 8;
+ mach |= mac[1] << 0;
+ macl |= mac[2] << 24;
+ macl |= mac[3] << 16;
+ macl |= mac[4] << 8;
+ macl |= mac[5] << 0;
+
+ ocelot_write(ocelot, macl, ANA_TABLES_MACLDATA);
+ ocelot_write(ocelot, mach, ANA_TABLES_MACHDATA);
+}
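Concretely, the packing above splits the 48-bit MAC across the two data registers and places the VID in bits 16-27 of MACHDATA. A stand-alone sketch with a hypothetical entry (MAC 00:11:22:33:44:55, VID 42):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint32_t vid = 42;
	/* Same shifts as ocelot_mact_select() */
	uint32_t mach = (vid << 16) | (mac[0] << 8) | mac[1];
	uint32_t macl = ((uint32_t)mac[2] << 24) | (mac[3] << 16) |
			(mac[4] << 8) | mac[5];

	/* Expected: mach=0x002a0011 macl=0x22334455 */
	printf("mach=0x%08x macl=0x%08x\n", mach, macl);
	return 0;
}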
+
+static int ocelot_mact_learn(struct ocelot *ocelot, int port,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid,
+ enum macaccess_entry_type type)
+{
+ ocelot_mact_select(ocelot, mac, vid);
+
+ /* Issue a write command */
+ ocelot_write(ocelot, ANA_TABLES_MACACCESS_VALID |
+ ANA_TABLES_MACACCESS_DEST_IDX(port) |
+ ANA_TABLES_MACACCESS_ENTRYTYPE(type) |
+ ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_LEARN),
+ ANA_TABLES_MACACCESS);
+
+ return ocelot_mact_wait_for_completion(ocelot);
+}
+
+static int ocelot_mact_forget(struct ocelot *ocelot,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid)
+{
+ ocelot_mact_select(ocelot, mac, vid);
+
+ /* Issue a forget command */
+ ocelot_write(ocelot,
+ ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_FORGET),
+ ANA_TABLES_MACACCESS);
+
+ return ocelot_mact_wait_for_completion(ocelot);
+}
+
+static void ocelot_mact_init(struct ocelot *ocelot)
+{
+ /* Configure the learning mode entries attributes:
+ * - Do not copy the frame to the CPU extraction queues.
+ * - Use the vlan and mac_copy for dmac lookup.
+ */
+ ocelot_rmw(ocelot, 0,
+ ANA_AGENCTRL_LEARN_CPU_COPY | ANA_AGENCTRL_IGNORE_DMAC_FLAGS
+ | ANA_AGENCTRL_LEARN_FWD_KILL
+ | ANA_AGENCTRL_LEARN_IGNORE_VLAN,
+ ANA_AGENCTRL);
+
+ /* Clear the MAC table */
+ ocelot_write(ocelot, MACACCESS_CMD_INIT, ANA_TABLES_MACACCESS);
+}
+
+static inline int ocelot_vlant_wait_for_completion(struct ocelot *ocelot)
+{
+ unsigned int val, timeout = 10;
+
+ /* Wait for the issued vlan table command to be completed, or timeout.
+ * When the command read from ANA_TABLES_VLANACCESS is
+ * ANA_TABLES_VLANACCESS_CMD_IDLE, the issued command completed
+ * successfully.
+ */
+ do {
+ val = ocelot_read(ocelot, ANA_TABLES_VLANACCESS);
+ val &= ANA_TABLES_VLANACCESS_VLAN_TBL_CMD_M;
+ } while (val != ANA_TABLES_VLANACCESS_CMD_IDLE && --timeout);
+
+ if (!timeout)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static void ocelot_vlan_init(struct ocelot *ocelot)
+{
+ /* Clear VLAN table, by default all ports are members of all VLANs */
+ ocelot_write(ocelot, ANA_TABLES_VLANACCESS_CMD_INIT,
+ ANA_TABLES_VLANACCESS);
+ ocelot_vlant_wait_for_completion(ocelot);
+}
+
+/* Watermark encode
+ * Bit 8: Unit; 0:1, 1:16
+ * Bit 7-0: Value to be multiplied with unit
+ */
+static u16 ocelot_wm_enc(u16 value)
+{
+ if (value >= BIT(8))
+ return BIT(8) | (value / 16);
+
+ return value;
+}
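The encoding passes values below 256 through directly and switches to coarser units of 16 above that, at the cost of rounding. A stand-alone round-trip sketch; wm_dec is an assumed inverse for illustration, not a driver function:

#include <stdint.h>
#include <stdio.h>

/* Stand-alone copy of the watermark encoding above. */
static uint16_t wm_enc(uint16_t value)
{
	if (value >= (1u << 8))
		return (1u << 8) | (value / 16);
	return value;
}

/* Assumed inverse: unit bit set means the low byte counts units of 16. */
static uint16_t wm_dec(uint16_t enc)
{
	if (enc & (1u << 8))
		return (enc & 0xff) * 16;
	return enc;
}

int main(void)
{
	/* 151 -> 0x097 -> 151 (exact); 1000 -> 0x13e -> 992 (rounded down) */
	printf("%u -> 0x%03x -> %u\n", 151u,
	       (unsigned)wm_enc(151), (unsigned)wm_dec(wm_enc(151)));
	printf("%u -> 0x%03x -> %u\n", 1000u,
	       (unsigned)wm_enc(1000), (unsigned)wm_dec(wm_enc(1000)));
	return 0;
}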
+
+static void ocelot_port_adjust_link(struct net_device *dev)
+{
+ struct ocelot_port *port = netdev_priv(dev);
+ struct ocelot *ocelot = port->ocelot;
+ u8 p = port->chip_port;
+ int speed, atop_wm, mode = 0;
+
+ switch (dev->phydev->speed) {
+ case SPEED_10:
+ speed = OCELOT_SPEED_10;
+ break;
+ case SPEED_100:
+ speed = OCELOT_SPEED_100;
+ break;
+ case SPEED_1000:
+ speed = OCELOT_SPEED_1000;
+ mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA;
+ break;
+ case SPEED_2500:
+ speed = OCELOT_SPEED_2500;
+ mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA;
+ break;
+ default:
+ netdev_err(dev, "Unsupported PHY speed: %d\n",
+ dev->phydev->speed);
+ return;
+ }
+
+ phy_print_status(dev->phydev);
+
+ if (!dev->phydev->link)
+ return;
+
+ /* Only full duplex supported for now */
+ ocelot_port_writel(port, DEV_MAC_MODE_CFG_FDX_ENA |
+ mode, DEV_MAC_MODE_CFG);
+
+ /* Set MAC IFG Gaps
+ * FDX: TX_IFG = 5, RX_IFG1 = RX_IFG2 = 0
+ * !FDX: TX_IFG = 5, RX_IFG1 = RX_IFG2 = 5
+ */
+ ocelot_port_writel(port, DEV_MAC_IFG_CFG_TX_IFG(5), DEV_MAC_IFG_CFG);
+
+ /* Load seed (0) and set MAC HDX late collision */
+ ocelot_port_writel(port, DEV_MAC_HDX_CFG_LATE_COL_POS(67) |
+ DEV_MAC_HDX_CFG_SEED_LOAD,
+ DEV_MAC_HDX_CFG);
+ mdelay(1);
+ ocelot_port_writel(port, DEV_MAC_HDX_CFG_LATE_COL_POS(67),
+ DEV_MAC_HDX_CFG);
+
+ /* Disable HDX fast control */
+ ocelot_port_writel(port, DEV_PORT_MISC_HDX_FAST_DIS, DEV_PORT_MISC);
+
+ /* SGMII only for now */
+ ocelot_port_writel(port, PCS1G_MODE_CFG_SGMII_MODE_ENA, PCS1G_MODE_CFG);
+ ocelot_port_writel(port, PCS1G_SD_CFG_SD_SEL, PCS1G_SD_CFG);
+
+ /* Enable PCS */
+ ocelot_port_writel(port, PCS1G_CFG_PCS_ENA, PCS1G_CFG);
+
+ /* No aneg on SGMII */
+ ocelot_port_writel(port, 0, PCS1G_ANEG_CFG);
+
+ /* No loopback */
+ ocelot_port_writel(port, 0, PCS1G_LB_CFG);
+
+ /* Set Max Length and maximum tags allowed */
+ ocelot_port_writel(port, VLAN_ETH_FRAME_LEN, DEV_MAC_MAXLEN_CFG);
+ ocelot_port_writel(port, DEV_MAC_TAGS_CFG_TAG_ID(ETH_P_8021AD) |
+ DEV_MAC_TAGS_CFG_VLAN_AWR_ENA |
+ DEV_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA,
+ DEV_MAC_TAGS_CFG);
+
+ /* Enable MAC module */
+ ocelot_port_writel(port, DEV_MAC_ENA_CFG_RX_ENA |
+ DEV_MAC_ENA_CFG_TX_ENA, DEV_MAC_ENA_CFG);
+
+ /* Take MAC, Port, Phy (intern) and PCS (SGMII/Serdes) clock out of
+ * reset.
+ */
+ ocelot_port_writel(port, DEV_CLOCK_CFG_LINK_SPEED(speed),
+ DEV_CLOCK_CFG);
+
+ /* Set SMAC of Pause frame (00:00:00:00:00:00) */
+ ocelot_port_writel(port, 0, DEV_MAC_FC_MAC_HIGH_CFG);
+ ocelot_port_writel(port, 0, DEV_MAC_FC_MAC_LOW_CFG);
+
+ /* No PFC */
+ ocelot_write_gix(ocelot, ANA_PFC_PFC_CFG_FC_LINK_SPEED(speed),
+ ANA_PFC_PFC_CFG, p);
+
+ /* Set Pause WM hysteresis
+ * 152 = 6 * VLAN_ETH_FRAME_LEN / OCELOT_BUFFER_CELL_SZ
+ * 101 = 4 * VLAN_ETH_FRAME_LEN / OCELOT_BUFFER_CELL_SZ
+ */
+ ocelot_write_rix(ocelot, SYS_PAUSE_CFG_PAUSE_ENA |
+ SYS_PAUSE_CFG_PAUSE_STOP(101) |
+ SYS_PAUSE_CFG_PAUSE_START(152), SYS_PAUSE_CFG, p);
+
+ /* Core: Enable port for frame transfer */
+ ocelot_write_rix(ocelot, QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE |
+ QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG(1) |
+ QSYS_SWITCH_PORT_MODE_PORT_ENA,
+ QSYS_SWITCH_PORT_MODE, p);
+
+ /* Flow control */
+ ocelot_write_rix(ocelot, SYS_MAC_FC_CFG_PAUSE_VAL_CFG(0xffff) |
+ SYS_MAC_FC_CFG_RX_FC_ENA | SYS_MAC_FC_CFG_TX_FC_ENA |
+ SYS_MAC_FC_CFG_ZERO_PAUSE_ENA |
+ SYS_MAC_FC_CFG_FC_LATENCY_CFG(0x7) |
+ SYS_MAC_FC_CFG_FC_LINK_SPEED(speed),
+ SYS_MAC_FC_CFG, p);
+ ocelot_write_rix(ocelot, 0, ANA_POL_FLOWC, p);
+
+ /* Tail dropping watermark */
+ atop_wm = (ocelot->shared_queue_sz - 9 * VLAN_ETH_FRAME_LEN) / OCELOT_BUFFER_CELL_SZ;
+ ocelot_write_rix(ocelot, ocelot_wm_enc(9 * VLAN_ETH_FRAME_LEN),
+ SYS_ATOP, p);
+ ocelot_write(ocelot, ocelot_wm_enc(atop_wm), SYS_ATOP_TOT_CFG);
+}
+
+static int ocelot_port_open(struct net_device *dev)
+{
+ struct ocelot_port *port = netdev_priv(dev);
+ struct ocelot *ocelot = port->ocelot;
+ int err;
+
+ /* Enable receiving frames on the port, and activate auto-learning of
+ * MAC addresses.
+ */
+ ocelot_write_gix(ocelot, ANA_PORT_PORT_CFG_LEARNAUTO |
+ ANA_PORT_PORT_CFG_RECV_ENA |
+ ANA_PORT_PORT_CFG_PORTID_VAL(port->chip_port),
+ ANA_PORT_PORT_CFG, port->chip_port);
+
+ err = phy_connect_direct(dev, port->phy, &ocelot_port_adjust_link,
+ PHY_INTERFACE_MODE_NA);
+ if (err) {
+ netdev_err(dev, "Could not attach to PHY\n");
+ return err;
+ }
+
+ dev->phydev = port->phy;
+
+ phy_attached_info(port->phy);
+ phy_start(port->phy);
+ return 0;
+}
+
+static int ocelot_port_stop(struct net_device *dev)
+{
+ struct ocelot_port *port = netdev_priv(dev);
+
+ phy_disconnect(port->phy);
+
+ dev->phydev = NULL;
+
+ ocelot_port_writel(port, 0, DEV_MAC_ENA_CFG);
+ ocelot_rmw_rix(port->ocelot, 0, QSYS_SWITCH_PORT_MODE_PORT_ENA,
+ QSYS_SWITCH_PORT_MODE, port->chip_port);
+ return 0;
+}
+
+/* Generate the IFH for frame injection
+ *
+ * The IFH is a 128bit-value
+ * bit 127: bypass the analyzer processing
+ * bit 56-67: destination mask
+ * bit 28-29: pop_cnt: 3 disables all rewriting of the frame
+ * bit 20-27: cpu extraction queue mask
+ * bit 16: tag type 0: C-tag, 1: S-tag
+ * bit 0-11: VID
+ */
+static int ocelot_gen_ifh(u32 *ifh, struct frame_info *info)
+{
+ ifh[0] = IFH_INJ_BYPASS;
+ ifh[1] = (0xff00 & info->port) >> 8;
+ ifh[2] = (0xff & info->port) << 24;
+ ifh[3] = IFH_INJ_POP_CNT_DISABLE | (info->cpuq << 20) |
+ (info->tag_type << 16) | info->vid;
+
+ return 0;
+}
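With ifh[0] carrying IFH bits 127..96 down to ifh[3] carrying bits 31..0 (the order the injection loop writes them), a frame for chip port 2 with CPU queue mask 0xff, a C-tag and VID 0 packs as in this stand-alone sketch; the values are hypothetical:

#include <stdint.h>
#include <stdio.h>

#define IFH_INJ_BYPASS          (1u << 31)
#define IFH_INJ_POP_CNT_DISABLE (3u << 28)

int main(void)
{
	uint16_t port = 1u << 2;	/* destination mask: chip port 2 */
	uint8_t cpuq = 0xff, tag_type = 0;
	uint16_t vid = 0;
	uint32_t ifh[4];

	ifh[0] = IFH_INJ_BYPASS;
	ifh[1] = (0xff00 & port) >> 8;	/* mask bits 64..67 */
	ifh[2] = (0xff & port) << 24;	/* mask bits 56..63 */
	ifh[3] = IFH_INJ_POP_CNT_DISABLE | ((uint32_t)cpuq << 20) |
		 ((uint32_t)tag_type << 16) | vid;

	/* Expected: 80000000 00000000 04000000 3ff00000 */
	printf("%08x %08x %08x %08x\n", ifh[0], ifh[1], ifh[2], ifh[3]);
	return 0;
}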
+
+static int ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct ocelot_port *port = netdev_priv(dev);
+ struct ocelot *ocelot = port->ocelot;
+ u32 val, ifh[IFH_LEN];
+ struct frame_info info = {};
+ u8 grp = 0; /* Send everything on CPU group 0 */
+ unsigned int i, count, last;
+
+ val = ocelot_read(ocelot, QS_INJ_STATUS);
+ if (!(val & QS_INJ_STATUS_FIFO_RDY(BIT(grp))) ||
+ (val & QS_INJ_STATUS_WMARK_REACHED(BIT(grp))))
+ return NETDEV_TX_BUSY;
+
+ ocelot_write_rix(ocelot, QS_INJ_CTRL_GAP_SIZE(1) |
+ QS_INJ_CTRL_SOF, QS_INJ_CTRL, grp);
+
+ info.port = BIT(port->chip_port);
+ info.cpuq = 0xff;
+ ocelot_gen_ifh(ifh, &info);
+
+ for (i = 0; i < IFH_LEN; i++)
+ ocelot_write_rix(ocelot, ifh[i], QS_INJ_WR, grp);
+
+ count = (skb->len + 3) / 4;
+ last = skb->len % 4;
+ for (i = 0; i < count; i++)
+ ocelot_write_rix(ocelot, ((u32 *)skb->data)[i], QS_INJ_WR, grp);
+
+ /* Add padding */
+ while (i < (OCELOT_BUFFER_CELL_SZ / 4)) {
+ ocelot_write_rix(ocelot, 0, QS_INJ_WR, grp);
+ i++;
+ }
+
+ /* Indicate EOF and valid bytes in last word */
+ ocelot_write_rix(ocelot, QS_INJ_CTRL_GAP_SIZE(1) |
+ QS_INJ_CTRL_VLD_BYTES(skb->len < OCELOT_BUFFER_CELL_SZ ? 0 : last) |
+ QS_INJ_CTRL_EOF,
+ QS_INJ_CTRL, grp);
+
+ /* Add dummy CRC */
+ ocelot_write_rix(ocelot, 0, QS_INJ_WR, grp);
+ skb_tx_timestamp(skb);
+
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+ dev_kfree_skb_any(skb);
+
+ return NETDEV_TX_OK;
+}
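The word-count, padding and valid-byte arithmetic above can be checked in isolation. A stand-alone sketch with a hypothetical 65-byte frame:

#include <stdio.h>

#define OCELOT_BUFFER_CELL_SZ 60	/* from the driver header */

int main(void)
{
	unsigned int len = 65;			/* hypothetical frame length */
	unsigned int count = (len + 3) / 4;	/* 17 words pushed to QS_INJ_WR */
	unsigned int last = len % 4;		/* 1 valid byte in the last word */
	unsigned int vld = len < OCELOT_BUFFER_CELL_SZ ? 0 : last;

	/* Frames shorter than one 60-byte cell are zero-padded out to
	 * 15 words; a 65-byte frame needs no padding.
	 */
	printf("count=%u last=%u vld_bytes=%u\n", count, last, vld);
	return 0;
}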
+
+static void ocelot_mact_mc_reset(struct ocelot_port *port)
+{
+ struct ocelot *ocelot = port->ocelot;
+ struct netdev_hw_addr *ha, *n;
+
+ /* Free and forget all the MAC addresses stored in the port private mc
+ * list. These are mc addresses that were previously added by calling
+ * ocelot_mact_mc_add().
+ */
+ list_for_each_entry_safe(ha, n, &port->mc, list) {
+ ocelot_mact_forget(ocelot, ha->addr, port->pvid);
+ list_del(&ha->list);
+ kfree(ha);
+ }
+}
+
+static int ocelot_mact_mc_add(struct ocelot_port *port,
+ struct netdev_hw_addr *hw_addr)
+{
+ struct ocelot *ocelot = port->ocelot;
+ struct netdev_hw_addr *ha = kzalloc(sizeof(*ha), GFP_KERNEL);
+
+ if (!ha)
+ return -ENOMEM;
+
+ memcpy(ha, hw_addr, sizeof(*ha));
+ list_add_tail(&ha->list, &port->mc);
+
+ ocelot_mact_learn(ocelot, PGID_CPU, ha->addr, port->pvid,
+ ENTRYTYPE_LOCKED);
+
+ return 0;
+}
+
+static void ocelot_set_rx_mode(struct net_device *dev)
+{
+ struct ocelot_port *port = netdev_priv(dev);
+ struct ocelot *ocelot = port->ocelot;
+ struct netdev_hw_addr *ha;
+ int i;
+ u32 val;
+
+ /* This doesn't handle promiscuous mode because the bridge core sets
+ * IFF_PROMISC on all slave interfaces, so all frames would be
+ * forwarded to the CPU port anyway.
+ */
+ val = GENMASK(ocelot->num_phys_ports - 1, 0);
+ for (i = ocelot->num_phys_ports + 1; i < PGID_CPU; i++)
+ ocelot_write_rix(ocelot, val, ANA_PGID_PGID, i);
+
+ /* Handle the device multicast addresses. First remove all the
+ * previously installed addresses and then add the latest ones to the
+ * mac table.
+ */
+ ocelot_mact_mc_reset(port);
+ netdev_for_each_mc_addr(ha, dev)
+ ocelot_mact_mc_add(port, ha);
+}
+
+static int ocelot_port_get_phys_port_name(struct net_device *dev,
+ char *buf, size_t len)
+{
+ struct ocelot_port *port = netdev_priv(dev);
+ int ret;
+
+ ret = snprintf(buf, len, "p%d", port->chip_port);
+ if (ret >= len)
+ return -EINVAL;
+
+ return 0;
+}
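The return-value check above relies on snprintf() reporting the length the untruncated string would have needed, so a result greater than or equal to the buffer size signals truncation. A stand-alone illustration:

#include <stdio.h>

int main(void)
{
	char buf[4];
	/* "p123" needs 5 bytes including the terminator; snprintf returns 4 */
	int ret = snprintf(buf, sizeof(buf), "p%d", 123);

	printf("ret=%d truncated=%s\n", ret,
	       ret >= (int)sizeof(buf) ? "yes" : "no");
	return 0;
}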
+
+static int ocelot_port_set_mac_address(struct net_device *dev, void *p)
+{
+ struct ocelot_port *port = netdev_priv(dev);
+ struct ocelot *ocelot = port->ocelot;
+ const struct sockaddr *addr = p;
+
+ /* Learn the new net device MAC address in the mac table. */
+ ocelot_mact_learn(ocelot, PGID_CPU, addr->sa_data, port->pvid,
+ ENTRYTYPE_LOCKED);
+ /* Then forget the previous one. */
+ ocelot_mact_forget(ocelot, dev->dev_addr, port->pvid);
+
+ ether_addr_copy(dev->dev_addr, addr->sa_data);
+ return 0;
+}
+
+static void ocelot_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct ocelot_port *port = netdev_priv(dev);
+ struct ocelot *ocelot = port->ocelot;
+
+ /* Configure the port to read the stats from */
+ ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(port->chip_port),
+ SYS_STAT_CFG);
+
+ /* Get Rx stats */
+ stats->rx_bytes = ocelot_read(ocelot, SYS_COUNT_RX_OCTETS);
+ stats->rx_packets = ocelot_read(ocelot, SYS_COUNT_RX_SHORTS) +
+ ocelot_read(ocelot, SYS_COUNT_RX_FRAGMENTS) +
+ ocelot_read(ocelot, SYS_COUNT_RX_JABBERS) +
+ ocelot_read(ocelot, SYS_COUNT_RX_LONGS) +
+ ocelot_read(ocelot, SYS_COUNT_RX_64) +
+ ocelot_read(ocelot, SYS_COUNT_RX_65_127) +
+ ocelot_read(ocelot, SYS_COUNT_RX_128_255) +
+ ocelot_read(ocelot, SYS_COUNT_RX_256_1023) +
+ ocelot_read(ocelot, SYS_COUNT_RX_1024_1526) +
+ ocelot_read(ocelot, SYS_COUNT_RX_1527_MAX);
+ stats->multicast = ocelot_read(ocelot, SYS_COUNT_RX_MULTICAST);
+ stats->rx_dropped = dev->stats.rx_dropped;
+
+ /* Get Tx stats */
+ stats->tx_bytes = ocelot_read(ocelot, SYS_COUNT_TX_OCTETS);
+ stats->tx_packets = ocelot_read(ocelot, SYS_COUNT_TX_64) +
+ ocelot_read(ocelot, SYS_COUNT_TX_65_127) +
+ ocelot_read(ocelot, SYS_COUNT_TX_128_511) +
+ ocelot_read(ocelot, SYS_COUNT_TX_512_1023) +
+ ocelot_read(ocelot, SYS_COUNT_TX_1024_1526) +
+ ocelot_read(ocelot, SYS_COUNT_TX_1527_MAX);
+ stats->tx_dropped = ocelot_read(ocelot, SYS_COUNT_TX_DROPS) +
+ ocelot_read(ocelot, SYS_COUNT_TX_AGING);
+ stats->collisions = ocelot_read(ocelot, SYS_COUNT_TX_COLLISION);
+}
+
+static int ocelot_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev, const unsigned char *addr,
+ u16 vid, u16 flags)
+{
+ struct ocelot_port *port = netdev_priv(dev);
+ struct ocelot *ocelot = port->ocelot;
+
+ return ocelot_mact_learn(ocelot, port->chip_port, addr, vid,
+ ENTRYTYPE_NORMAL);
+}
+
+static int ocelot_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev,
+ const unsigned char *addr, u16 vid)
+{
+ struct ocelot_port *port = netdev_priv(dev);
+ struct ocelot *ocelot = port->ocelot;
+
+ return ocelot_mact_forget(ocelot, addr, vid);
+}
+
+struct ocelot_dump_ctx {
+ struct net_device *dev;
+ struct sk_buff *skb;
+ struct netlink_callback *cb;
+ int idx;
+};
+
+static int ocelot_fdb_do_dump(struct ocelot_mact_entry *entry,
+ struct ocelot_dump_ctx *dump)
+{
+ u32 portid = NETLINK_CB(dump->cb->skb).portid;
+ u32 seq = dump->cb->nlh->nlmsg_seq;
+ struct nlmsghdr *nlh;
+ struct ndmsg *ndm;
+
+ if (dump->idx < dump->cb->args[2])
+ goto skip;
+
+ nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
+ sizeof(*ndm), NLM_F_MULTI);
+ if (!nlh)
+ return -EMSGSIZE;
+
+ ndm = nlmsg_data(nlh);
+ ndm->ndm_family = AF_BRIDGE;
+ ndm->ndm_pad1 = 0;
+ ndm->ndm_pad2 = 0;
+ ndm->ndm_flags = NTF_SELF;
+ ndm->ndm_type = 0;
+ ndm->ndm_ifindex = dump->dev->ifindex;
+ ndm->ndm_state = NUD_REACHABLE;
+
+ if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, entry->mac))
+ goto nla_put_failure;
+
+ if (entry->vid && nla_put_u16(dump->skb, NDA_VLAN, entry->vid))
+ goto nla_put_failure;
+
+ nlmsg_end(dump->skb, nlh);
+
+skip:
+ dump->idx++;
+ return 0;
+
+nla_put_failure:
+ nlmsg_cancel(dump->skb, nlh);
+ return -EMSGSIZE;
+}
+
+static inline int ocelot_mact_read(struct ocelot_port *port, int row, int col,
+ struct ocelot_mact_entry *entry)
+{
+ struct ocelot *ocelot = port->ocelot;
+ char mac[ETH_ALEN];
+ u32 val, dst, macl, mach;
+
+ /* Set row and column to read from */
+ ocelot_field_write(ocelot, ANA_TABLES_MACTINDX_M_INDEX, row);
+ ocelot_field_write(ocelot, ANA_TABLES_MACTINDX_BUCKET, col);
+
+ /* Issue a read command */
+ ocelot_write(ocelot,
+ ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_READ),
+ ANA_TABLES_MACACCESS);
+
+ if (ocelot_mact_wait_for_completion(ocelot))
+ return -ETIMEDOUT;
+
+ /* Read the entry flags */
+ val = ocelot_read(ocelot, ANA_TABLES_MACACCESS);
+ if (!(val & ANA_TABLES_MACACCESS_VALID))
+ return -EINVAL;
+
+ /* If the entry read has another port configured as its destination,
+ * do not report it.
+ */
+ dst = (val & ANA_TABLES_MACACCESS_DEST_IDX_M) >> 3;
+ if (dst != port->chip_port)
+ return -EINVAL;
+
+ /* Get the entry's MAC address and VLAN id */
+ macl = ocelot_read(ocelot, ANA_TABLES_MACLDATA);
+ mach = ocelot_read(ocelot, ANA_TABLES_MACHDATA);
+
+ mac[0] = (mach >> 8) & 0xff;
+ mac[1] = (mach >> 0) & 0xff;
+ mac[2] = (macl >> 24) & 0xff;
+ mac[3] = (macl >> 16) & 0xff;
+ mac[4] = (macl >> 8) & 0xff;
+ mac[5] = (macl >> 0) & 0xff;
+
+ entry->vid = (mach >> 16) & 0xfff;
+ ether_addr_copy(entry->mac, mac);
+
+ return 0;
+}
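The unpacking mirrors the packing done at learn time; feeding in the register values from the earlier packing sketch (hypothetical entry MAC 00:11:22:33:44:55, VID 42) recovers the entry:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t mach = 0x002a0011, macl = 0x22334455; /* from the pack sketch */
	uint8_t mac[6];
	uint16_t vid;

	mac[0] = (mach >> 8) & 0xff;
	mac[1] = mach & 0xff;
	mac[2] = (macl >> 24) & 0xff;
	mac[3] = (macl >> 16) & 0xff;
	mac[4] = (macl >> 8) & 0xff;
	mac[5] = macl & 0xff;
	vid = (mach >> 16) & 0xfff;	/* VID lives in bits 16-27 of MACHDATA */

	/* Expected: 00:11:22:33:44:55 vid 42 */
	printf("%02x:%02x:%02x:%02x:%02x:%02x vid %u\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], vid);
	return 0;
}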
+
+static int ocelot_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
+ struct net_device *dev,
+ struct net_device *filter_dev, int *idx)
+{
+ struct ocelot_port *port = netdev_priv(dev);
+ int i, j, ret = 0;
+ struct ocelot_dump_ctx dump = {
+ .dev = dev,
+ .skb = skb,
+ .cb = cb,
+ .idx = *idx,
+ };
+
+ struct ocelot_mact_entry entry;
+
+ /* Loop through all the mac table entries. There are 1024 rows of 4
+ * entries.
+ */
+ for (i = 0; i < 1024; i++) {
+ for (j = 0; j < 4; j++) {
+ ret = ocelot_mact_read(port, i, j, &entry);
+ /* If the entry is not valid, or belongs to another
+ * port, skip it.
+ */
+ if (ret == -EINVAL)
+ continue;
+ else if (ret)
+ goto end;
+
+ ret = ocelot_fdb_do_dump(&entry, &dump);
+ if (ret)
+ goto end;
+ }
+ }
+
+end:
+ *idx = dump.idx;
+ return ret;
+}
+
+static const struct net_device_ops ocelot_port_netdev_ops = {
+ .ndo_open = ocelot_port_open,
+ .ndo_stop = ocelot_port_stop,
+ .ndo_start_xmit = ocelot_port_xmit,
+ .ndo_set_rx_mode = ocelot_set_rx_mode,
+ .ndo_get_phys_port_name = ocelot_port_get_phys_port_name,
+ .ndo_set_mac_address = ocelot_port_set_mac_address,
+ .ndo_get_stats64 = ocelot_get_stats64,
+ .ndo_fdb_add = ocelot_fdb_add,
+ .ndo_fdb_del = ocelot_fdb_del,
+ .ndo_fdb_dump = ocelot_fdb_dump,
+};
+
+static void ocelot_get_strings(struct net_device *netdev, u32 sset, u8 *data)
+{
+ struct ocelot_port *port = netdev_priv(netdev);
+ struct ocelot *ocelot = port->ocelot;
+ int i;
+
+ if (sset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < ocelot->num_stats; i++)
+ memcpy(data + i * ETH_GSTRING_LEN, ocelot->stats_layout[i].name,
+ ETH_GSTRING_LEN);
+}
+
+static void ocelot_check_stats(struct work_struct *work)
+{
+ struct delayed_work *del_work = to_delayed_work(work);
+ struct ocelot *ocelot = container_of(del_work, struct ocelot, stats_work);
+ int i, j;
+
+ mutex_lock(&ocelot->stats_lock);
+
+ for (i = 0; i < ocelot->num_phys_ports; i++) {
+ /* Configure the port to read the stats from */
+ ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(i), SYS_STAT_CFG);
+
+ for (j = 0; j < ocelot->num_stats; j++) {
+ u32 val;
+ unsigned int idx = i * ocelot->num_stats + j;
+
+ val = ocelot_read_rix(ocelot, SYS_COUNT_RX_OCTETS,
+ ocelot->stats_layout[j].offset);
+
+ if (val < (ocelot->stats[idx] & U32_MAX))
+ ocelot->stats[idx] += (u64)1 << 32;
+
+ ocelot->stats[idx] = (ocelot->stats[idx] &
+ ~(u64)U32_MAX) + val;
+ }
+ }
+
+ cancel_delayed_work(&ocelot->stats_work);
+ queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work,
+ OCELOT_STATS_CHECK_DELAY);
+
+ mutex_unlock(&ocelot->stats_lock);
+}
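The update above widens the 32-bit hardware counters to 64 bits in software: a fresh reading below the stored low half means the counter wrapped, so the high half is bumped before the low half is replaced. A stand-alone sketch with hypothetical readings:

#include <stdint.h>
#include <stdio.h>

/* Fold a fresh 32-bit hardware reading into a 64-bit software counter,
 * using the same comparison as ocelot_check_stats().
 */
static uint64_t stat_update(uint64_t stat, uint32_t val)
{
	if (val < (uint32_t)stat)	/* low half went backwards: wrapped */
		stat += (uint64_t)1 << 32;
	return (stat & ~(uint64_t)UINT32_MAX) + val;
}

int main(void)
{
	uint64_t stat = 0xfffffff0;	/* close to a 32-bit wrap */

	stat = stat_update(stat, 0x10);	/* hardware wrapped past zero */
	printf("0x%llx\n", (unsigned long long)stat); /* 0x100000010 */
	return 0;
}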
+
+static void ocelot_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct ocelot_port *port = netdev_priv(dev);
+ struct ocelot *ocelot = port->ocelot;
+ int i;
+
+ /* check and update now */
+ ocelot_check_stats(&ocelot->stats_work.work);
+
+ /* Copy all counters */
+ for (i = 0; i < ocelot->num_stats; i++)
+ *data++ = ocelot->stats[port->chip_port * ocelot->num_stats + i];
+}
+
+static int ocelot_get_sset_count(struct net_device *dev, int sset)
+{
+ struct ocelot_port *port = netdev_priv(dev);
+ struct ocelot *ocelot = port->ocelot;
+
+ if (sset != ETH_SS_STATS)
+ return -EOPNOTSUPP;
+ return ocelot->num_stats;
+}
+
+static const struct ethtool_ops ocelot_ethtool_ops = {
+ .get_strings = ocelot_get_strings,
+ .get_ethtool_stats = ocelot_get_ethtool_stats,
+ .get_sset_count = ocelot_get_sset_count,
+};
+
+static int ocelot_port_attr_get(struct net_device *dev,
+ struct switchdev_attr *attr)
+{
+ struct ocelot_port *ocelot_port = netdev_priv(dev);
+ struct ocelot *ocelot = ocelot_port->ocelot;
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
+ attr->u.ppid.id_len = sizeof(ocelot->base_mac);
+ memcpy(&attr->u.ppid.id, &ocelot->base_mac,
+ attr->u.ppid.id_len);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int ocelot_port_attr_stp_state_set(struct ocelot_port *ocelot_port,
+ struct switchdev_trans *trans,
+ u8 state)
+{
+ struct ocelot *ocelot = ocelot_port->ocelot;
+ u32 port_cfg;
+ int port, i;
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ if (!(BIT(ocelot_port->chip_port) & ocelot->bridge_mask))
+ return 0;
+
+ port_cfg = ocelot_read_gix(ocelot, ANA_PORT_PORT_CFG,
+ ocelot_port->chip_port);
+
+ switch (state) {
+ case BR_STATE_FORWARDING:
+ ocelot->bridge_fwd_mask |= BIT(ocelot_port->chip_port);
+ /* Fallthrough */
+ case BR_STATE_LEARNING:
+ port_cfg |= ANA_PORT_PORT_CFG_LEARN_ENA;
+ break;
+
+ default:
+ port_cfg &= ~ANA_PORT_PORT_CFG_LEARN_ENA;
+ ocelot->bridge_fwd_mask &= ~BIT(ocelot_port->chip_port);
+ break;
+ }
+
+ ocelot_write_gix(ocelot, port_cfg, ANA_PORT_PORT_CFG,
+ ocelot_port->chip_port);
+
+ /* Apply FWD mask. The loop is needed to add/remove the current port as
+ * a source for the other ports.
+ */
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ if (ocelot->bridge_fwd_mask & BIT(port)) {
+ unsigned long mask = ocelot->bridge_fwd_mask & ~BIT(port);
+
+ for (i = 0; i < ocelot->num_phys_ports; i++) {
+ unsigned long bond_mask = ocelot->lags[i];
+
+ if (!bond_mask)
+ continue;
+
+ if (bond_mask & BIT(port)) {
+ mask &= ~bond_mask;
+ break;
+ }
+ }
+
+ ocelot_write_rix(ocelot,
+ BIT(ocelot->num_phys_ports) | mask,
+ ANA_PGID_PGID, PGID_SRC + port);
+ } else {
+ /* Only the CPU port, this is compatible with link
+ * aggregation.
+ */
+ ocelot_write_rix(ocelot,
+ BIT(ocelot->num_phys_ports),
+ ANA_PGID_PGID, PGID_SRC + port);
+ }
+ }
+
+ return 0;
+}
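Reduced to its arithmetic, the loop above gives every forwarding port the CPU port plus all other forwarding ports as permitted sources, and non-forwarding ports only the CPU port; the bond-mask exclusion is omitted in this stand-alone sketch for a hypothetical 4-port switch with ports 0-2 forwarding:

#include <stdio.h>

int main(void)
{
	unsigned int num_ports = 4, cpu = 1u << 4; /* CPU port bit, 0x10 */
	unsigned long fwd_mask = 0x7;		/* ports 0, 1 and 2 forwarding */
	unsigned int port;

	for (port = 0; port < num_ports; port++) {
		unsigned long mask;

		if (fwd_mask & (1u << port))
			mask = cpu | (fwd_mask & ~(1u << port));
		else
			mask = cpu;	/* only the CPU port may reach it */
		/* Expected: 0x16, 0x15, 0x13, 0x10 */
		printf("PGID_SRC+%u = 0x%02lx\n", port, mask);
	}
	return 0;
}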
+
+static void ocelot_port_attr_ageing_set(struct ocelot_port *ocelot_port,
+ unsigned long ageing_clock_t)
+{
+ struct ocelot *ocelot = ocelot_port->ocelot;
+ unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
+ u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
+
+ ocelot_write(ocelot, ANA_AUTOAGE_AGE_PERIOD(ageing_time / 2),
+ ANA_AUTOAGE);
+}
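Because an entry is only dropped after two AGE_PERIODs have elapsed, the requested ageing time is halved before being written. A stand-alone sketch of the conversion, assuming the usual USER_HZ of 100 for the clock_t granularity:

#include <stdio.h>

#define USER_HZ 100	/* userspace clock_t ticks per second, an assumption */

int main(void)
{
	unsigned long ageing_clock_t = 30000;	/* 300 s as sent by the bridge */
	unsigned long ageing_secs = ageing_clock_t / USER_HZ;

	/* The entry is aged after 2 * AGE_PERIOD, hence the halving. */
	printf("AGE_PERIOD = %lu\n", ageing_secs / 2);	/* 150 */
	return 0;
}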
+
+static void ocelot_port_attr_mc_set(struct ocelot_port *port, bool mc)
+{
+ struct ocelot *ocelot = port->ocelot;
+ u32 val = ocelot_read_gix(ocelot, ANA_PORT_CPU_FWD_CFG,
+ port->chip_port);
+
+ if (mc)
+ val |= ANA_PORT_CPU_FWD_CFG_CPU_IGMP_REDIR_ENA |
+ ANA_PORT_CPU_FWD_CFG_CPU_MLD_REDIR_ENA |
+ ANA_PORT_CPU_FWD_CFG_CPU_IPMC_CTRL_COPY_ENA;
+ else
+ val &= ~(ANA_PORT_CPU_FWD_CFG_CPU_IGMP_REDIR_ENA |
+ ANA_PORT_CPU_FWD_CFG_CPU_MLD_REDIR_ENA |
+ ANA_PORT_CPU_FWD_CFG_CPU_IPMC_CTRL_COPY_ENA);
+
+ ocelot_write_gix(ocelot, val, ANA_PORT_CPU_FWD_CFG, port->chip_port);
+}
+
+static int ocelot_port_attr_set(struct net_device *dev,
+ const struct switchdev_attr *attr,
+ struct switchdev_trans *trans)
+{
+ struct ocelot_port *ocelot_port = netdev_priv(dev);
+ int err = 0;
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
+ ocelot_port_attr_stp_state_set(ocelot_port, trans,
+ attr->u.stp_state);
+ break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
+ ocelot_port_attr_ageing_set(ocelot_port, attr->u.ageing_time);
+ break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
+ ocelot_port_attr_mc_set(ocelot_port, !attr->u.mc_disabled);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static struct ocelot_multicast *ocelot_multicast_get(struct ocelot *ocelot,
+ const unsigned char *addr,
+ u16 vid)
+{
+ struct ocelot_multicast *mc;
+
+ list_for_each_entry(mc, &ocelot->multicast, list) {
+ if (ether_addr_equal(mc->addr, addr) && mc->vid == vid)
+ return mc;
+ }
+
+ return NULL;
+}
+
+static int ocelot_port_obj_add_mdb(struct net_device *dev,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct switchdev_trans *trans)
+{
+ struct ocelot_port *port = netdev_priv(dev);
+ struct ocelot *ocelot = port->ocelot;
+ struct ocelot_multicast *mc;
+ unsigned char addr[ETH_ALEN];
+ u16 vid = mdb->vid;
+ bool new = false;
+
+ if (!vid)
+ vid = 1;
+
+ mc = ocelot_multicast_get(ocelot, mdb->addr, vid);
+ if (!mc) {
+ mc = devm_kzalloc(ocelot->dev, sizeof(*mc), GFP_KERNEL);
+ if (!mc)
+ return -ENOMEM;
+
+ memcpy(mc->addr, mdb->addr, ETH_ALEN);
+ mc->vid = vid;
+
+ list_add_tail(&mc->list, &ocelot->multicast);
+ new = true;
+ }
+
+ memcpy(addr, mc->addr, ETH_ALEN);
+ addr[0] = 0;
+
+ if (!new) {
+ addr[2] = mc->ports << 0;
+ addr[1] = mc->ports >> 8;
+ ocelot_mact_forget(ocelot, addr, vid);
+ }
+
+ mc->ports |= BIT(port->chip_port);
+ addr[2] = mc->ports << 0;
+ addr[1] = mc->ports >> 8;
+
+ return ocelot_mact_learn(ocelot, 0, addr, vid, ENTRYTYPE_MACv4);
+}
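For ENTRYTYPE_MACv4 the destination is not a single port index but a port mask folded into the top bytes of the MAC address itself: addr[0] is zeroed and addr[1]/addr[2] carry the high and low bytes of the mask. A stand-alone sketch with hypothetical chip ports 1 and 3 subscribed to 01:00:5e:01:02:03:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t addr[6] = { 0x01, 0x00, 0x5e, 0x01, 0x02, 0x03 };
	uint16_t ports = (1u << 1) | (1u << 3);	/* chip ports 1 and 3 */

	addr[0] = 0;
	addr[1] = ports >> 8;	/* high byte of the port mask */
	addr[2] = ports & 0xff;	/* low byte of the port mask */

	/* Expected: 00:00:0a:01:02:03 */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
	return 0;
}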
+
+static int ocelot_port_obj_del_mdb(struct net_device *dev,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ struct ocelot_port *port = netdev_priv(dev);
+ struct ocelot *ocelot = port->ocelot;
+ struct ocelot_multicast *mc;
+ unsigned char addr[ETH_ALEN];
+ u16 vid = mdb->vid;
+
+ if (!vid)
+ vid = 1;
+
+ mc = ocelot_multicast_get(ocelot, mdb->addr, vid);
+ if (!mc)
+ return -ENOENT;
+
+ memcpy(addr, mc->addr, ETH_ALEN);
+ addr[2] = mc->ports << 0;
+ addr[1] = mc->ports >> 8;
+ addr[0] = 0;
+ ocelot_mact_forget(ocelot, addr, vid);
+
+ mc->ports &= ~BIT(port->chip_port);
+ if (!mc->ports) {
+ list_del(&mc->list);
+ devm_kfree(ocelot->dev, mc);
+ return 0;
+ }
+
+ addr[2] = mc->ports << 0;
+ addr[1] = mc->ports >> 8;
+
+ return ocelot_mact_learn(ocelot, 0, addr, vid, ENTRYTYPE_MACv4);
+}
+
+static int ocelot_port_obj_add(struct net_device *dev,
+ const struct switchdev_obj *obj,
+ struct switchdev_trans *trans)
+{
+ int ret = 0;
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ ret = ocelot_port_obj_add_mdb(dev, SWITCHDEV_OBJ_PORT_MDB(obj),
+ trans);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static int ocelot_port_obj_del(struct net_device *dev,
+ const struct switchdev_obj *obj)
+{
+ int ret = 0;
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ ret = ocelot_port_obj_del_mdb(dev, SWITCHDEV_OBJ_PORT_MDB(obj));
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static const struct switchdev_ops ocelot_port_switchdev_ops = {
+ .switchdev_port_attr_get = ocelot_port_attr_get,
+ .switchdev_port_attr_set = ocelot_port_attr_set,
+ .switchdev_port_obj_add = ocelot_port_obj_add,
+ .switchdev_port_obj_del = ocelot_port_obj_del,
+};
+
+static int ocelot_port_bridge_join(struct ocelot_port *ocelot_port,
+ struct net_device *bridge)
+{
+ struct ocelot *ocelot = ocelot_port->ocelot;
+
+ if (!ocelot->bridge_mask) {
+ ocelot->hw_bridge_dev = bridge;
+ } else {
+ if (ocelot->hw_bridge_dev != bridge)
+ /* This is adding the port to a second bridge, which is
+ * unsupported.
+ */
+ return -ENODEV;
+ }
+
+ ocelot->bridge_mask |= BIT(ocelot_port->chip_port);
+
+ return 0;
+}
+
+static void ocelot_port_bridge_leave(struct ocelot_port *ocelot_port,
+ struct net_device *bridge)
+{
+ struct ocelot *ocelot = ocelot_port->ocelot;
+
+ ocelot->bridge_mask &= ~BIT(ocelot_port->chip_port);
+
+ if (!ocelot->bridge_mask)
+ ocelot->hw_bridge_dev = NULL;
+}
+
+/* Checks if the net_device instance given to us originates from our driver. */
+static bool ocelot_netdevice_dev_check(const struct net_device *dev)
+{
+ return dev->netdev_ops == &ocelot_port_netdev_ops;
+}
+
+static int ocelot_netdevice_port_event(struct net_device *dev,
+ unsigned long event,
+ struct netdev_notifier_changeupper_info *info)
+{
+ struct ocelot_port *ocelot_port = netdev_priv(dev);
+ int err = 0;
+
+ if (!ocelot_netdevice_dev_check(dev))
+ return 0;
+
+ switch (event) {
+ case NETDEV_CHANGEUPPER:
+ if (netif_is_bridge_master(info->upper_dev)) {
+ if (info->linking)
+ err = ocelot_port_bridge_join(ocelot_port,
+ info->upper_dev);
+ else
+ ocelot_port_bridge_leave(ocelot_port,
+ info->upper_dev);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return err;
+}
+
+static int ocelot_netdevice_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct netdev_notifier_changeupper_info *info = ptr;
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ int ret;
+
+ if (netif_is_lag_master(dev)) {
+ struct net_device *slave;
+ struct list_head *iter;
+
+ netdev_for_each_lower_dev(dev, slave, iter) {
+ ret = ocelot_netdevice_port_event(slave, event, info);
+ if (ret)
+ goto notify;
+ }
+ } else {
+ ret = ocelot_netdevice_port_event(dev, event, info);
+ }
+
+notify:
+ return notifier_from_errno(ret);
+}
+
+struct notifier_block ocelot_netdevice_nb __read_mostly = {
+ .notifier_call = ocelot_netdevice_event,
+};
+EXPORT_SYMBOL(ocelot_netdevice_nb);
+
+int ocelot_probe_port(struct ocelot *ocelot, u8 port,
+ void __iomem *regs,
+ struct phy_device *phy)
+{
+ struct ocelot_port *ocelot_port;
+ struct net_device *dev;
+ int err;
+
+ dev = alloc_etherdev(sizeof(struct ocelot_port));
+ if (!dev)
+ return -ENOMEM;
+ SET_NETDEV_DEV(dev, ocelot->dev);
+ ocelot_port = netdev_priv(dev);
+ ocelot_port->dev = dev;
+ ocelot_port->ocelot = ocelot;
+ ocelot_port->regs = regs;
+ ocelot_port->chip_port = port;
+ ocelot_port->phy = phy;
+ INIT_LIST_HEAD(&ocelot_port->mc);
+ ocelot->ports[port] = ocelot_port;
+
+ dev->netdev_ops = &ocelot_port_netdev_ops;
+ dev->ethtool_ops = &ocelot_ethtool_ops;
+ dev->switchdev_ops = &ocelot_port_switchdev_ops;
+
+ memcpy(dev->dev_addr, ocelot->base_mac, ETH_ALEN);
+ dev->dev_addr[ETH_ALEN - 1] += port;
+ ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr, ocelot_port->pvid,
+ ENTRYTYPE_LOCKED);
+
+ err = register_netdev(dev);
+ if (err) {
+ dev_err(ocelot->dev, "register_netdev failed\n");
+ goto err_register_netdev;
+ }
+
+ return 0;
+
+err_register_netdev:
+ free_netdev(dev);
+ return err;
+}
+EXPORT_SYMBOL(ocelot_probe_port);
+
+int ocelot_init(struct ocelot *ocelot)
+{
+ u32 port;
+ int i, cpu = ocelot->num_phys_ports;
+ char queue_name[32];
+
+ ocelot->stats = devm_kcalloc(ocelot->dev,
+ ocelot->num_phys_ports * ocelot->num_stats,
+ sizeof(u64), GFP_KERNEL);
+ if (!ocelot->stats)
+ return -ENOMEM;
+
+ mutex_init(&ocelot->stats_lock);
+ snprintf(queue_name, sizeof(queue_name), "%s-stats",
+ dev_name(ocelot->dev));
+ ocelot->stats_queue = create_singlethread_workqueue(queue_name);
+ if (!ocelot->stats_queue)
+ return -ENOMEM;
+
+ ocelot_mact_init(ocelot);
+ ocelot_vlan_init(ocelot);
+
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ /* Clear all counters (5 groups) */
+ ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(port) |
+ SYS_STAT_CFG_STAT_CLEAR_SHOT(0x7f),
+ SYS_STAT_CFG);
+ }
+
+ /* Only use S-Tag */
+ ocelot_write(ocelot, ETH_P_8021AD, SYS_VLAN_ETYPE_CFG);
+
+ /* Aggregation mode */
+ ocelot_write(ocelot, ANA_AGGR_CFG_AC_SMAC_ENA |
+ ANA_AGGR_CFG_AC_DMAC_ENA |
+ ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA |
+ ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA, ANA_AGGR_CFG);
+
+ /* Set MAC age time to default value. The entry is aged after
+ * 2*AGE_PERIOD
+ */
+ ocelot_write(ocelot,
+ ANA_AUTOAGE_AGE_PERIOD(BR_DEFAULT_AGEING_TIME / 2 / HZ),
+ ANA_AUTOAGE);
+
+ /* Disable learning for frames discarded by VLAN ingress filtering */
+ regmap_field_write(ocelot->regfields[ANA_ADVLEARN_VLAN_CHK], 1);
+
+ /* Setup frame ageing - fixed value "2 sec" - in 6.5 us units */
+ ocelot_write(ocelot, SYS_FRM_AGING_AGE_TX_ENA |
+ SYS_FRM_AGING_MAX_AGE(307692), SYS_FRM_AGING);
+
+ /* Setup flooding PGIDs */
+ ocelot_write_rix(ocelot, ANA_FLOODING_FLD_MULTICAST(PGID_MC) |
+ ANA_FLOODING_FLD_BROADCAST(PGID_MC) |
+ ANA_FLOODING_FLD_UNICAST(PGID_UC),
+ ANA_FLOODING, 0);
+ ocelot_write(ocelot, ANA_FLOODING_IPMC_FLD_MC6_DATA(PGID_MCIPV6) |
+ ANA_FLOODING_IPMC_FLD_MC6_CTRL(PGID_MC) |
+ ANA_FLOODING_IPMC_FLD_MC4_DATA(PGID_MCIPV4) |
+ ANA_FLOODING_IPMC_FLD_MC4_CTRL(PGID_MC),
+ ANA_FLOODING_IPMC);
+
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ /* Transmit the frame to the local port. */
+ ocelot_write_rix(ocelot, BIT(port), ANA_PGID_PGID, port);
+ /* Do not forward BPDU frames to the front ports. */
+ ocelot_write_gix(ocelot,
+ ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0xffff),
+ ANA_PORT_CPU_FWD_BPDU_CFG,
+ port);
+ /* Ensure bridging is disabled */
+ ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_SRC + port);
+ }
+
+ /* Configure and enable the CPU port. */
+ ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, cpu);
+ ocelot_write_rix(ocelot, BIT(cpu), ANA_PGID_PGID, PGID_CPU);
+ ocelot_write_gix(ocelot, ANA_PORT_PORT_CFG_RECV_ENA |
+ ANA_PORT_PORT_CFG_PORTID_VAL(cpu),
+ ANA_PORT_PORT_CFG, cpu);
+
+ /* Allow broadcast MAC frames. */
+ for (i = ocelot->num_phys_ports + 1; i < PGID_CPU; i++) {
+ u32 val = ANA_PGID_PGID_PGID(GENMASK(ocelot->num_phys_ports - 1, 0));
+
+ ocelot_write_rix(ocelot, val, ANA_PGID_PGID, i);
+ }
+ ocelot_write_rix(ocelot,
+ ANA_PGID_PGID_PGID(GENMASK(ocelot->num_phys_ports, 0)),
+ ANA_PGID_PGID, PGID_MC);
+ ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_MCIPV4);
+ ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_MCIPV6);
+
+ /* CPU port Injection/Extraction configuration */
+ ocelot_write_rix(ocelot, QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE |
+ QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG(1) |
+ QSYS_SWITCH_PORT_MODE_PORT_ENA,
+ QSYS_SWITCH_PORT_MODE, cpu);
+ ocelot_write_rix(ocelot, SYS_PORT_MODE_INCL_XTR_HDR(1) |
+ SYS_PORT_MODE_INCL_INJ_HDR(1), SYS_PORT_MODE, cpu);
+ /* Allow manual injection via DEVCPU_QS registers, and byte swap these
+ * registers' endianness.
+ */
+ ocelot_write_rix(ocelot, QS_INJ_GRP_CFG_BYTE_SWAP |
+ QS_INJ_GRP_CFG_MODE(1), QS_INJ_GRP_CFG, 0);
+ ocelot_write_rix(ocelot, QS_XTR_GRP_CFG_BYTE_SWAP |
+ QS_XTR_GRP_CFG_MODE(1), QS_XTR_GRP_CFG, 0);
+ ocelot_write(ocelot, ANA_CPUQ_CFG_CPUQ_MIRROR(2) |
+ ANA_CPUQ_CFG_CPUQ_LRN(2) |
+ ANA_CPUQ_CFG_CPUQ_MAC_COPY(2) |
+ ANA_CPUQ_CFG_CPUQ_SRC_COPY(2) |
+ ANA_CPUQ_CFG_CPUQ_LOCKED_PORTMOVE(2) |
+ ANA_CPUQ_CFG_CPUQ_ALLBRIDGE(6) |
+ ANA_CPUQ_CFG_CPUQ_IPMC_CTRL(6) |
+ ANA_CPUQ_CFG_CPUQ_IGMP(6) |
+ ANA_CPUQ_CFG_CPUQ_MLD(6), ANA_CPUQ_CFG);
+ for (i = 0; i < 16; i++)
+ ocelot_write_rix(ocelot, ANA_CPUQ_8021_CFG_CPUQ_GARP_VAL(6) |
+ ANA_CPUQ_8021_CFG_CPUQ_BPDU_VAL(6),
+ ANA_CPUQ_8021_CFG, i);
+
+ INIT_DELAYED_WORK(&ocelot->stats_work, ocelot_check_stats);
+ queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work,
+ OCELOT_STATS_CHECK_DELAY);
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_init);
+
+void ocelot_deinit(struct ocelot *ocelot)
+{
+ destroy_workqueue(ocelot->stats_queue);
+ mutex_destroy(&ocelot->stats_lock);
+}
+EXPORT_SYMBOL(ocelot_deinit);
+
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h
new file mode 100644
index 000000000000..097bd12a10d4
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot.h
@@ -0,0 +1,572 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Microsemi Ocelot Switch driver
+ *
+ * Copyright (c) 2017 Microsemi Corporation
+ */
+
+#ifndef _MSCC_OCELOT_H_
+#define _MSCC_OCELOT_H_
+
+#include <linux/bitops.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "ocelot_ana.h"
+#include "ocelot_dev.h"
+#include "ocelot_hsio.h"
+#include "ocelot_qsys.h"
+#include "ocelot_rew.h"
+#include "ocelot_sys.h"
+#include "ocelot_qs.h"
+
+#define PGID_AGGR 64
+#define PGID_SRC 80
+
+/* Reserved PGIDs */
+#define PGID_CPU (PGID_AGGR - 5)
+#define PGID_UC (PGID_AGGR - 4)
+#define PGID_MC (PGID_AGGR - 3)
+#define PGID_MCIPV4 (PGID_AGGR - 2)
+#define PGID_MCIPV6 (PGID_AGGR - 1)
+
+#define OCELOT_BUFFER_CELL_SZ 60
+
+#define OCELOT_STATS_CHECK_DELAY (2 * HZ)
+
+#define IFH_LEN 4
+
+struct frame_info {
+ u32 len;
+ u16 port;
+ u16 vid;
+ u8 cpuq;
+ u8 tag_type;
+};
+
+#define IFH_INJ_BYPASS BIT(31)
+#define IFH_INJ_POP_CNT_DISABLE (3 << 28)
+
+#define IFH_TAG_TYPE_C 0
+#define IFH_TAG_TYPE_S 1
+
+#define OCELOT_SPEED_2500 0
+#define OCELOT_SPEED_1000 1
+#define OCELOT_SPEED_100 2
+#define OCELOT_SPEED_10 3
+
+#define TARGET_OFFSET 24
+#define REG_MASK GENMASK(TARGET_OFFSET - 1, 0)
+#define REG(reg, offset) [reg & REG_MASK] = offset
+
+enum ocelot_target {
+ ANA = 1,
+ QS,
+ QSYS,
+ REW,
+ SYS,
+ HSIO,
+ TARGET_MAX,
+};
+
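+/* Each register enum value encodes its target block in bits 31:24 and an
+ * index into that target's offset table in bits 23:0 (the slot filled in
+ * by the REG() initializer above), so a single u32 names any register:
+ * target = reg >> TARGET_OFFSET, offset = map[target][reg & REG_MASK].
+ */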
+enum ocelot_reg {
+ ANA_ADVLEARN = ANA << TARGET_OFFSET,
+ ANA_VLANMASK,
+ ANA_PORT_B_DOMAIN,
+ ANA_ANAGEFIL,
+ ANA_ANEVENTS,
+ ANA_STORMLIMIT_BURST,
+ ANA_STORMLIMIT_CFG,
+ ANA_ISOLATED_PORTS,
+ ANA_COMMUNITY_PORTS,
+ ANA_AUTOAGE,
+ ANA_MACTOPTIONS,
+ ANA_LEARNDISC,
+ ANA_AGENCTRL,
+ ANA_MIRRORPORTS,
+ ANA_EMIRRORPORTS,
+ ANA_FLOODING,
+ ANA_FLOODING_IPMC,
+ ANA_SFLOW_CFG,
+ ANA_PORT_MODE,
+ ANA_CUT_THRU_CFG,
+ ANA_PGID_PGID,
+ ANA_TABLES_ANMOVED,
+ ANA_TABLES_MACHDATA,
+ ANA_TABLES_MACLDATA,
+ ANA_TABLES_STREAMDATA,
+ ANA_TABLES_MACACCESS,
+ ANA_TABLES_MACTINDX,
+ ANA_TABLES_VLANACCESS,
+ ANA_TABLES_VLANTIDX,
+ ANA_TABLES_ISDXACCESS,
+ ANA_TABLES_ISDXTIDX,
+ ANA_TABLES_ENTRYLIM,
+ ANA_TABLES_PTP_ID_HIGH,
+ ANA_TABLES_PTP_ID_LOW,
+ ANA_TABLES_STREAMACCESS,
+ ANA_TABLES_STREAMTIDX,
+ ANA_TABLES_SEQ_HISTORY,
+ ANA_TABLES_SEQ_MASK,
+ ANA_TABLES_SFID_MASK,
+ ANA_TABLES_SFIDACCESS,
+ ANA_TABLES_SFIDTIDX,
+ ANA_MSTI_STATE,
+ ANA_OAM_UPM_LM_CNT,
+ ANA_SG_ACCESS_CTRL,
+ ANA_SG_CONFIG_REG_1,
+ ANA_SG_CONFIG_REG_2,
+ ANA_SG_CONFIG_REG_3,
+ ANA_SG_CONFIG_REG_4,
+ ANA_SG_CONFIG_REG_5,
+ ANA_SG_GCL_GS_CONFIG,
+ ANA_SG_GCL_TI_CONFIG,
+ ANA_SG_STATUS_REG_1,
+ ANA_SG_STATUS_REG_2,
+ ANA_SG_STATUS_REG_3,
+ ANA_PORT_VLAN_CFG,
+ ANA_PORT_DROP_CFG,
+ ANA_PORT_QOS_CFG,
+ ANA_PORT_VCAP_CFG,
+ ANA_PORT_VCAP_S1_KEY_CFG,
+ ANA_PORT_VCAP_S2_CFG,
+ ANA_PORT_PCP_DEI_MAP,
+ ANA_PORT_CPU_FWD_CFG,
+ ANA_PORT_CPU_FWD_BPDU_CFG,
+ ANA_PORT_CPU_FWD_GARP_CFG,
+ ANA_PORT_CPU_FWD_CCM_CFG,
+ ANA_PORT_PORT_CFG,
+ ANA_PORT_POL_CFG,
+ ANA_PORT_PTP_CFG,
+ ANA_PORT_PTP_DLY1_CFG,
+ ANA_PORT_PTP_DLY2_CFG,
+ ANA_PORT_SFID_CFG,
+ ANA_PFC_PFC_CFG,
+ ANA_PFC_PFC_TIMER,
+ ANA_IPT_OAM_MEP_CFG,
+ ANA_IPT_IPT,
+ ANA_PPT_PPT,
+ ANA_FID_MAP_FID_MAP,
+ ANA_AGGR_CFG,
+ ANA_CPUQ_CFG,
+ ANA_CPUQ_CFG2,
+ ANA_CPUQ_8021_CFG,
+ ANA_DSCP_CFG,
+ ANA_DSCP_REWR_CFG,
+ ANA_VCAP_RNG_TYPE_CFG,
+ ANA_VCAP_RNG_VAL_CFG,
+ ANA_VRAP_CFG,
+ ANA_VRAP_HDR_DATA,
+ ANA_VRAP_HDR_MASK,
+ ANA_DISCARD_CFG,
+ ANA_FID_CFG,
+ ANA_POL_PIR_CFG,
+ ANA_POL_CIR_CFG,
+ ANA_POL_MODE_CFG,
+ ANA_POL_PIR_STATE,
+ ANA_POL_CIR_STATE,
+ ANA_POL_STATE,
+ ANA_POL_FLOWC,
+ ANA_POL_HYST,
+ ANA_POL_MISC_CFG,
+ QS_XTR_GRP_CFG = QS << TARGET_OFFSET,
+ QS_XTR_RD,
+ QS_XTR_FRM_PRUNING,
+ QS_XTR_FLUSH,
+ QS_XTR_DATA_PRESENT,
+ QS_XTR_CFG,
+ QS_INJ_GRP_CFG,
+ QS_INJ_WR,
+ QS_INJ_CTRL,
+ QS_INJ_STATUS,
+ QS_INJ_ERR,
+ QS_INH_DBG,
+ QSYS_PORT_MODE = QSYS << TARGET_OFFSET,
+ QSYS_SWITCH_PORT_MODE,
+ QSYS_STAT_CNT_CFG,
+ QSYS_EEE_CFG,
+ QSYS_EEE_THRES,
+ QSYS_IGR_NO_SHARING,
+ QSYS_EGR_NO_SHARING,
+ QSYS_SW_STATUS,
+ QSYS_EXT_CPU_CFG,
+ QSYS_PAD_CFG,
+ QSYS_CPU_GROUP_MAP,
+ QSYS_QMAP,
+ QSYS_ISDX_SGRP,
+ QSYS_TIMED_FRAME_ENTRY,
+ QSYS_TFRM_MISC,
+ QSYS_TFRM_PORT_DLY,
+ QSYS_TFRM_TIMER_CFG_1,
+ QSYS_TFRM_TIMER_CFG_2,
+ QSYS_TFRM_TIMER_CFG_3,
+ QSYS_TFRM_TIMER_CFG_4,
+ QSYS_TFRM_TIMER_CFG_5,
+ QSYS_TFRM_TIMER_CFG_6,
+ QSYS_TFRM_TIMER_CFG_7,
+ QSYS_TFRM_TIMER_CFG_8,
+ QSYS_RED_PROFILE,
+ QSYS_RES_QOS_MODE,
+ QSYS_RES_CFG,
+ QSYS_RES_STAT,
+ QSYS_EGR_DROP_MODE,
+ QSYS_EQ_CTRL,
+ QSYS_EVENTS_CORE,
+ QSYS_QMAXSDU_CFG_0,
+ QSYS_QMAXSDU_CFG_1,
+ QSYS_QMAXSDU_CFG_2,
+ QSYS_QMAXSDU_CFG_3,
+ QSYS_QMAXSDU_CFG_4,
+ QSYS_QMAXSDU_CFG_5,
+ QSYS_QMAXSDU_CFG_6,
+ QSYS_QMAXSDU_CFG_7,
+ QSYS_PREEMPTION_CFG,
+ QSYS_CIR_CFG,
+ QSYS_EIR_CFG,
+ QSYS_SE_CFG,
+ QSYS_SE_DWRR_CFG,
+ QSYS_SE_CONNECT,
+ QSYS_SE_DLB_SENSE,
+ QSYS_CIR_STATE,
+ QSYS_EIR_STATE,
+ QSYS_SE_STATE,
+ QSYS_HSCH_MISC_CFG,
+ QSYS_TAG_CONFIG,
+ QSYS_TAS_PARAM_CFG_CTRL,
+ QSYS_PORT_MAX_SDU,
+ QSYS_PARAM_CFG_REG_1,
+ QSYS_PARAM_CFG_REG_2,
+ QSYS_PARAM_CFG_REG_3,
+ QSYS_PARAM_CFG_REG_4,
+ QSYS_PARAM_CFG_REG_5,
+ QSYS_GCL_CFG_REG_1,
+ QSYS_GCL_CFG_REG_2,
+ QSYS_PARAM_STATUS_REG_1,
+ QSYS_PARAM_STATUS_REG_2,
+ QSYS_PARAM_STATUS_REG_3,
+ QSYS_PARAM_STATUS_REG_4,
+ QSYS_PARAM_STATUS_REG_5,
+ QSYS_PARAM_STATUS_REG_6,
+ QSYS_PARAM_STATUS_REG_7,
+ QSYS_PARAM_STATUS_REG_8,
+ QSYS_PARAM_STATUS_REG_9,
+ QSYS_GCL_STATUS_REG_1,
+ QSYS_GCL_STATUS_REG_2,
+ REW_PORT_VLAN_CFG = REW << TARGET_OFFSET,
+ REW_TAG_CFG,
+ REW_PORT_CFG,
+ REW_DSCP_CFG,
+ REW_PCP_DEI_QOS_MAP_CFG,
+ REW_PTP_CFG,
+ REW_PTP_DLY1_CFG,
+ REW_RED_TAG_CFG,
+ REW_DSCP_REMAP_DP1_CFG,
+ REW_DSCP_REMAP_CFG,
+ REW_STAT_CFG,
+ REW_REW_STICKY,
+ REW_PPT,
+ SYS_COUNT_RX_OCTETS = SYS << TARGET_OFFSET,
+ SYS_COUNT_RX_UNICAST,
+ SYS_COUNT_RX_MULTICAST,
+ SYS_COUNT_RX_BROADCAST,
+ SYS_COUNT_RX_SHORTS,
+ SYS_COUNT_RX_FRAGMENTS,
+ SYS_COUNT_RX_JABBERS,
+ SYS_COUNT_RX_CRC_ALIGN_ERRS,
+ SYS_COUNT_RX_SYM_ERRS,
+ SYS_COUNT_RX_64,
+ SYS_COUNT_RX_65_127,
+ SYS_COUNT_RX_128_255,
+ SYS_COUNT_RX_256_1023,
+ SYS_COUNT_RX_1024_1526,
+ SYS_COUNT_RX_1527_MAX,
+ SYS_COUNT_RX_PAUSE,
+ SYS_COUNT_RX_CONTROL,
+ SYS_COUNT_RX_LONGS,
+ SYS_COUNT_RX_CLASSIFIED_DROPS,
+ SYS_COUNT_TX_OCTETS,
+ SYS_COUNT_TX_UNICAST,
+ SYS_COUNT_TX_MULTICAST,
+ SYS_COUNT_TX_BROADCAST,
+ SYS_COUNT_TX_COLLISION,
+ SYS_COUNT_TX_DROPS,
+ SYS_COUNT_TX_PAUSE,
+ SYS_COUNT_TX_64,
+ SYS_COUNT_TX_65_127,
+ SYS_COUNT_TX_128_511,
+ SYS_COUNT_TX_512_1023,
+ SYS_COUNT_TX_1024_1526,
+ SYS_COUNT_TX_1527_MAX,
+ SYS_COUNT_TX_AGING,
+ SYS_RESET_CFG,
+ SYS_SR_ETYPE_CFG,
+ SYS_VLAN_ETYPE_CFG,
+ SYS_PORT_MODE,
+ SYS_FRONT_PORT_MODE,
+ SYS_FRM_AGING,
+ SYS_STAT_CFG,
+ SYS_SW_STATUS,
+ SYS_MISC_CFG,
+ SYS_REW_MAC_HIGH_CFG,
+ SYS_REW_MAC_LOW_CFG,
+ SYS_TIMESTAMP_OFFSET,
+ SYS_CMID,
+ SYS_PAUSE_CFG,
+ SYS_PAUSE_TOT_CFG,
+ SYS_ATOP,
+ SYS_ATOP_TOT_CFG,
+ SYS_MAC_FC_CFG,
+ SYS_MMGT,
+ SYS_MMGT_FAST,
+ SYS_EVENTS_DIF,
+ SYS_EVENTS_CORE,
+ SYS_CNT,
+ SYS_PTP_STATUS,
+ SYS_PTP_TXSTAMP,
+ SYS_PTP_NXT,
+ SYS_PTP_CFG,
+ SYS_RAM_INIT,
+ SYS_CM_ADDR,
+ SYS_CM_DATA_WR,
+ SYS_CM_DATA_RD,
+ SYS_CM_OP,
+ SYS_CM_DATA,
+ HSIO_PLL5G_CFG0 = HSIO << TARGET_OFFSET,
+ HSIO_PLL5G_CFG1,
+ HSIO_PLL5G_CFG2,
+ HSIO_PLL5G_CFG3,
+ HSIO_PLL5G_CFG4,
+ HSIO_PLL5G_CFG5,
+ HSIO_PLL5G_CFG6,
+ HSIO_PLL5G_STATUS0,
+ HSIO_PLL5G_STATUS1,
+ HSIO_PLL5G_BIST_CFG0,
+ HSIO_PLL5G_BIST_CFG1,
+ HSIO_PLL5G_BIST_CFG2,
+ HSIO_PLL5G_BIST_STAT0,
+ HSIO_PLL5G_BIST_STAT1,
+ HSIO_RCOMP_CFG0,
+ HSIO_RCOMP_STATUS,
+ HSIO_SYNC_ETH_CFG,
+ HSIO_SYNC_ETH_PLL_CFG,
+ HSIO_S1G_DES_CFG,
+ HSIO_S1G_IB_CFG,
+ HSIO_S1G_OB_CFG,
+ HSIO_S1G_SER_CFG,
+ HSIO_S1G_COMMON_CFG,
+ HSIO_S1G_PLL_CFG,
+ HSIO_S1G_PLL_STATUS,
+ HSIO_S1G_DFT_CFG0,
+ HSIO_S1G_DFT_CFG1,
+ HSIO_S1G_DFT_CFG2,
+ HSIO_S1G_TP_CFG,
+ HSIO_S1G_RC_PLL_BIST_CFG,
+ HSIO_S1G_MISC_CFG,
+ HSIO_S1G_DFT_STATUS,
+ HSIO_S1G_MISC_STATUS,
+ HSIO_MCB_S1G_ADDR_CFG,
+ HSIO_S6G_DIG_CFG,
+ HSIO_S6G_DFT_CFG0,
+ HSIO_S6G_DFT_CFG1,
+ HSIO_S6G_DFT_CFG2,
+ HSIO_S6G_TP_CFG0,
+ HSIO_S6G_TP_CFG1,
+ HSIO_S6G_RC_PLL_BIST_CFG,
+ HSIO_S6G_MISC_CFG,
+ HSIO_S6G_OB_ANEG_CFG,
+ HSIO_S6G_DFT_STATUS,
+ HSIO_S6G_ERR_CNT,
+ HSIO_S6G_MISC_STATUS,
+ HSIO_S6G_DES_CFG,
+ HSIO_S6G_IB_CFG,
+ HSIO_S6G_IB_CFG1,
+ HSIO_S6G_IB_CFG2,
+ HSIO_S6G_IB_CFG3,
+ HSIO_S6G_IB_CFG4,
+ HSIO_S6G_IB_CFG5,
+ HSIO_S6G_OB_CFG,
+ HSIO_S6G_OB_CFG1,
+ HSIO_S6G_SER_CFG,
+ HSIO_S6G_COMMON_CFG,
+ HSIO_S6G_PLL_CFG,
+ HSIO_S6G_ACJTAG_CFG,
+ HSIO_S6G_GP_CFG,
+ HSIO_S6G_IB_STATUS0,
+ HSIO_S6G_IB_STATUS1,
+ HSIO_S6G_ACJTAG_STATUS,
+ HSIO_S6G_PLL_STATUS,
+ HSIO_S6G_REVID,
+ HSIO_MCB_S6G_ADDR_CFG,
+ HSIO_HW_CFG,
+ HSIO_HW_QSGMII_CFG,
+ HSIO_HW_QSGMII_STAT,
+ HSIO_CLK_CFG,
+ HSIO_TEMP_SENSOR_CTRL,
+ HSIO_TEMP_SENSOR_CFG,
+ HSIO_TEMP_SENSOR_STAT,
+};
+
+enum ocelot_regfield {
+ ANA_ADVLEARN_VLAN_CHK,
+ ANA_ADVLEARN_LEARN_MIRROR,
+ ANA_ANEVENTS_FLOOD_DISCARD,
+ ANA_ANEVENTS_MSTI_DROP,
+ ANA_ANEVENTS_ACLKILL,
+ ANA_ANEVENTS_ACLUSED,
+ ANA_ANEVENTS_AUTOAGE,
+ ANA_ANEVENTS_VS2TTL1,
+ ANA_ANEVENTS_STORM_DROP,
+ ANA_ANEVENTS_LEARN_DROP,
+ ANA_ANEVENTS_AGED_ENTRY,
+ ANA_ANEVENTS_CPU_LEARN_FAILED,
+ ANA_ANEVENTS_AUTO_LEARN_FAILED,
+ ANA_ANEVENTS_LEARN_REMOVE,
+ ANA_ANEVENTS_AUTO_LEARNED,
+ ANA_ANEVENTS_AUTO_MOVED,
+ ANA_ANEVENTS_DROPPED,
+ ANA_ANEVENTS_CLASSIFIED_DROP,
+ ANA_ANEVENTS_CLASSIFIED_COPY,
+ ANA_ANEVENTS_VLAN_DISCARD,
+ ANA_ANEVENTS_FWD_DISCARD,
+ ANA_ANEVENTS_MULTICAST_FLOOD,
+ ANA_ANEVENTS_UNICAST_FLOOD,
+ ANA_ANEVENTS_DEST_KNOWN,
+ ANA_ANEVENTS_BUCKET3_MATCH,
+ ANA_ANEVENTS_BUCKET2_MATCH,
+ ANA_ANEVENTS_BUCKET1_MATCH,
+ ANA_ANEVENTS_BUCKET0_MATCH,
+ ANA_ANEVENTS_CPU_OPERATION,
+ ANA_ANEVENTS_DMAC_LOOKUP,
+ ANA_ANEVENTS_SMAC_LOOKUP,
+ ANA_ANEVENTS_SEQ_GEN_ERR_0,
+ ANA_ANEVENTS_SEQ_GEN_ERR_1,
+ ANA_TABLES_MACACCESS_B_DOM,
+ ANA_TABLES_MACTINDX_BUCKET,
+ ANA_TABLES_MACTINDX_M_INDEX,
+ QSYS_TIMED_FRAME_ENTRY_TFRM_VLD,
+ QSYS_TIMED_FRAME_ENTRY_TFRM_FP,
+ QSYS_TIMED_FRAME_ENTRY_TFRM_PORTNO,
+ QSYS_TIMED_FRAME_ENTRY_TFRM_TM_SEL,
+ QSYS_TIMED_FRAME_ENTRY_TFRM_TM_T,
+ SYS_RESET_CFG_CORE_ENA,
+ SYS_RESET_CFG_MEM_ENA,
+ SYS_RESET_CFG_MEM_INIT,
+ REGFIELD_MAX
+};
+
+struct ocelot_multicast {
+ struct list_head list;
+ unsigned char addr[ETH_ALEN];
+ u16 vid;
+ u16 ports;
+};
+
+struct ocelot_port;
+
+struct ocelot_stat_layout {
+ u32 offset;
+ char name[ETH_GSTRING_LEN];
+};
+
+struct ocelot {
+ struct device *dev;
+
+ struct regmap *targets[TARGET_MAX];
+ struct regmap_field *regfields[REGFIELD_MAX];
+ const u32 *const *map;
+ const struct ocelot_stat_layout *stats_layout;
+ unsigned int num_stats;
+
+ u8 base_mac[ETH_ALEN];
+
+ struct net_device *hw_bridge_dev;
+ u16 bridge_mask;
+ u16 bridge_fwd_mask;
+
+ struct workqueue_struct *ocelot_owq;
+
+ int shared_queue_sz;
+
+ u8 num_phys_ports;
+ u8 num_cpu_ports;
+ struct ocelot_port **ports;
+
+ u16 lags[16];
+
+ /* Keep track of the vlan port masks */
+ u32 vlan_mask[VLAN_N_VID];
+
+ struct list_head multicast;
+
+ /* Workqueue to periodically check the statistics counters for
+ * overflow, and the lock protecting them */
+ struct mutex stats_lock;
+ u64 *stats;
+ struct delayed_work stats_work;
+ struct workqueue_struct *stats_queue;
+};
+
+struct ocelot_port {
+ struct net_device *dev;
+ struct ocelot *ocelot;
+ struct phy_device *phy;
+ void __iomem *regs;
+ u8 chip_port;
+ /* Keep track of the mc addresses added to the MAC table, so that
+ * they can be removed when needed.
+ */
+ struct list_head mc;
+
+ /* Ingress default VLAN (pvid) */
+ u16 pvid;
+
+ /* Egress default VLAN (vid) */
+ u16 vid;
+
+ u8 vlan_aware;
+
+ u64 *stats;
+};
+
+u32 __ocelot_read_ix(struct ocelot *ocelot, u32 reg, u32 offset);
+#define ocelot_read_ix(ocelot, reg, gi, ri) __ocelot_read_ix(ocelot, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri))
+#define ocelot_read_gix(ocelot, reg, gi) __ocelot_read_ix(ocelot, reg, reg##_GSZ * (gi))
+#define ocelot_read_rix(ocelot, reg, ri) __ocelot_read_ix(ocelot, reg, reg##_RSZ * (ri))
+#define ocelot_read(ocelot, reg) __ocelot_read_ix(ocelot, reg, 0)
+
+void __ocelot_write_ix(struct ocelot *ocelot, u32 val, u32 reg, u32 offset);
+#define ocelot_write_ix(ocelot, val, reg, gi, ri) __ocelot_write_ix(ocelot, val, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri))
+#define ocelot_write_gix(ocelot, val, reg, gi) __ocelot_write_ix(ocelot, val, reg, reg##_GSZ * (gi))
+#define ocelot_write_rix(ocelot, val, reg, ri) __ocelot_write_ix(ocelot, val, reg, reg##_RSZ * (ri))
+#define ocelot_write(ocelot, val, reg) __ocelot_write_ix(ocelot, val, reg, 0)
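+
+/* Example: with ANA_PGID_PGID_RSZ == 0x4, ocelot_write_rix(ocelot, val,
+ * ANA_PGID_PGID, PGID_MC) expands to
+ * __ocelot_write_ix(ocelot, val, ANA_PGID_PGID, 0x4 * PGID_MC). The _GSZ
+ * and _RSZ constants are the strides of a register group and of a
+ * replicated register within a group.
+ */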
+
+void __ocelot_rmw_ix(struct ocelot *ocelot, u32 val, u32 reg, u32 mask,
+ u32 offset);
+#define ocelot_rmw_ix(ocelot, val, m, reg, gi, ri) __ocelot_rmw_ix(ocelot, val, m, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri))
+#define ocelot_rmw_gix(ocelot, val, m, reg, gi) __ocelot_rmw_ix(ocelot, val, m, reg, reg##_GSZ * (gi))
+#define ocelot_rmw_rix(ocelot, val, m, reg, ri) __ocelot_rmw_ix(ocelot, val, m, reg, reg##_RSZ * (ri))
+#define ocelot_rmw(ocelot, val, m, reg) __ocelot_rmw_ix(ocelot, val, m, reg, 0)
+
+u32 ocelot_port_readl(struct ocelot_port *port, u32 reg);
+void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg);
+
+int ocelot_regfields_init(struct ocelot *ocelot,
+ const struct reg_field *const regfields);
+struct regmap *ocelot_io_platform_init(struct ocelot *ocelot,
+ struct platform_device *pdev,
+ const char *name);
+
+#define ocelot_field_write(ocelot, reg, val) regmap_field_write((ocelot)->regfields[(reg)], (val))
+#define ocelot_field_read(ocelot, reg, val) regmap_field_read((ocelot)->regfields[(reg)], (val))
+
+int ocelot_init(struct ocelot *ocelot);
+void ocelot_deinit(struct ocelot *ocelot);
+int ocelot_chip_init(struct ocelot *ocelot);
+int ocelot_probe_port(struct ocelot *ocelot, u8 port,
+ void __iomem *regs,
+ struct phy_device *phy);
+
+extern struct notifier_block ocelot_netdevice_nb;
+
+#endif
diff --git a/drivers/net/ethernet/mscc/ocelot_ana.h b/drivers/net/ethernet/mscc/ocelot_ana.h
new file mode 100644
index 000000000000..841c6ec22b64
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_ana.h
@@ -0,0 +1,625 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Microsemi Ocelot Switch driver
+ *
+ * Copyright (c) 2017 Microsemi Corporation
+ */
+
+#ifndef _MSCC_OCELOT_ANA_H_
+#define _MSCC_OCELOT_ANA_H_
+
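+/* The field helpers below follow one pattern: FIELD(x) shifts and masks a
+ * value into position, FIELD_M is the raw field mask, and FIELD_X(x)
+ * extracts the field back out of a register word.
+ */
+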
+#define ANA_ANAGEFIL_B_DOM_EN BIT(22)
+#define ANA_ANAGEFIL_B_DOM_VAL BIT(21)
+#define ANA_ANAGEFIL_AGE_LOCKED BIT(20)
+#define ANA_ANAGEFIL_PID_EN BIT(19)
+#define ANA_ANAGEFIL_PID_VAL(x) (((x) << 14) & GENMASK(18, 14))
+#define ANA_ANAGEFIL_PID_VAL_M GENMASK(18, 14)
+#define ANA_ANAGEFIL_PID_VAL_X(x) (((x) & GENMASK(18, 14)) >> 14)
+#define ANA_ANAGEFIL_VID_EN BIT(13)
+#define ANA_ANAGEFIL_VID_VAL(x) ((x) & GENMASK(12, 0))
+#define ANA_ANAGEFIL_VID_VAL_M GENMASK(12, 0)
+
+#define ANA_STORMLIMIT_CFG_RSZ 0x4
+
+#define ANA_STORMLIMIT_CFG_STORM_RATE(x) (((x) << 3) & GENMASK(6, 3))
+#define ANA_STORMLIMIT_CFG_STORM_RATE_M GENMASK(6, 3)
+#define ANA_STORMLIMIT_CFG_STORM_RATE_X(x) (((x) & GENMASK(6, 3)) >> 3)
+#define ANA_STORMLIMIT_CFG_STORM_UNIT BIT(2)
+#define ANA_STORMLIMIT_CFG_STORM_MODE(x) ((x) & GENMASK(1, 0))
+#define ANA_STORMLIMIT_CFG_STORM_MODE_M GENMASK(1, 0)
+
+#define ANA_AUTOAGE_AGE_FAST BIT(21)
+#define ANA_AUTOAGE_AGE_PERIOD(x) (((x) << 1) & GENMASK(20, 1))
+#define ANA_AUTOAGE_AGE_PERIOD_M GENMASK(20, 1)
+#define ANA_AUTOAGE_AGE_PERIOD_X(x) (((x) & GENMASK(20, 1)) >> 1)
+#define ANA_AUTOAGE_AUTOAGE_LOCKED BIT(0)
+
+#define ANA_MACTOPTIONS_REDUCED_TABLE BIT(1)
+#define ANA_MACTOPTIONS_SHADOW BIT(0)
+
+#define ANA_AGENCTRL_FID_MASK(x) (((x) << 12) & GENMASK(23, 12))
+#define ANA_AGENCTRL_FID_MASK_M GENMASK(23, 12)
+#define ANA_AGENCTRL_FID_MASK_X(x) (((x) & GENMASK(23, 12)) >> 12)
+#define ANA_AGENCTRL_IGNORE_DMAC_FLAGS BIT(11)
+#define ANA_AGENCTRL_IGNORE_SMAC_FLAGS BIT(10)
+#define ANA_AGENCTRL_FLOOD_SPECIAL BIT(9)
+#define ANA_AGENCTRL_FLOOD_IGNORE_VLAN BIT(8)
+#define ANA_AGENCTRL_MIRROR_CPU BIT(7)
+#define ANA_AGENCTRL_LEARN_CPU_COPY BIT(6)
+#define ANA_AGENCTRL_LEARN_FWD_KILL BIT(5)
+#define ANA_AGENCTRL_LEARN_IGNORE_VLAN BIT(4)
+#define ANA_AGENCTRL_CPU_CPU_KILL_ENA BIT(3)
+#define ANA_AGENCTRL_GREEN_COUNT_MODE BIT(2)
+#define ANA_AGENCTRL_YELLOW_COUNT_MODE BIT(1)
+#define ANA_AGENCTRL_RED_COUNT_MODE BIT(0)
+
+#define ANA_FLOODING_RSZ 0x4
+
+#define ANA_FLOODING_FLD_UNICAST(x) (((x) << 12) & GENMASK(17, 12))
+#define ANA_FLOODING_FLD_UNICAST_M GENMASK(17, 12)
+#define ANA_FLOODING_FLD_UNICAST_X(x) (((x) & GENMASK(17, 12)) >> 12)
+#define ANA_FLOODING_FLD_BROADCAST(x) (((x) << 6) & GENMASK(11, 6))
+#define ANA_FLOODING_FLD_BROADCAST_M GENMASK(11, 6)
+#define ANA_FLOODING_FLD_BROADCAST_X(x) (((x) & GENMASK(11, 6)) >> 6)
+#define ANA_FLOODING_FLD_MULTICAST(x) ((x) & GENMASK(5, 0))
+#define ANA_FLOODING_FLD_MULTICAST_M GENMASK(5, 0)
+
+#define ANA_FLOODING_IPMC_FLD_MC4_CTRL(x) (((x) << 18) & GENMASK(23, 18))
+#define ANA_FLOODING_IPMC_FLD_MC4_CTRL_M GENMASK(23, 18)
+#define ANA_FLOODING_IPMC_FLD_MC4_CTRL_X(x) (((x) & GENMASK(23, 18)) >> 18)
+#define ANA_FLOODING_IPMC_FLD_MC4_DATA(x) (((x) << 12) & GENMASK(17, 12))
+#define ANA_FLOODING_IPMC_FLD_MC4_DATA_M GENMASK(17, 12)
+#define ANA_FLOODING_IPMC_FLD_MC4_DATA_X(x) (((x) & GENMASK(17, 12)) >> 12)
+#define ANA_FLOODING_IPMC_FLD_MC6_CTRL(x) (((x) << 6) & GENMASK(11, 6))
+#define ANA_FLOODING_IPMC_FLD_MC6_CTRL_M GENMASK(11, 6)
+#define ANA_FLOODING_IPMC_FLD_MC6_CTRL_X(x) (((x) & GENMASK(11, 6)) >> 6)
+#define ANA_FLOODING_IPMC_FLD_MC6_DATA(x) ((x) & GENMASK(5, 0))
+#define ANA_FLOODING_IPMC_FLD_MC6_DATA_M GENMASK(5, 0)
+
+#define ANA_SFLOW_CFG_RSZ 0x4
+
+#define ANA_SFLOW_CFG_SF_RATE(x) (((x) << 2) & GENMASK(13, 2))
+#define ANA_SFLOW_CFG_SF_RATE_M GENMASK(13, 2)
+#define ANA_SFLOW_CFG_SF_RATE_X(x) (((x) & GENMASK(13, 2)) >> 2)
+#define ANA_SFLOW_CFG_SF_SAMPLE_RX BIT(1)
+#define ANA_SFLOW_CFG_SF_SAMPLE_TX BIT(0)
+
+#define ANA_PORT_MODE_RSZ 0x4
+
+#define ANA_PORT_MODE_REDTAG_PARSE_CFG BIT(3)
+#define ANA_PORT_MODE_VLAN_PARSE_CFG(x) (((x) << 1) & GENMASK(2, 1))
+#define ANA_PORT_MODE_VLAN_PARSE_CFG_M GENMASK(2, 1)
+#define ANA_PORT_MODE_VLAN_PARSE_CFG_X(x) (((x) & GENMASK(2, 1)) >> 1)
+#define ANA_PORT_MODE_L3_PARSE_CFG BIT(0)
+
+#define ANA_CUT_THRU_CFG_RSZ 0x4
+
+#define ANA_PGID_PGID_RSZ 0x4
+
+#define ANA_PGID_PGID_PGID(x) ((x) & GENMASK(11, 0))
+#define ANA_PGID_PGID_PGID_M GENMASK(11, 0)
+#define ANA_PGID_PGID_CPUQ_DST_PGID(x) (((x) << 27) & GENMASK(29, 27))
+#define ANA_PGID_PGID_CPUQ_DST_PGID_M GENMASK(29, 27)
+#define ANA_PGID_PGID_CPUQ_DST_PGID_X(x) (((x) & GENMASK(29, 27)) >> 27)
+
+#define ANA_TABLES_MACHDATA_VID(x) (((x) << 16) & GENMASK(28, 16))
+#define ANA_TABLES_MACHDATA_VID_M GENMASK(28, 16)
+#define ANA_TABLES_MACHDATA_VID_X(x) (((x) & GENMASK(28, 16)) >> 16)
+#define ANA_TABLES_MACHDATA_MACHDATA(x) ((x) & GENMASK(15, 0))
+#define ANA_TABLES_MACHDATA_MACHDATA_M GENMASK(15, 0)
+
+#define ANA_TABLES_STREAMDATA_SSID_VALID BIT(16)
+#define ANA_TABLES_STREAMDATA_SSID(x) (((x) << 9) & GENMASK(15, 9))
+#define ANA_TABLES_STREAMDATA_SSID_M GENMASK(15, 9)
+#define ANA_TABLES_STREAMDATA_SSID_X(x) (((x) & GENMASK(15, 9)) >> 9)
+#define ANA_TABLES_STREAMDATA_SFID_VALID BIT(8)
+#define ANA_TABLES_STREAMDATA_SFID(x) ((x) & GENMASK(7, 0))
+#define ANA_TABLES_STREAMDATA_SFID_M GENMASK(7, 0)
+
+#define ANA_TABLES_MACACCESS_MAC_CPU_COPY BIT(15)
+#define ANA_TABLES_MACACCESS_SRC_KILL BIT(14)
+#define ANA_TABLES_MACACCESS_IGNORE_VLAN BIT(13)
+#define ANA_TABLES_MACACCESS_AGED_FLAG BIT(12)
+#define ANA_TABLES_MACACCESS_VALID BIT(11)
+#define ANA_TABLES_MACACCESS_ENTRYTYPE(x) (((x) << 9) & GENMASK(10, 9))
+#define ANA_TABLES_MACACCESS_ENTRYTYPE_M GENMASK(10, 9)
+#define ANA_TABLES_MACACCESS_ENTRYTYPE_X(x) (((x) & GENMASK(10, 9)) >> 9)
+#define ANA_TABLES_MACACCESS_DEST_IDX(x) (((x) << 3) & GENMASK(8, 3))
+#define ANA_TABLES_MACACCESS_DEST_IDX_M GENMASK(8, 3)
+#define ANA_TABLES_MACACCESS_DEST_IDX_X(x) (((x) & GENMASK(8, 3)) >> 3)
+#define ANA_TABLES_MACACCESS_MAC_TABLE_CMD(x) ((x) & GENMASK(2, 0))
+#define ANA_TABLES_MACACCESS_MAC_TABLE_CMD_M GENMASK(2, 0)
+#define MACACCESS_CMD_IDLE 0
+#define MACACCESS_CMD_LEARN 1
+#define MACACCESS_CMD_FORGET 2
+#define MACACCESS_CMD_AGE 3
+#define MACACCESS_CMD_GET_NEXT 4
+#define MACACCESS_CMD_INIT 5
+#define MACACCESS_CMD_READ 6
+#define MACACCESS_CMD_WRITE 7
+
+#define ANA_TABLES_VLANACCESS_VLAN_PORT_MASK(x) (((x) << 2) & GENMASK(13, 2))
+#define ANA_TABLES_VLANACCESS_VLAN_PORT_MASK_M GENMASK(13, 2)
+#define ANA_TABLES_VLANACCESS_VLAN_PORT_MASK_X(x) (((x) & GENMASK(13, 2)) >> 2)
+#define ANA_TABLES_VLANACCESS_VLAN_TBL_CMD(x) ((x) & GENMASK(1, 0))
+#define ANA_TABLES_VLANACCESS_VLAN_TBL_CMD_M GENMASK(1, 0)
+#define ANA_TABLES_VLANACCESS_CMD_IDLE 0x0
+#define ANA_TABLES_VLANACCESS_CMD_WRITE 0x2
+#define ANA_TABLES_VLANACCESS_CMD_INIT 0x3
+
+#define ANA_TABLES_VLANTIDX_VLAN_SEC_FWD_ENA BIT(17)
+#define ANA_TABLES_VLANTIDX_VLAN_FLOOD_DIS BIT(16)
+#define ANA_TABLES_VLANTIDX_VLAN_PRIV_VLAN BIT(15)
+#define ANA_TABLES_VLANTIDX_VLAN_LEARN_DISABLED BIT(14)
+#define ANA_TABLES_VLANTIDX_VLAN_MIRROR BIT(13)
+#define ANA_TABLES_VLANTIDX_VLAN_SRC_CHK BIT(12)
+#define ANA_TABLES_VLANTIDX_V_INDEX(x) ((x) & GENMASK(11, 0))
+#define ANA_TABLES_VLANTIDX_V_INDEX_M GENMASK(11, 0)
+
+#define ANA_TABLES_ISDXACCESS_ISDX_PORT_MASK(x) (((x) << 2) & GENMASK(8, 2))
+#define ANA_TABLES_ISDXACCESS_ISDX_PORT_MASK_M GENMASK(8, 2)
+#define ANA_TABLES_ISDXACCESS_ISDX_PORT_MASK_X(x) (((x) & GENMASK(8, 2)) >> 2)
+#define ANA_TABLES_ISDXACCESS_ISDX_TBL_CMD(x) ((x) & GENMASK(1, 0))
+#define ANA_TABLES_ISDXACCESS_ISDX_TBL_CMD_M GENMASK(1, 0)
+
+#define ANA_TABLES_ISDXTIDX_ISDX_SDLBI(x) (((x) << 21) & GENMASK(28, 21))
+#define ANA_TABLES_ISDXTIDX_ISDX_SDLBI_M GENMASK(28, 21)
+#define ANA_TABLES_ISDXTIDX_ISDX_SDLBI_X(x) (((x) & GENMASK(28, 21)) >> 21)
+#define ANA_TABLES_ISDXTIDX_ISDX_MSTI(x) (((x) << 15) & GENMASK(20, 15))
+#define ANA_TABLES_ISDXTIDX_ISDX_MSTI_M GENMASK(20, 15)
+#define ANA_TABLES_ISDXTIDX_ISDX_MSTI_X(x) (((x) & GENMASK(20, 15)) >> 15)
+#define ANA_TABLES_ISDXTIDX_ISDX_ES0_KEY_ENA BIT(14)
+#define ANA_TABLES_ISDXTIDX_ISDX_FORCE_ENA BIT(10)
+#define ANA_TABLES_ISDXTIDX_ISDX_INDEX(x) ((x) & GENMASK(7, 0))
+#define ANA_TABLES_ISDXTIDX_ISDX_INDEX_M GENMASK(7, 0)
+
+#define ANA_TABLES_ENTRYLIM_RSZ 0x4
+
+#define ANA_TABLES_ENTRYLIM_ENTRYLIM(x) (((x) << 14) & GENMASK(17, 14))
+#define ANA_TABLES_ENTRYLIM_ENTRYLIM_M GENMASK(17, 14)
+#define ANA_TABLES_ENTRYLIM_ENTRYLIM_X(x) (((x) & GENMASK(17, 14)) >> 14)
+#define ANA_TABLES_ENTRYLIM_ENTRYSTAT(x) ((x) & GENMASK(13, 0))
+#define ANA_TABLES_ENTRYLIM_ENTRYSTAT_M GENMASK(13, 0)
+
+#define ANA_TABLES_STREAMACCESS_GEN_REC_SEQ_NUM(x) (((x) << 4) & GENMASK(31, 4))
+#define ANA_TABLES_STREAMACCESS_GEN_REC_SEQ_NUM_M GENMASK(31, 4)
+#define ANA_TABLES_STREAMACCESS_GEN_REC_SEQ_NUM_X(x) (((x) & GENMASK(31, 4)) >> 4)
+#define ANA_TABLES_STREAMACCESS_SEQ_GEN_REC_ENA BIT(3)
+#define ANA_TABLES_STREAMACCESS_GEN_REC_TYPE BIT(2)
+#define ANA_TABLES_STREAMACCESS_STREAM_TBL_CMD(x) ((x) & GENMASK(1, 0))
+#define ANA_TABLES_STREAMACCESS_STREAM_TBL_CMD_M GENMASK(1, 0)
+
+#define ANA_TABLES_STREAMTIDX_SEQ_GEN_ERR_STATUS(x) (((x) << 30) & GENMASK(31, 30))
+#define ANA_TABLES_STREAMTIDX_SEQ_GEN_ERR_STATUS_M GENMASK(31, 30)
+#define ANA_TABLES_STREAMTIDX_SEQ_GEN_ERR_STATUS_X(x) (((x) & GENMASK(31, 30)) >> 30)
+#define ANA_TABLES_STREAMTIDX_S_INDEX(x) (((x) << 16) & GENMASK(22, 16))
+#define ANA_TABLES_STREAMTIDX_S_INDEX_M GENMASK(22, 16)
+#define ANA_TABLES_STREAMTIDX_S_INDEX_X(x) (((x) & GENMASK(22, 16)) >> 16)
+#define ANA_TABLES_STREAMTIDX_FORCE_SF_BEHAVIOUR BIT(14)
+#define ANA_TABLES_STREAMTIDX_SEQ_HISTORY_LEN(x) (((x) << 8) & GENMASK(13, 8))
+#define ANA_TABLES_STREAMTIDX_SEQ_HISTORY_LEN_M GENMASK(13, 8)
+#define ANA_TABLES_STREAMTIDX_SEQ_HISTORY_LEN_X(x) (((x) & GENMASK(13, 8)) >> 8)
+#define ANA_TABLES_STREAMTIDX_RESET_ON_ROGUE BIT(7)
+#define ANA_TABLES_STREAMTIDX_REDTAG_POP BIT(6)
+#define ANA_TABLES_STREAMTIDX_STREAM_SPLIT BIT(5)
+#define ANA_TABLES_STREAMTIDX_SEQ_SPACE_LOG2(x) ((x) & GENMASK(4, 0))
+#define ANA_TABLES_STREAMTIDX_SEQ_SPACE_LOG2_M GENMASK(4, 0)
+
+#define ANA_TABLES_SEQ_MASK_SPLIT_MASK(x) (((x) << 16) & GENMASK(22, 16))
+#define ANA_TABLES_SEQ_MASK_SPLIT_MASK_M GENMASK(22, 16)
+#define ANA_TABLES_SEQ_MASK_SPLIT_MASK_X(x) (((x) & GENMASK(22, 16)) >> 16)
+#define ANA_TABLES_SEQ_MASK_INPUT_PORT_MASK(x) ((x) & GENMASK(6, 0))
+#define ANA_TABLES_SEQ_MASK_INPUT_PORT_MASK_M GENMASK(6, 0)
+
+#define ANA_TABLES_SFID_MASK_IGR_PORT_MASK(x) (((x) << 1) & GENMASK(7, 1))
+#define ANA_TABLES_SFID_MASK_IGR_PORT_MASK_M GENMASK(7, 1)
+#define ANA_TABLES_SFID_MASK_IGR_PORT_MASK_X(x) (((x) & GENMASK(7, 1)) >> 1)
+#define ANA_TABLES_SFID_MASK_IGR_SRCPORT_MATCH_ENA BIT(0)
+
+#define ANA_TABLES_SFIDACCESS_IGR_PRIO_MATCH_ENA BIT(22)
+#define ANA_TABLES_SFIDACCESS_IGR_PRIO(x) (((x) << 19) & GENMASK(21, 19))
+#define ANA_TABLES_SFIDACCESS_IGR_PRIO_M GENMASK(21, 19)
+#define ANA_TABLES_SFIDACCESS_IGR_PRIO_X(x) (((x) & GENMASK(21, 19)) >> 19)
+#define ANA_TABLES_SFIDACCESS_FORCE_BLOCK BIT(18)
+#define ANA_TABLES_SFIDACCESS_MAX_SDU_LEN(x) (((x) << 2) & GENMASK(17, 2))
+#define ANA_TABLES_SFIDACCESS_MAX_SDU_LEN_M GENMASK(17, 2)
+#define ANA_TABLES_SFIDACCESS_MAX_SDU_LEN_X(x) (((x) & GENMASK(17, 2)) >> 2)
+#define ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(x) ((x) & GENMASK(1, 0))
+#define ANA_TABLES_SFIDACCESS_SFID_TBL_CMD_M GENMASK(1, 0)
+
+#define ANA_TABLES_SFIDTIDX_SGID_VALID BIT(26)
+#define ANA_TABLES_SFIDTIDX_SGID(x) (((x) << 18) & GENMASK(25, 18))
+#define ANA_TABLES_SFIDTIDX_SGID_M GENMASK(25, 18)
+#define ANA_TABLES_SFIDTIDX_SGID_X(x) (((x) & GENMASK(25, 18)) >> 18)
+#define ANA_TABLES_SFIDTIDX_POL_ENA BIT(17)
+#define ANA_TABLES_SFIDTIDX_POL_IDX(x) (((x) << 8) & GENMASK(16, 8))
+#define ANA_TABLES_SFIDTIDX_POL_IDX_M GENMASK(16, 8)
+#define ANA_TABLES_SFIDTIDX_POL_IDX_X(x) (((x) & GENMASK(16, 8)) >> 8)
+#define ANA_TABLES_SFIDTIDX_SFID_INDEX(x) ((x) & GENMASK(7, 0))
+#define ANA_TABLES_SFIDTIDX_SFID_INDEX_M GENMASK(7, 0)
+
+#define ANA_MSTI_STATE_RSZ 0x4
+
+#define ANA_OAM_UPM_LM_CNT_RSZ 0x4
+
+#define ANA_SG_ACCESS_CTRL_SGID(x) ((x) & GENMASK(7, 0))
+#define ANA_SG_ACCESS_CTRL_SGID_M GENMASK(7, 0)
+#define ANA_SG_ACCESS_CTRL_CONFIG_CHANGE BIT(28)
+
+#define ANA_SG_CONFIG_REG_3_BASE_TIME_SEC_MSB(x) ((x) & GENMASK(15, 0))
+#define ANA_SG_CONFIG_REG_3_BASE_TIME_SEC_MSB_M GENMASK(15, 0)
+#define ANA_SG_CONFIG_REG_3_LIST_LENGTH(x) (((x) << 16) & GENMASK(18, 16))
+#define ANA_SG_CONFIG_REG_3_LIST_LENGTH_M GENMASK(18, 16)
+#define ANA_SG_CONFIG_REG_3_LIST_LENGTH_X(x) (((x) & GENMASK(18, 16)) >> 16)
+#define ANA_SG_CONFIG_REG_3_GATE_ENABLE BIT(20)
+#define ANA_SG_CONFIG_REG_3_INIT_IPS(x) (((x) << 24) & GENMASK(27, 24))
+#define ANA_SG_CONFIG_REG_3_INIT_IPS_M GENMASK(27, 24)
+#define ANA_SG_CONFIG_REG_3_INIT_IPS_X(x) (((x) & GENMASK(27, 24)) >> 24)
+#define ANA_SG_CONFIG_REG_3_INIT_GATE_STATE BIT(28)
+
+#define ANA_SG_GCL_GS_CONFIG_RSZ 0x4
+
+#define ANA_SG_GCL_GS_CONFIG_IPS(x) ((x) & GENMASK(3, 0))
+#define ANA_SG_GCL_GS_CONFIG_IPS_M GENMASK(3, 0)
+#define ANA_SG_GCL_GS_CONFIG_GATE_STATE BIT(4)
+
+#define ANA_SG_GCL_TI_CONFIG_RSZ 0x4
+
+#define ANA_SG_STATUS_REG_3_CFG_CHG_TIME_SEC_MSB(x) ((x) & GENMASK(15, 0))
+#define ANA_SG_STATUS_REG_3_CFG_CHG_TIME_SEC_MSB_M GENMASK(15, 0)
+#define ANA_SG_STATUS_REG_3_GATE_STATE BIT(16)
+#define ANA_SG_STATUS_REG_3_IPS(x) (((x) << 20) & GENMASK(23, 20))
+#define ANA_SG_STATUS_REG_3_IPS_M GENMASK(23, 20)
+#define ANA_SG_STATUS_REG_3_IPS_X(x) (((x) & GENMASK(23, 20)) >> 20)
+#define ANA_SG_STATUS_REG_3_CONFIG_PENDING BIT(24)
+
+#define ANA_PORT_VLAN_CFG_GSZ 0x100
+
+#define ANA_PORT_VLAN_CFG_VLAN_VID_AS_ISDX BIT(21)
+#define ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA BIT(20)
+#define ANA_PORT_VLAN_CFG_VLAN_POP_CNT(x) (((x) << 18) & GENMASK(19, 18))
+#define ANA_PORT_VLAN_CFG_VLAN_POP_CNT_M GENMASK(19, 18)
+#define ANA_PORT_VLAN_CFG_VLAN_POP_CNT_X(x) (((x) & GENMASK(19, 18)) >> 18)
+#define ANA_PORT_VLAN_CFG_VLAN_INNER_TAG_ENA BIT(17)
+#define ANA_PORT_VLAN_CFG_VLAN_TAG_TYPE BIT(16)
+#define ANA_PORT_VLAN_CFG_VLAN_DEI BIT(15)
+#define ANA_PORT_VLAN_CFG_VLAN_PCP(x) (((x) << 12) & GENMASK(14, 12))
+#define ANA_PORT_VLAN_CFG_VLAN_PCP_M GENMASK(14, 12)
+#define ANA_PORT_VLAN_CFG_VLAN_PCP_X(x) (((x) & GENMASK(14, 12)) >> 12)
+#define ANA_PORT_VLAN_CFG_VLAN_VID(x) ((x) & GENMASK(11, 0))
+#define ANA_PORT_VLAN_CFG_VLAN_VID_M GENMASK(11, 0)
+
+#define ANA_PORT_DROP_CFG_GSZ 0x100
+
+#define ANA_PORT_DROP_CFG_DROP_UNTAGGED_ENA BIT(6)
+#define ANA_PORT_DROP_CFG_DROP_S_TAGGED_ENA BIT(5)
+#define ANA_PORT_DROP_CFG_DROP_C_TAGGED_ENA BIT(4)
+#define ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA BIT(3)
+#define ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA BIT(2)
+#define ANA_PORT_DROP_CFG_DROP_NULL_MAC_ENA BIT(1)
+#define ANA_PORT_DROP_CFG_DROP_MC_SMAC_ENA BIT(0)
+
+#define ANA_PORT_QOS_CFG_GSZ 0x100
+
+#define ANA_PORT_QOS_CFG_DP_DEFAULT_VAL BIT(8)
+#define ANA_PORT_QOS_CFG_QOS_DEFAULT_VAL(x) (((x) << 5) & GENMASK(7, 5))
+#define ANA_PORT_QOS_CFG_QOS_DEFAULT_VAL_M GENMASK(7, 5)
+#define ANA_PORT_QOS_CFG_QOS_DEFAULT_VAL_X(x) (((x) & GENMASK(7, 5)) >> 5)
+#define ANA_PORT_QOS_CFG_QOS_DSCP_ENA BIT(4)
+#define ANA_PORT_QOS_CFG_QOS_PCP_ENA BIT(3)
+#define ANA_PORT_QOS_CFG_DSCP_TRANSLATE_ENA BIT(2)
+#define ANA_PORT_QOS_CFG_DSCP_REWR_CFG(x) ((x) & GENMASK(1, 0))
+#define ANA_PORT_QOS_CFG_DSCP_REWR_CFG_M GENMASK(1, 0)
+
+#define ANA_PORT_VCAP_CFG_GSZ 0x100
+
+#define ANA_PORT_VCAP_CFG_S1_ENA BIT(14)
+#define ANA_PORT_VCAP_CFG_S1_DMAC_DIP_ENA(x) (((x) << 11) & GENMASK(13, 11))
+#define ANA_PORT_VCAP_CFG_S1_DMAC_DIP_ENA_M GENMASK(13, 11)
+#define ANA_PORT_VCAP_CFG_S1_DMAC_DIP_ENA_X(x) (((x) & GENMASK(13, 11)) >> 11)
+#define ANA_PORT_VCAP_CFG_S1_VLAN_INNER_TAG_ENA(x) (((x) << 8) & GENMASK(10, 8))
+#define ANA_PORT_VCAP_CFG_S1_VLAN_INNER_TAG_ENA_M GENMASK(10, 8)
+#define ANA_PORT_VCAP_CFG_S1_VLAN_INNER_TAG_ENA_X(x) (((x) & GENMASK(10, 8)) >> 8)
+#define ANA_PORT_VCAP_CFG_PAG_VAL(x) ((x) & GENMASK(7, 0))
+#define ANA_PORT_VCAP_CFG_PAG_VAL_M GENMASK(7, 0)
+
+#define ANA_PORT_VCAP_S1_KEY_CFG_GSZ 0x100
+#define ANA_PORT_VCAP_S1_KEY_CFG_RSZ 0x4
+
+#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_IP6_CFG(x) (((x) << 4) & GENMASK(6, 4))
+#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_IP6_CFG_M GENMASK(6, 4)
+#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_IP6_CFG_X(x) (((x) & GENMASK(6, 4)) >> 4)
+#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_IP4_CFG(x) (((x) << 2) & GENMASK(3, 2))
+#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_IP4_CFG_M GENMASK(3, 2)
+#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_IP4_CFG_X(x) (((x) & GENMASK(3, 2)) >> 2)
+#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_OTHER_CFG(x) ((x) & GENMASK(1, 0))
+#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_OTHER_CFG_M GENMASK(1, 0)
+
+#define ANA_PORT_VCAP_S2_CFG_GSZ 0x100
+
+#define ANA_PORT_VCAP_S2_CFG_S2_UDP_PAYLOAD_ENA(x) (((x) << 17) & GENMASK(18, 17))
+#define ANA_PORT_VCAP_S2_CFG_S2_UDP_PAYLOAD_ENA_M GENMASK(18, 17)
+#define ANA_PORT_VCAP_S2_CFG_S2_UDP_PAYLOAD_ENA_X(x) (((x) & GENMASK(18, 17)) >> 17)
+#define ANA_PORT_VCAP_S2_CFG_S2_ETYPE_PAYLOAD_ENA(x) (((x) << 15) & GENMASK(16, 15))
+#define ANA_PORT_VCAP_S2_CFG_S2_ETYPE_PAYLOAD_ENA_M GENMASK(16, 15)
+#define ANA_PORT_VCAP_S2_CFG_S2_ETYPE_PAYLOAD_ENA_X(x) (((x) & GENMASK(16, 15)) >> 15)
+#define ANA_PORT_VCAP_S2_CFG_S2_ENA BIT(14)
+#define ANA_PORT_VCAP_S2_CFG_S2_SNAP_DIS(x) (((x) << 12) & GENMASK(13, 12))
+#define ANA_PORT_VCAP_S2_CFG_S2_SNAP_DIS_M GENMASK(13, 12)
+#define ANA_PORT_VCAP_S2_CFG_S2_SNAP_DIS_X(x) (((x) & GENMASK(13, 12)) >> 12)
+#define ANA_PORT_VCAP_S2_CFG_S2_ARP_DIS(x) (((x) << 10) & GENMASK(11, 10))
+#define ANA_PORT_VCAP_S2_CFG_S2_ARP_DIS_M GENMASK(11, 10)
+#define ANA_PORT_VCAP_S2_CFG_S2_ARP_DIS_X(x) (((x) & GENMASK(11, 10)) >> 10)
+#define ANA_PORT_VCAP_S2_CFG_S2_IP_TCPUDP_DIS(x) (((x) << 8) & GENMASK(9, 8))
+#define ANA_PORT_VCAP_S2_CFG_S2_IP_TCPUDP_DIS_M GENMASK(9, 8)
+#define ANA_PORT_VCAP_S2_CFG_S2_IP_TCPUDP_DIS_X(x) (((x) & GENMASK(9, 8)) >> 8)
+#define ANA_PORT_VCAP_S2_CFG_S2_IP_OTHER_DIS(x) (((x) << 6) & GENMASK(7, 6))
+#define ANA_PORT_VCAP_S2_CFG_S2_IP_OTHER_DIS_M GENMASK(7, 6)
+#define ANA_PORT_VCAP_S2_CFG_S2_IP_OTHER_DIS_X(x) (((x) & GENMASK(7, 6)) >> 6)
+#define ANA_PORT_VCAP_S2_CFG_S2_IP6_CFG(x) (((x) << 2) & GENMASK(5, 2))
+#define ANA_PORT_VCAP_S2_CFG_S2_IP6_CFG_M GENMASK(5, 2)
+#define ANA_PORT_VCAP_S2_CFG_S2_IP6_CFG_X(x) (((x) & GENMASK(5, 2)) >> 2)
+#define ANA_PORT_VCAP_S2_CFG_S2_OAM_DIS(x) ((x) & GENMASK(1, 0))
+#define ANA_PORT_VCAP_S2_CFG_S2_OAM_DIS_M GENMASK(1, 0)
+
+#define ANA_PORT_PCP_DEI_MAP_GSZ 0x100
+#define ANA_PORT_PCP_DEI_MAP_RSZ 0x4
+
+#define ANA_PORT_PCP_DEI_MAP_DP_PCP_DEI_VAL BIT(3)
+#define ANA_PORT_PCP_DEI_MAP_QOS_PCP_DEI_VAL(x) ((x) & GENMASK(2, 0))
+#define ANA_PORT_PCP_DEI_MAP_QOS_PCP_DEI_VAL_M GENMASK(2, 0)
+
+#define ANA_PORT_CPU_FWD_CFG_GSZ 0x100
+
+#define ANA_PORT_CPU_FWD_CFG_CPU_VRAP_REDIR_ENA BIT(7)
+#define ANA_PORT_CPU_FWD_CFG_CPU_MLD_REDIR_ENA BIT(6)
+#define ANA_PORT_CPU_FWD_CFG_CPU_IGMP_REDIR_ENA BIT(5)
+#define ANA_PORT_CPU_FWD_CFG_CPU_IPMC_CTRL_COPY_ENA BIT(4)
+#define ANA_PORT_CPU_FWD_CFG_CPU_SRC_COPY_ENA BIT(3)
+#define ANA_PORT_CPU_FWD_CFG_CPU_ALLBRIDGE_DROP_ENA BIT(2)
+#define ANA_PORT_CPU_FWD_CFG_CPU_ALLBRIDGE_REDIR_ENA BIT(1)
+#define ANA_PORT_CPU_FWD_CFG_CPU_OAM_ENA BIT(0)
+
+#define ANA_PORT_CPU_FWD_BPDU_CFG_GSZ 0x100
+
+#define ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_DROP_ENA(x) (((x) << 16) & GENMASK(31, 16))
+#define ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_DROP_ENA_M GENMASK(31, 16)
+#define ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_DROP_ENA_X(x) (((x) & GENMASK(31, 16)) >> 16)
+#define ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(x) ((x) & GENMASK(15, 0))
+#define ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA_M GENMASK(15, 0)
+
+#define ANA_PORT_CPU_FWD_GARP_CFG_GSZ 0x100
+
+#define ANA_PORT_CPU_FWD_GARP_CFG_GARP_DROP_ENA(x) (((x) << 16) & GENMASK(31, 16))
+#define ANA_PORT_CPU_FWD_GARP_CFG_GARP_DROP_ENA_M GENMASK(31, 16)
+#define ANA_PORT_CPU_FWD_GARP_CFG_GARP_DROP_ENA_X(x) (((x) & GENMASK(31, 16)) >> 16)
+#define ANA_PORT_CPU_FWD_GARP_CFG_GARP_REDIR_ENA(x) ((x) & GENMASK(15, 0))
+#define ANA_PORT_CPU_FWD_GARP_CFG_GARP_REDIR_ENA_M GENMASK(15, 0)
+
+#define ANA_PORT_CPU_FWD_CCM_CFG_GSZ 0x100
+
+#define ANA_PORT_CPU_FWD_CCM_CFG_CCM_DROP_ENA(x) (((x) << 16) & GENMASK(31, 16))
+#define ANA_PORT_CPU_FWD_CCM_CFG_CCM_DROP_ENA_M GENMASK(31, 16)
+#define ANA_PORT_CPU_FWD_CCM_CFG_CCM_DROP_ENA_X(x) (((x) & GENMASK(31, 16)) >> 16)
+#define ANA_PORT_CPU_FWD_CCM_CFG_CCM_REDIR_ENA(x) ((x) & GENMASK(15, 0))
+#define ANA_PORT_CPU_FWD_CCM_CFG_CCM_REDIR_ENA_M GENMASK(15, 0)
+
+#define ANA_PORT_PORT_CFG_GSZ 0x100
+
+#define ANA_PORT_PORT_CFG_SRC_MIRROR_ENA BIT(15)
+#define ANA_PORT_PORT_CFG_LIMIT_DROP BIT(14)
+#define ANA_PORT_PORT_CFG_LIMIT_CPU BIT(13)
+#define ANA_PORT_PORT_CFG_LOCKED_PORTMOVE_DROP BIT(12)
+#define ANA_PORT_PORT_CFG_LOCKED_PORTMOVE_CPU BIT(11)
+#define ANA_PORT_PORT_CFG_LEARNDROP BIT(10)
+#define ANA_PORT_PORT_CFG_LEARNCPU BIT(9)
+#define ANA_PORT_PORT_CFG_LEARNAUTO BIT(8)
+#define ANA_PORT_PORT_CFG_LEARN_ENA BIT(7)
+#define ANA_PORT_PORT_CFG_RECV_ENA BIT(6)
+#define ANA_PORT_PORT_CFG_PORTID_VAL(x) (((x) << 2) & GENMASK(5, 2))
+#define ANA_PORT_PORT_CFG_PORTID_VAL_M GENMASK(5, 2)
+#define ANA_PORT_PORT_CFG_PORTID_VAL_X(x) (((x) & GENMASK(5, 2)) >> 2)
+#define ANA_PORT_PORT_CFG_USE_B_DOM_TBL BIT(1)
+#define ANA_PORT_PORT_CFG_LSR_MODE BIT(0)
+
+#define ANA_PORT_POL_CFG_GSZ 0x100
+
+#define ANA_PORT_POL_CFG_POL_CPU_REDIR_8021 BIT(19)
+#define ANA_PORT_POL_CFG_POL_CPU_REDIR_IP BIT(18)
+#define ANA_PORT_POL_CFG_PORT_POL_ENA BIT(17)
+#define ANA_PORT_POL_CFG_QUEUE_POL_ENA(x) (((x) << 9) & GENMASK(16, 9))
+#define ANA_PORT_POL_CFG_QUEUE_POL_ENA_M GENMASK(16, 9)
+#define ANA_PORT_POL_CFG_QUEUE_POL_ENA_X(x) (((x) & GENMASK(16, 9)) >> 9)
+#define ANA_PORT_POL_CFG_POL_ORDER(x) ((x) & GENMASK(8, 0))
+#define ANA_PORT_POL_CFG_POL_ORDER_M GENMASK(8, 0)
+
+#define ANA_PORT_PTP_CFG_GSZ 0x100
+
+#define ANA_PORT_PTP_CFG_PTP_BACKPLANE_MODE BIT(0)
+
+#define ANA_PORT_PTP_DLY1_CFG_GSZ 0x100
+
+#define ANA_PORT_PTP_DLY2_CFG_GSZ 0x100
+
+#define ANA_PORT_SFID_CFG_GSZ 0x100
+#define ANA_PORT_SFID_CFG_RSZ 0x4
+
+#define ANA_PORT_SFID_CFG_SFID_VALID BIT(8)
+#define ANA_PORT_SFID_CFG_SFID(x) ((x) & GENMASK(7, 0))
+#define ANA_PORT_SFID_CFG_SFID_M GENMASK(7, 0)
+
+#define ANA_PFC_PFC_CFG_GSZ 0x40
+
+#define ANA_PFC_PFC_CFG_RX_PFC_ENA(x) (((x) << 2) & GENMASK(9, 2))
+#define ANA_PFC_PFC_CFG_RX_PFC_ENA_M GENMASK(9, 2)
+#define ANA_PFC_PFC_CFG_RX_PFC_ENA_X(x) (((x) & GENMASK(9, 2)) >> 2)
+#define ANA_PFC_PFC_CFG_FC_LINK_SPEED(x) ((x) & GENMASK(1, 0))
+#define ANA_PFC_PFC_CFG_FC_LINK_SPEED_M GENMASK(1, 0)
+
+#define ANA_PFC_PFC_TIMER_GSZ 0x40
+#define ANA_PFC_PFC_TIMER_RSZ 0x4
+
+#define ANA_IPT_OAM_MEP_CFG_GSZ 0x8
+
+#define ANA_IPT_OAM_MEP_CFG_MEP_IDX_P(x) (((x) << 6) & GENMASK(10, 6))
+#define ANA_IPT_OAM_MEP_CFG_MEP_IDX_P_M GENMASK(10, 6)
+#define ANA_IPT_OAM_MEP_CFG_MEP_IDX_P_X(x) (((x) & GENMASK(10, 6)) >> 6)
+#define ANA_IPT_OAM_MEP_CFG_MEP_IDX(x) (((x) << 1) & GENMASK(5, 1))
+#define ANA_IPT_OAM_MEP_CFG_MEP_IDX_M GENMASK(5, 1)
+#define ANA_IPT_OAM_MEP_CFG_MEP_IDX_X(x) (((x) & GENMASK(5, 1)) >> 1)
+#define ANA_IPT_OAM_MEP_CFG_MEP_IDX_ENA BIT(0)
+
+#define ANA_IPT_IPT_GSZ 0x8
+
+#define ANA_IPT_IPT_IPT_CFG(x) (((x) << 15) & GENMASK(16, 15))
+#define ANA_IPT_IPT_IPT_CFG_M GENMASK(16, 15)
+#define ANA_IPT_IPT_IPT_CFG_X(x) (((x) & GENMASK(16, 15)) >> 15)
+#define ANA_IPT_IPT_ISDX_P(x) (((x) << 7) & GENMASK(14, 7))
+#define ANA_IPT_IPT_ISDX_P_M GENMASK(14, 7)
+#define ANA_IPT_IPT_ISDX_P_X(x) (((x) & GENMASK(14, 7)) >> 7)
+#define ANA_IPT_IPT_PPT_IDX(x) ((x) & GENMASK(6, 0))
+#define ANA_IPT_IPT_PPT_IDX_M GENMASK(6, 0)
+
+#define ANA_PPT_PPT_RSZ 0x4
+
+#define ANA_FID_MAP_FID_MAP_RSZ 0x4
+
+#define ANA_FID_MAP_FID_MAP_FID_C_VAL(x) (((x) << 6) & GENMASK(11, 6))
+#define ANA_FID_MAP_FID_MAP_FID_C_VAL_M GENMASK(11, 6)
+#define ANA_FID_MAP_FID_MAP_FID_C_VAL_X(x) (((x) & GENMASK(11, 6)) >> 6)
+#define ANA_FID_MAP_FID_MAP_FID_B_VAL(x) ((x) & GENMASK(5, 0))
+#define ANA_FID_MAP_FID_MAP_FID_B_VAL_M GENMASK(5, 0)
+
+#define ANA_AGGR_CFG_AC_RND_ENA BIT(7)
+#define ANA_AGGR_CFG_AC_DMAC_ENA BIT(6)
+#define ANA_AGGR_CFG_AC_SMAC_ENA BIT(5)
+#define ANA_AGGR_CFG_AC_IP6_FLOW_LBL_ENA BIT(4)
+#define ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA BIT(3)
+#define ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA BIT(2)
+#define ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA BIT(1)
+#define ANA_AGGR_CFG_AC_ISDX_ENA BIT(0)
+
+#define ANA_CPUQ_CFG_CPUQ_MLD(x) (((x) << 27) & GENMASK(29, 27))
+#define ANA_CPUQ_CFG_CPUQ_MLD_M GENMASK(29, 27)
+#define ANA_CPUQ_CFG_CPUQ_MLD_X(x) (((x) & GENMASK(29, 27)) >> 27)
+#define ANA_CPUQ_CFG_CPUQ_IGMP(x) (((x) << 24) & GENMASK(26, 24))
+#define ANA_CPUQ_CFG_CPUQ_IGMP_M GENMASK(26, 24)
+#define ANA_CPUQ_CFG_CPUQ_IGMP_X(x) (((x) & GENMASK(26, 24)) >> 24)
+#define ANA_CPUQ_CFG_CPUQ_IPMC_CTRL(x) (((x) << 21) & GENMASK(23, 21))
+#define ANA_CPUQ_CFG_CPUQ_IPMC_CTRL_M GENMASK(23, 21)
+#define ANA_CPUQ_CFG_CPUQ_IPMC_CTRL_X(x) (((x) & GENMASK(23, 21)) >> 21)
+#define ANA_CPUQ_CFG_CPUQ_ALLBRIDGE(x) (((x) << 18) & GENMASK(20, 18))
+#define ANA_CPUQ_CFG_CPUQ_ALLBRIDGE_M GENMASK(20, 18)
+#define ANA_CPUQ_CFG_CPUQ_ALLBRIDGE_X(x) (((x) & GENMASK(20, 18)) >> 18)
+#define ANA_CPUQ_CFG_CPUQ_LOCKED_PORTMOVE(x) (((x) << 15) & GENMASK(17, 15))
+#define ANA_CPUQ_CFG_CPUQ_LOCKED_PORTMOVE_M GENMASK(17, 15)
+#define ANA_CPUQ_CFG_CPUQ_LOCKED_PORTMOVE_X(x) (((x) & GENMASK(17, 15)) >> 15)
+#define ANA_CPUQ_CFG_CPUQ_SRC_COPY(x) (((x) << 12) & GENMASK(14, 12))
+#define ANA_CPUQ_CFG_CPUQ_SRC_COPY_M GENMASK(14, 12)
+#define ANA_CPUQ_CFG_CPUQ_SRC_COPY_X(x) (((x) & GENMASK(14, 12)) >> 12)
+#define ANA_CPUQ_CFG_CPUQ_MAC_COPY(x) (((x) << 9) & GENMASK(11, 9))
+#define ANA_CPUQ_CFG_CPUQ_MAC_COPY_M GENMASK(11, 9)
+#define ANA_CPUQ_CFG_CPUQ_MAC_COPY_X(x) (((x) & GENMASK(11, 9)) >> 9)
+#define ANA_CPUQ_CFG_CPUQ_LRN(x) (((x) << 6) & GENMASK(8, 6))
+#define ANA_CPUQ_CFG_CPUQ_LRN_M GENMASK(8, 6)
+#define ANA_CPUQ_CFG_CPUQ_LRN_X(x) (((x) & GENMASK(8, 6)) >> 6)
+#define ANA_CPUQ_CFG_CPUQ_MIRROR(x) (((x) << 3) & GENMASK(5, 3))
+#define ANA_CPUQ_CFG_CPUQ_MIRROR_M GENMASK(5, 3)
+#define ANA_CPUQ_CFG_CPUQ_MIRROR_X(x) (((x) & GENMASK(5, 3)) >> 3)
+#define ANA_CPUQ_CFG_CPUQ_SFLOW(x) ((x) & GENMASK(2, 0))
+#define ANA_CPUQ_CFG_CPUQ_SFLOW_M GENMASK(2, 0)
+
+#define ANA_CPUQ_8021_CFG_RSZ 0x4
+
+#define ANA_CPUQ_8021_CFG_CPUQ_BPDU_VAL(x) (((x) << 6) & GENMASK(8, 6))
+#define ANA_CPUQ_8021_CFG_CPUQ_BPDU_VAL_M GENMASK(8, 6)
+#define ANA_CPUQ_8021_CFG_CPUQ_BPDU_VAL_X(x) (((x) & GENMASK(8, 6)) >> 6)
+#define ANA_CPUQ_8021_CFG_CPUQ_GARP_VAL(x) (((x) << 3) & GENMASK(5, 3))
+#define ANA_CPUQ_8021_CFG_CPUQ_GARP_VAL_M GENMASK(5, 3)
+#define ANA_CPUQ_8021_CFG_CPUQ_GARP_VAL_X(x) (((x) & GENMASK(5, 3)) >> 3)
+#define ANA_CPUQ_8021_CFG_CPUQ_CCM_VAL(x) ((x) & GENMASK(2, 0))
+#define ANA_CPUQ_8021_CFG_CPUQ_CCM_VAL_M GENMASK(2, 0)
+
+#define ANA_DSCP_CFG_RSZ 0x4
+
+#define ANA_DSCP_CFG_DP_DSCP_VAL BIT(11)
+#define ANA_DSCP_CFG_QOS_DSCP_VAL(x) (((x) << 8) & GENMASK(10, 8))
+#define ANA_DSCP_CFG_QOS_DSCP_VAL_M GENMASK(10, 8)
+#define ANA_DSCP_CFG_QOS_DSCP_VAL_X(x) (((x) & GENMASK(10, 8)) >> 8)
+#define ANA_DSCP_CFG_DSCP_TRANSLATE_VAL(x) (((x) << 2) & GENMASK(7, 2))
+#define ANA_DSCP_CFG_DSCP_TRANSLATE_VAL_M GENMASK(7, 2)
+#define ANA_DSCP_CFG_DSCP_TRANSLATE_VAL_X(x) (((x) & GENMASK(7, 2)) >> 2)
+#define ANA_DSCP_CFG_DSCP_TRUST_ENA BIT(1)
+#define ANA_DSCP_CFG_DSCP_REWR_ENA BIT(0)
+
+#define ANA_DSCP_REWR_CFG_RSZ 0x4
+
+#define ANA_VCAP_RNG_TYPE_CFG_RSZ 0x4
+
+#define ANA_VCAP_RNG_VAL_CFG_RSZ 0x4
+
+#define ANA_VCAP_RNG_VAL_CFG_VCAP_RNG_MIN_VAL(x) (((x) << 16) & GENMASK(31, 16))
+#define ANA_VCAP_RNG_VAL_CFG_VCAP_RNG_MIN_VAL_M GENMASK(31, 16)
+#define ANA_VCAP_RNG_VAL_CFG_VCAP_RNG_MIN_VAL_X(x) (((x) & GENMASK(31, 16)) >> 16)
+#define ANA_VCAP_RNG_VAL_CFG_VCAP_RNG_MAX_VAL(x) ((x) & GENMASK(15, 0))
+#define ANA_VCAP_RNG_VAL_CFG_VCAP_RNG_MAX_VAL_M GENMASK(15, 0)
+
+#define ANA_VRAP_CFG_VRAP_VLAN_AWARE_ENA BIT(12)
+#define ANA_VRAP_CFG_VRAP_VID(x) ((x) & GENMASK(11, 0))
+#define ANA_VRAP_CFG_VRAP_VID_M GENMASK(11, 0)
+
+#define ANA_DISCARD_CFG_DROP_TAGGING_ISDX0 BIT(3)
+#define ANA_DISCARD_CFG_DROP_CTRLPROT_ISDX0 BIT(2)
+#define ANA_DISCARD_CFG_DROP_TAGGING_S2_ENA BIT(1)
+#define ANA_DISCARD_CFG_DROP_CTRLPROT_S2_ENA BIT(0)
+
+#define ANA_FID_CFG_VID_MC_ENA BIT(0)
+
+#define ANA_POL_PIR_CFG_GSZ 0x20
+
+#define ANA_POL_PIR_CFG_PIR_RATE(x) (((x) << 6) & GENMASK(20, 6))
+#define ANA_POL_PIR_CFG_PIR_RATE_M GENMASK(20, 6)
+#define ANA_POL_PIR_CFG_PIR_RATE_X(x) (((x) & GENMASK(20, 6)) >> 6)
+#define ANA_POL_PIR_CFG_PIR_BURST(x) ((x) & GENMASK(5, 0))
+#define ANA_POL_PIR_CFG_PIR_BURST_M GENMASK(5, 0)
+
+#define ANA_POL_CIR_CFG_GSZ 0x20
+
+#define ANA_POL_CIR_CFG_CIR_RATE(x) (((x) << 6) & GENMASK(20, 6))
+#define ANA_POL_CIR_CFG_CIR_RATE_M GENMASK(20, 6)
+#define ANA_POL_CIR_CFG_CIR_RATE_X(x) (((x) & GENMASK(20, 6)) >> 6)
+#define ANA_POL_CIR_CFG_CIR_BURST(x) ((x) & GENMASK(5, 0))
+#define ANA_POL_CIR_CFG_CIR_BURST_M GENMASK(5, 0)
+
+#define ANA_POL_MODE_CFG_GSZ 0x20
+
+#define ANA_POL_MODE_CFG_IPG_SIZE(x) (((x) << 5) & GENMASK(9, 5))
+#define ANA_POL_MODE_CFG_IPG_SIZE_M GENMASK(9, 5)
+#define ANA_POL_MODE_CFG_IPG_SIZE_X(x) (((x) & GENMASK(9, 5)) >> 5)
+#define ANA_POL_MODE_CFG_FRM_MODE(x) (((x) << 3) & GENMASK(4, 3))
+#define ANA_POL_MODE_CFG_FRM_MODE_M GENMASK(4, 3)
+#define ANA_POL_MODE_CFG_FRM_MODE_X(x) (((x) & GENMASK(4, 3)) >> 3)
+#define ANA_POL_MODE_CFG_DLB_COUPLED BIT(2)
+#define ANA_POL_MODE_CFG_CIR_ENA BIT(1)
+#define ANA_POL_MODE_CFG_OVERSHOOT_ENA BIT(0)
+
+#define ANA_POL_PIR_STATE_GSZ 0x20
+
+#define ANA_POL_CIR_STATE_GSZ 0x20
+
+#define ANA_POL_STATE_GSZ 0x20
+
+#define ANA_POL_FLOWC_RSZ 0x4
+
+#define ANA_POL_FLOWC_POL_FLOWC BIT(0)
+
+#define ANA_POL_HYST_POL_FC_HYST(x) (((x) << 4) & GENMASK(9, 4))
+#define ANA_POL_HYST_POL_FC_HYST_M GENMASK(9, 4)
+#define ANA_POL_HYST_POL_FC_HYST_X(x) (((x) & GENMASK(9, 4)) >> 4)
+#define ANA_POL_HYST_POL_STOP_HYST(x) ((x) & GENMASK(3, 0))
+#define ANA_POL_HYST_POL_STOP_HYST_M GENMASK(3, 0)
+
+#define ANA_POL_MISC_CFG_POL_CLOSE_ALL BIT(1)
+#define ANA_POL_MISC_CFG_POL_LEAK_DIS BIT(0)
+
+#endif
diff --git a/drivers/net/ethernet/mscc/ocelot_board.c b/drivers/net/ethernet/mscc/ocelot_board.c
new file mode 100644
index 000000000000..18df7d934e81
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_board.c
@@ -0,0 +1,316 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Microsemi Ocelot Switch driver
+ *
+ * Copyright (c) 2017 Microsemi Corporation
+ */
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of_mdio.h>
+#include <linux/of_platform.h>
+#include <linux/skbuff.h>
+
+#include "ocelot.h"
+
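+/* Decode the 16-byte extraction frame header that the switch prepends to
+ * each frame handed to the CPU: frame length, source port, CPU extraction
+ * queue, tag type and VLAN classification.
+ */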
+static int ocelot_parse_ifh(u32 *ifh, struct frame_info *info)
+{
+ int i;
+ u8 llen, wlen;
+
+ /* The IFH is in network order, switch to CPU order */
+ for (i = 0; i < IFH_LEN; i++)
+ ifh[i] = ntohl((__force __be32)ifh[i]);
+
+ wlen = (ifh[1] >> 7) & 0xff;
+ llen = (ifh[1] >> 15) & 0x3f;
+ info->len = OCELOT_BUFFER_CELL_SZ * wlen + llen - 80;
+
+ info->port = (ifh[2] & GENMASK(14, 11)) >> 11;
+
+ info->cpuq = (ifh[3] & GENMASK(27, 20)) >> 20;
+ info->tag_type = (ifh[3] & GENMASK(16, 16)) >> 16;
+ info->vid = ifh[3] & GENMASK(11, 0);
+
+ return 0;
+}
+
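+/* Read one 32-bit word of frame data from extraction group @grp. Control
+ * words (EOF, ABORT, PRUNED, ...) are signalled in-band; XTR_ESCAPE means
+ * the next word is literal frame data that happens to collide with a
+ * control code. Returns the number of valid bytes, or a negative error.
+ */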
+static int ocelot_rx_frame_word(struct ocelot *ocelot, u8 grp, bool ifh,
+ u32 *rval)
+{
+ u32 val;
+ u32 bytes_valid;
+
+ val = ocelot_read_rix(ocelot, QS_XTR_RD, grp);
+ if (val == XTR_NOT_READY) {
+ if (ifh)
+ return -EIO;
+
+ do {
+ val = ocelot_read_rix(ocelot, QS_XTR_RD, grp);
+ } while (val == XTR_NOT_READY);
+ }
+
+ switch (val) {
+ case XTR_ABORT:
+ return -EIO;
+ case XTR_EOF_0:
+ case XTR_EOF_1:
+ case XTR_EOF_2:
+ case XTR_EOF_3:
+ case XTR_PRUNED:
+ bytes_valid = XTR_VALID_BYTES(val);
+ val = ocelot_read_rix(ocelot, QS_XTR_RD, grp);
+ if (val == XTR_ESCAPE)
+ *rval = ocelot_read_rix(ocelot, QS_XTR_RD, grp);
+ else
+ *rval = val;
+
+ return bytes_valid;
+ case XTR_ESCAPE:
+ *rval = ocelot_read_rix(ocelot, QS_XTR_RD, grp);
+
+ return 4;
+ default:
+ *rval = val;
+
+ return 4;
+ }
+}
+
+static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg)
+{
+ struct ocelot *ocelot = arg;
+ int i = 0, grp = 0;
+ int err = 0;
+
+ if (!(ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp)))
+ return IRQ_NONE;
+
+ do {
+ struct sk_buff *skb;
+ struct net_device *dev;
+ u32 *buf;
+ int sz, len;
+ u32 ifh[4];
+ u32 val;
+ struct frame_info info;
+
+ for (i = 0; i < IFH_LEN; i++) {
+ err = ocelot_rx_frame_word(ocelot, grp, true, &ifh[i]);
+ if (err != 4)
+ break;
+ }
+
+ if (err != 4)
+ break;
+
+ ocelot_parse_ifh(ifh, &info);
+
+ dev = ocelot->ports[info.port]->dev;
+
+ skb = netdev_alloc_skb(dev, info.len);
+
+ if (unlikely(!skb)) {
+ netdev_err(dev, "Unable to allocate sk_buff\n");
+ err = -ENOMEM;
+ break;
+ }
+ buf = (u32 *)skb_put(skb, info.len);
+
+ len = 0;
+ do {
+ sz = ocelot_rx_frame_word(ocelot, grp, false, &val);
+ *buf++ = val;
+ len += sz;
+ } while ((sz == 4) && (len < info.len));
+
+ if (sz < 0) {
+ kfree_skb(skb);
+ err = sz;
+ break;
+ }
+
+ /* Everything we see on an interface that is in the HW bridge
+ * has already been forwarded.
+ */
+ if (ocelot->bridge_mask & BIT(info.port))
+ skb->offload_fwd_mark = 1;
+
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->stats.rx_bytes += len;
+ dev->stats.rx_packets++;
+ } while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp));
+
+ if (err)
+ while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp))
+ ocelot_read_rix(ocelot, QS_XTR_RD, grp);
+
+ return IRQ_HANDLED;
+}
+
+static const struct of_device_id mscc_ocelot_match[] = {
+ { .compatible = "mscc,vsc7514-switch" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mscc_ocelot_match);
+
+static int mscc_ocelot_probe(struct platform_device *pdev)
+{
+ int err, irq;
+ unsigned int i;
+ struct device_node *np = pdev->dev.of_node;
+ struct device_node *ports, *portnp;
+ struct ocelot *ocelot;
+ u32 val;
+
+ struct {
+ enum ocelot_target id;
+ char *name;
+ } res[] = {
+ { SYS, "sys" },
+ { REW, "rew" },
+ { QSYS, "qsys" },
+ { ANA, "ana" },
+ { QS, "qs" },
+ { HSIO, "hsio" },
+ };
+
+ if (!np && !pdev->dev.platform_data)
+ return -ENODEV;
+
+ ocelot = devm_kzalloc(&pdev->dev, sizeof(*ocelot), GFP_KERNEL);
+ if (!ocelot)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ocelot);
+ ocelot->dev = &pdev->dev;
+
+ for (i = 0; i < ARRAY_SIZE(res); i++) {
+ struct regmap *target;
+
+ target = ocelot_io_platform_init(ocelot, pdev, res[i].name);
+ if (IS_ERR(target))
+ return PTR_ERR(target);
+
+ ocelot->targets[res[i].id] = target;
+ }
+
+ err = ocelot_chip_init(ocelot);
+ if (err)
+ return err;
+
+ irq = platform_get_irq_byname(pdev, "xtr");
+ if (irq < 0)
+ return -ENODEV;
+
+ err = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ ocelot_xtr_irq_handler, IRQF_ONESHOT,
+ "frame extraction", ocelot);
+ if (err)
+ return err;
+
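+ /* Initialize the switch core memories and wait for the init to
+ * complete
+ */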
+ regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_INIT], 1);
+ regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_ENA], 1);
+
+ do {
+ msleep(1);
+ regmap_field_read(ocelot->regfields[SYS_RESET_CFG_MEM_INIT],
+ &val);
+ } while (val);
+
+ regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_ENA], 1);
+ regmap_field_write(ocelot->regfields[SYS_RESET_CFG_CORE_ENA], 1);
+
+ ocelot->num_cpu_ports = 1; /* 1 CPU port on the switch, served by two
+ * injection/extraction register groups
+ */
+
+ ports = of_get_child_by_name(np, "ethernet-ports");
+ if (!ports) {
+ dev_err(&pdev->dev, "no ethernet-ports child node found\n");
+ return -ENODEV;
+ }
+
+ ocelot->num_phys_ports = of_get_child_count(ports);
+
+ ocelot->ports = devm_kcalloc(&pdev->dev, ocelot->num_phys_ports,
+ sizeof(struct ocelot_port *), GFP_KERNEL);
+ if (!ocelot->ports)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&ocelot->multicast);
+ err = ocelot_init(ocelot);
+ if (err)
+ return err;
+
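+ /* Board-specific HSIO muxing: select the DEV1G mode for devices 4, 6
+ * and 9.
+ */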
+ ocelot_rmw(ocelot, HSIO_HW_CFG_DEV1G_4_MODE |
+ HSIO_HW_CFG_DEV1G_6_MODE |
+ HSIO_HW_CFG_DEV1G_9_MODE,
+ HSIO_HW_CFG_DEV1G_4_MODE |
+ HSIO_HW_CFG_DEV1G_6_MODE |
+ HSIO_HW_CFG_DEV1G_9_MODE,
+ HSIO_HW_CFG);
+
+ for_each_available_child_of_node(ports, portnp) {
+ struct device_node *phy_node;
+ struct phy_device *phy;
+ struct resource *res;
+ void __iomem *regs;
+ char res_name[8];
+ u32 port;
+
+ if (of_property_read_u32(portnp, "reg", &port))
+ continue;
+
+ snprintf(res_name, sizeof(res_name), "port%d", port);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ res_name);
+ regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(regs))
+ continue;
+
+ phy_node = of_parse_phandle(portnp, "phy-handle", 0);
+ if (!phy_node)
+ continue;
+
+ phy = of_phy_find_device(phy_node);
+ if (!phy)
+ continue;
+
+ err = ocelot_probe_port(ocelot, port, regs, phy);
+ if (err) {
+ dev_err(&pdev->dev, "failed to probe ports\n");
+ goto err_probe_ports;
+ }
+ }
+
+ register_netdevice_notifier(&ocelot_netdevice_nb);
+
+ dev_info(&pdev->dev, "Ocelot switch probed\n");
+
+ return 0;
+
+err_probe_ports:
+ return err;
+}
+
+static int mscc_ocelot_remove(struct platform_device *pdev)
+{
+ struct ocelot *ocelot = platform_get_drvdata(pdev);
+
+ ocelot_deinit(ocelot);
+ unregister_netdevice_notifier(&ocelot_netdevice_nb);
+
+ return 0;
+}
+
+static struct platform_driver mscc_ocelot_driver = {
+ .probe = mscc_ocelot_probe,
+ .remove = mscc_ocelot_remove,
+ .driver = {
+ .name = "ocelot-switch",
+ .of_match_table = mscc_ocelot_match,
+ },
+};
+
+module_platform_driver(mscc_ocelot_driver);
+
+MODULE_DESCRIPTION("Microsemi Ocelot switch driver");
+MODULE_AUTHOR("Alexandre Belloni <alexandre.belloni@bootlin.com>");
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/net/ethernet/mscc/ocelot_dev.h b/drivers/net/ethernet/mscc/ocelot_dev.h
new file mode 100644
index 000000000000..0a50d53bbd3f
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_dev.h
@@ -0,0 +1,275 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Microsemi Ocelot Switch driver
+ *
+ * Copyright (c) 2017 Microsemi Corporation
+ */
+
+#ifndef _MSCC_OCELOT_DEV_H_
+#define _MSCC_OCELOT_DEV_H_
+
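+/* Per-port DEV register block. The offsets below are relative to a port's
+ * base address and are accessed with ocelot_port_readl() and
+ * ocelot_port_writel().
+ */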
+#define DEV_CLOCK_CFG 0x0
+
+#define DEV_CLOCK_CFG_MAC_TX_RST BIT(7)
+#define DEV_CLOCK_CFG_MAC_RX_RST BIT(6)
+#define DEV_CLOCK_CFG_PCS_TX_RST BIT(5)
+#define DEV_CLOCK_CFG_PCS_RX_RST BIT(4)
+#define DEV_CLOCK_CFG_PORT_RST BIT(3)
+#define DEV_CLOCK_CFG_PHY_RST BIT(2)
+#define DEV_CLOCK_CFG_LINK_SPEED(x) ((x) & GENMASK(1, 0))
+#define DEV_CLOCK_CFG_LINK_SPEED_M GENMASK(1, 0)
+
+#define DEV_PORT_MISC 0x4
+
+#define DEV_PORT_MISC_FWD_ERROR_ENA BIT(4)
+#define DEV_PORT_MISC_FWD_PAUSE_ENA BIT(3)
+#define DEV_PORT_MISC_FWD_CTRL_ENA BIT(2)
+#define DEV_PORT_MISC_DEV_LOOP_ENA BIT(1)
+#define DEV_PORT_MISC_HDX_FAST_DIS BIT(0)
+
+#define DEV_EVENTS 0x8
+
+#define DEV_EEE_CFG 0xc
+
+#define DEV_EEE_CFG_EEE_ENA BIT(22)
+#define DEV_EEE_CFG_EEE_TIMER_AGE(x) (((x) << 15) & GENMASK(21, 15))
+#define DEV_EEE_CFG_EEE_TIMER_AGE_M GENMASK(21, 15)
+#define DEV_EEE_CFG_EEE_TIMER_AGE_X(x) (((x) & GENMASK(21, 15)) >> 15)
+#define DEV_EEE_CFG_EEE_TIMER_WAKEUP(x) (((x) << 8) & GENMASK(14, 8))
+#define DEV_EEE_CFG_EEE_TIMER_WAKEUP_M GENMASK(14, 8)
+#define DEV_EEE_CFG_EEE_TIMER_WAKEUP_X(x) (((x) & GENMASK(14, 8)) >> 8)
+#define DEV_EEE_CFG_EEE_TIMER_HOLDOFF(x) (((x) << 1) & GENMASK(7, 1))
+#define DEV_EEE_CFG_EEE_TIMER_HOLDOFF_M GENMASK(7, 1)
+#define DEV_EEE_CFG_EEE_TIMER_HOLDOFF_X(x) (((x) & GENMASK(7, 1)) >> 1)
+#define DEV_EEE_CFG_PORT_LPI BIT(0)
+
+#define DEV_RX_PATH_DELAY 0x10
+
+#define DEV_TX_PATH_DELAY 0x14
+
+#define DEV_PTP_PREDICT_CFG 0x18
+
+#define DEV_PTP_PREDICT_CFG_PTP_PHY_PREDICT_CFG(x) (((x) << 4) & GENMASK(11, 4))
+#define DEV_PTP_PREDICT_CFG_PTP_PHY_PREDICT_CFG_M GENMASK(11, 4)
+#define DEV_PTP_PREDICT_CFG_PTP_PHY_PREDICT_CFG_X(x) (((x) & GENMASK(11, 4)) >> 4)
+#define DEV_PTP_PREDICT_CFG_PTP_PHASE_PREDICT_CFG(x) ((x) & GENMASK(3, 0))
+#define DEV_PTP_PREDICT_CFG_PTP_PHASE_PREDICT_CFG_M GENMASK(3, 0)
+
+#define DEV_MAC_ENA_CFG 0x1c
+
+#define DEV_MAC_ENA_CFG_RX_ENA BIT(4)
+#define DEV_MAC_ENA_CFG_TX_ENA BIT(0)
+
+#define DEV_MAC_MODE_CFG 0x20
+
+#define DEV_MAC_MODE_CFG_FC_WORD_SYNC_ENA BIT(8)
+#define DEV_MAC_MODE_CFG_GIGA_MODE_ENA BIT(4)
+#define DEV_MAC_MODE_CFG_FDX_ENA BIT(0)
+
+#define DEV_MAC_MAXLEN_CFG 0x24
+
+#define DEV_MAC_TAGS_CFG 0x28
+
+#define DEV_MAC_TAGS_CFG_TAG_ID(x) (((x) << 16) & GENMASK(31, 16))
+#define DEV_MAC_TAGS_CFG_TAG_ID_M GENMASK(31, 16)
+#define DEV_MAC_TAGS_CFG_TAG_ID_X(x) (((x) & GENMASK(31, 16)) >> 16)
+#define DEV_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA BIT(2)
+#define DEV_MAC_TAGS_CFG_PB_ENA BIT(1)
+#define DEV_MAC_TAGS_CFG_VLAN_AWR_ENA BIT(0)
+
+#define DEV_MAC_ADV_CHK_CFG 0x2c
+
+#define DEV_MAC_ADV_CHK_CFG_LEN_DROP_ENA BIT(0)
+
+#define DEV_MAC_IFG_CFG 0x30
+
+#define DEV_MAC_IFG_CFG_RESTORE_OLD_IPG_CHECK BIT(17)
+#define DEV_MAC_IFG_CFG_REDUCED_TX_IFG BIT(16)
+#define DEV_MAC_IFG_CFG_TX_IFG(x) (((x) << 8) & GENMASK(12, 8))
+#define DEV_MAC_IFG_CFG_TX_IFG_M GENMASK(12, 8)
+#define DEV_MAC_IFG_CFG_TX_IFG_X(x) (((x) & GENMASK(12, 8)) >> 8)
+#define DEV_MAC_IFG_CFG_RX_IFG2(x) (((x) << 4) & GENMASK(7, 4))
+#define DEV_MAC_IFG_CFG_RX_IFG2_M GENMASK(7, 4)
+#define DEV_MAC_IFG_CFG_RX_IFG2_X(x) (((x) & GENMASK(7, 4)) >> 4)
+#define DEV_MAC_IFG_CFG_RX_IFG1(x) ((x) & GENMASK(3, 0))
+#define DEV_MAC_IFG_CFG_RX_IFG1_M GENMASK(3, 0)
+
+#define DEV_MAC_HDX_CFG 0x34
+
+#define DEV_MAC_HDX_CFG_BYPASS_COL_SYNC BIT(26)
+#define DEV_MAC_HDX_CFG_OB_ENA BIT(25)
+#define DEV_MAC_HDX_CFG_WEXC_DIS BIT(24)
+#define DEV_MAC_HDX_CFG_SEED(x) (((x) << 16) & GENMASK(23, 16))
+#define DEV_MAC_HDX_CFG_SEED_M GENMASK(23, 16)
+#define DEV_MAC_HDX_CFG_SEED_X(x) (((x) & GENMASK(23, 16)) >> 16)
+#define DEV_MAC_HDX_CFG_SEED_LOAD BIT(12)
+#define DEV_MAC_HDX_CFG_RETRY_AFTER_EXC_COL_ENA BIT(8)
+#define DEV_MAC_HDX_CFG_LATE_COL_POS(x) ((x) & GENMASK(6, 0))
+#define DEV_MAC_HDX_CFG_LATE_COL_POS_M GENMASK(6, 0)
+
+#define DEV_MAC_DBG_CFG 0x38
+
+#define DEV_MAC_DBG_CFG_TBI_MODE BIT(4)
+#define DEV_MAC_DBG_CFG_IFG_CRS_EXT_CHK_ENA BIT(0)
+
+#define DEV_MAC_FC_MAC_LOW_CFG 0x3c
+
+#define DEV_MAC_FC_MAC_HIGH_CFG 0x40
+
+#define DEV_MAC_STICKY 0x44
+
+#define DEV_MAC_STICKY_RX_IPG_SHRINK_STICKY BIT(9)
+#define DEV_MAC_STICKY_RX_PREAM_SHRINK_STICKY BIT(8)
+#define DEV_MAC_STICKY_RX_CARRIER_EXT_STICKY BIT(7)
+#define DEV_MAC_STICKY_RX_CARRIER_EXT_ERR_STICKY BIT(6)
+#define DEV_MAC_STICKY_RX_JUNK_STICKY BIT(5)
+#define DEV_MAC_STICKY_TX_RETRANSMIT_STICKY BIT(4)
+#define DEV_MAC_STICKY_TX_JAM_STICKY BIT(3)
+#define DEV_MAC_STICKY_TX_FIFO_OFLW_STICKY BIT(2)
+#define DEV_MAC_STICKY_TX_FRM_LEN_OVR_STICKY BIT(1)
+#define DEV_MAC_STICKY_TX_ABORT_STICKY BIT(0)
+
+#define PCS1G_CFG 0x48
+
+#define PCS1G_CFG_LINK_STATUS_TYPE BIT(4)
+#define PCS1G_CFG_AN_LINK_CTRL_ENA BIT(1)
+#define PCS1G_CFG_PCS_ENA BIT(0)
+
+#define PCS1G_MODE_CFG 0x4c
+
+#define PCS1G_MODE_CFG_UNIDIR_MODE_ENA BIT(4)
+#define PCS1G_MODE_CFG_SGMII_MODE_ENA BIT(0)
+
+#define PCS1G_SD_CFG 0x50
+
+#define PCS1G_SD_CFG_SD_SEL BIT(8)
+#define PCS1G_SD_CFG_SD_POL BIT(4)
+#define PCS1G_SD_CFG_SD_ENA BIT(0)
+
+#define PCS1G_ANEG_CFG 0x54
+
+#define PCS1G_ANEG_CFG_ADV_ABILITY(x) (((x) << 16) & GENMASK(31, 16))
+#define PCS1G_ANEG_CFG_ADV_ABILITY_M GENMASK(31, 16)
+#define PCS1G_ANEG_CFG_ADV_ABILITY_X(x) (((x) & GENMASK(31, 16)) >> 16)
+#define PCS1G_ANEG_CFG_SW_RESOLVE_ENA BIT(8)
+#define PCS1G_ANEG_CFG_ANEG_RESTART_ONE_SHOT BIT(1)
+#define PCS1G_ANEG_CFG_ANEG_ENA BIT(0)
+
+#define PCS1G_ANEG_NP_CFG 0x58
+
+#define PCS1G_ANEG_NP_CFG_NP_TX(x) (((x) << 16) & GENMASK(31, 16))
+#define PCS1G_ANEG_NP_CFG_NP_TX_M GENMASK(31, 16)
+#define PCS1G_ANEG_NP_CFG_NP_TX_X(x) (((x) & GENMASK(31, 16)) >> 16)
+#define PCS1G_ANEG_NP_CFG_NP_LOADED_ONE_SHOT BIT(0)
+
+#define PCS1G_LB_CFG 0x5c
+
+#define PCS1G_LB_CFG_RA_ENA BIT(4)
+#define PCS1G_LB_CFG_GMII_PHY_LB_ENA BIT(1)
+#define PCS1G_LB_CFG_TBI_HOST_LB_ENA BIT(0)
+
+#define PCS1G_DBG_CFG 0x60
+
+#define PCS1G_DBG_CFG_UDLT BIT(0)
+
+#define PCS1G_CDET_CFG 0x64
+
+#define PCS1G_CDET_CFG_CDET_ENA BIT(0)
+
+#define PCS1G_ANEG_STATUS 0x68
+
+#define PCS1G_ANEG_STATUS_LP_ADV_ABILITY(x) (((x) << 16) & GENMASK(31, 16))
+#define PCS1G_ANEG_STATUS_LP_ADV_ABILITY_M GENMASK(31, 16)
+#define PCS1G_ANEG_STATUS_LP_ADV_ABILITY_X(x) (((x) & GENMASK(31, 16)) >> 16)
+#define PCS1G_ANEG_STATUS_PR BIT(4)
+#define PCS1G_ANEG_STATUS_PAGE_RX_STICKY BIT(3)
+#define PCS1G_ANEG_STATUS_ANEG_COMPLETE BIT(0)
+
+#define PCS1G_ANEG_NP_STATUS 0x6c
+
+#define PCS1G_LINK_STATUS 0x70
+
+#define PCS1G_LINK_STATUS_DELAY_VAR(x) (((x) << 12) & GENMASK(15, 12))
+#define PCS1G_LINK_STATUS_DELAY_VAR_M GENMASK(15, 12)
+#define PCS1G_LINK_STATUS_DELAY_VAR_X(x) (((x) & GENMASK(15, 12)) >> 12)
+#define PCS1G_LINK_STATUS_SIGNAL_DETECT BIT(8)
+#define PCS1G_LINK_STATUS_LINK_STATUS BIT(4)
+#define PCS1G_LINK_STATUS_SYNC_STATUS BIT(0)
+
+#define PCS1G_LINK_DOWN_CNT 0x74
+
+#define PCS1G_STICKY 0x78
+
+#define PCS1G_STICKY_LINK_DOWN_STICKY BIT(4)
+#define PCS1G_STICKY_OUT_OF_SYNC_STICKY BIT(0)
+
+#define PCS1G_DEBUG_STATUS 0x7c
+
+#define PCS1G_LPI_CFG 0x80
+
+#define PCS1G_LPI_CFG_QSGMII_MS_SEL BIT(20)
+#define PCS1G_LPI_CFG_RX_LPI_OUT_DIS BIT(17)
+#define PCS1G_LPI_CFG_LPI_TESTMODE BIT(16)
+#define PCS1G_LPI_CFG_LPI_RX_WTIM(x) (((x) << 4) & GENMASK(5, 4))
+#define PCS1G_LPI_CFG_LPI_RX_WTIM_M GENMASK(5, 4)
+#define PCS1G_LPI_CFG_LPI_RX_WTIM_X(x) (((x) & GENMASK(5, 4)) >> 4)
+#define PCS1G_LPI_CFG_TX_ASSERT_LPIDLE BIT(0)
+
+#define PCS1G_LPI_WAKE_ERROR_CNT 0x84
+
+#define PCS1G_LPI_STATUS 0x88
+
+#define PCS1G_LPI_STATUS_RX_LPI_FAIL BIT(16)
+#define PCS1G_LPI_STATUS_RX_LPI_EVENT_STICKY BIT(12)
+#define PCS1G_LPI_STATUS_RX_QUIET BIT(9)
+#define PCS1G_LPI_STATUS_RX_LPI_MODE BIT(8)
+#define PCS1G_LPI_STATUS_TX_LPI_EVENT_STICKY BIT(4)
+#define PCS1G_LPI_STATUS_TX_QUIET BIT(1)
+#define PCS1G_LPI_STATUS_TX_LPI_MODE BIT(0)
+
+#define PCS1G_TSTPAT_MODE_CFG 0x8c
+
+#define PCS1G_TSTPAT_STATUS 0x90
+
+#define PCS1G_TSTPAT_STATUS_JTP_ERR_CNT(x) (((x) << 8) & GENMASK(15, 8))
+#define PCS1G_TSTPAT_STATUS_JTP_ERR_CNT_M GENMASK(15, 8)
+#define PCS1G_TSTPAT_STATUS_JTP_ERR_CNT_X(x) (((x) & GENMASK(15, 8)) >> 8)
+#define PCS1G_TSTPAT_STATUS_JTP_ERR BIT(4)
+#define PCS1G_TSTPAT_STATUS_JTP_LOCK BIT(0)
+
+#define DEV_PCS_FX100_CFG 0x94
+
+#define DEV_PCS_FX100_CFG_SD_SEL BIT(26)
+#define DEV_PCS_FX100_CFG_SD_POL BIT(25)
+#define DEV_PCS_FX100_CFG_SD_ENA BIT(24)
+#define DEV_PCS_FX100_CFG_LOOPBACK_ENA BIT(20)
+#define DEV_PCS_FX100_CFG_SWAP_MII_ENA BIT(16)
+#define DEV_PCS_FX100_CFG_RXBITSEL(x) (((x) << 12) & GENMASK(15, 12))
+#define DEV_PCS_FX100_CFG_RXBITSEL_M GENMASK(15, 12)
+#define DEV_PCS_FX100_CFG_RXBITSEL_X(x) (((x) & GENMASK(15, 12)) >> 12)
+#define DEV_PCS_FX100_CFG_SIGDET_CFG(x) (((x) << 9) & GENMASK(10, 9))
+#define DEV_PCS_FX100_CFG_SIGDET_CFG_M GENMASK(10, 9)
+#define DEV_PCS_FX100_CFG_SIGDET_CFG_X(x) (((x) & GENMASK(10, 9)) >> 9)
+#define DEV_PCS_FX100_CFG_LINKHYST_TM_ENA BIT(8)
+#define DEV_PCS_FX100_CFG_LINKHYSTTIMER(x) (((x) << 4) & GENMASK(7, 4))
+#define DEV_PCS_FX100_CFG_LINKHYSTTIMER_M GENMASK(7, 4)
+#define DEV_PCS_FX100_CFG_LINKHYSTTIMER_X(x) (((x) & GENMASK(7, 4)) >> 4)
+#define DEV_PCS_FX100_CFG_UNIDIR_MODE_ENA BIT(3)
+#define DEV_PCS_FX100_CFG_FEFCHK_ENA BIT(2)
+#define DEV_PCS_FX100_CFG_FEFGEN_ENA BIT(1)
+#define DEV_PCS_FX100_CFG_PCS_ENA BIT(0)
+
+#define DEV_PCS_FX100_STATUS 0x98
+
+#define DEV_PCS_FX100_STATUS_EDGE_POS_PTP(x) (((x) << 8) & GENMASK(11, 8))
+#define DEV_PCS_FX100_STATUS_EDGE_POS_PTP_M GENMASK(11, 8)
+#define DEV_PCS_FX100_STATUS_EDGE_POS_PTP_X(x) (((x) & GENMASK(11, 8)) >> 8)
+#define DEV_PCS_FX100_STATUS_PCS_ERROR_STICKY BIT(7)
+#define DEV_PCS_FX100_STATUS_FEF_FOUND_STICKY BIT(6)
+#define DEV_PCS_FX100_STATUS_SSD_ERROR_STICKY BIT(5)
+#define DEV_PCS_FX100_STATUS_SYNC_LOST_STICKY BIT(4)
+#define DEV_PCS_FX100_STATUS_FEF_STATUS BIT(2)
+#define DEV_PCS_FX100_STATUS_SIGNAL_DETECT BIT(1)
+#define DEV_PCS_FX100_STATUS_SYNC_STATUS BIT(0)
+
+#endif
diff --git a/drivers/net/ethernet/mscc/ocelot_dev_gmii.h b/drivers/net/ethernet/mscc/ocelot_dev_gmii.h
new file mode 100644
index 000000000000..6aa40ea223a2
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_dev_gmii.h
@@ -0,0 +1,154 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Microsemi Ocelot Switch driver
+ *
+ * Copyright (c) 2017 Microsemi Corporation
+ */
+
+#ifndef _MSCC_OCELOT_DEV_GMII_H_
+#define _MSCC_OCELOT_DEV_GMII_H_
+
+#define DEV_GMII_PORT_MODE_CLOCK_CFG 0x0
+
+#define DEV_GMII_PORT_MODE_CLOCK_CFG_MAC_TX_RST BIT(5)
+#define DEV_GMII_PORT_MODE_CLOCK_CFG_MAC_RX_RST BIT(4)
+#define DEV_GMII_PORT_MODE_CLOCK_CFG_PORT_RST BIT(3)
+#define DEV_GMII_PORT_MODE_CLOCK_CFG_PHY_RST BIT(2)
+#define DEV_GMII_PORT_MODE_CLOCK_CFG_LINK_SPEED(x) ((x) & GENMASK(1, 0))
+#define DEV_GMII_PORT_MODE_CLOCK_CFG_LINK_SPEED_M GENMASK(1, 0)
+
+#define DEV_GMII_PORT_MODE_PORT_MISC 0x4
+
+#define DEV_GMII_PORT_MODE_PORT_MISC_MPLS_RX_ENA BIT(5)
+#define DEV_GMII_PORT_MODE_PORT_MISC_FWD_ERROR_ENA BIT(4)
+#define DEV_GMII_PORT_MODE_PORT_MISC_FWD_PAUSE_ENA BIT(3)
+#define DEV_GMII_PORT_MODE_PORT_MISC_FWD_CTRL_ENA BIT(2)
+#define DEV_GMII_PORT_MODE_PORT_MISC_GMII_LOOP_ENA BIT(1)
+#define DEV_GMII_PORT_MODE_PORT_MISC_DEV_LOOP_ENA BIT(0)
+
+#define DEV_GMII_PORT_MODE_EVENTS 0x8
+
+#define DEV_GMII_PORT_MODE_EEE_CFG 0xc
+
+#define DEV_GMII_PORT_MODE_EEE_CFG_EEE_ENA BIT(22)
+#define DEV_GMII_PORT_MODE_EEE_CFG_EEE_TIMER_AGE(x) (((x) << 15) & GENMASK(21, 15))
+#define DEV_GMII_PORT_MODE_EEE_CFG_EEE_TIMER_AGE_M GENMASK(21, 15)
+#define DEV_GMII_PORT_MODE_EEE_CFG_EEE_TIMER_AGE_X(x) (((x) & GENMASK(21, 15)) >> 15)
+#define DEV_GMII_PORT_MODE_EEE_CFG_EEE_TIMER_WAKEUP(x) (((x) << 8) & GENMASK(14, 8))
+#define DEV_GMII_PORT_MODE_EEE_CFG_EEE_TIMER_WAKEUP_M GENMASK(14, 8)
+#define DEV_GMII_PORT_MODE_EEE_CFG_EEE_TIMER_WAKEUP_X(x) (((x) & GENMASK(14, 8)) >> 8)
+#define DEV_GMII_PORT_MODE_EEE_CFG_EEE_TIMER_HOLDOFF(x) (((x) << 1) & GENMASK(7, 1))
+#define DEV_GMII_PORT_MODE_EEE_CFG_EEE_TIMER_HOLDOFF_M GENMASK(7, 1)
+#define DEV_GMII_PORT_MODE_EEE_CFG_EEE_TIMER_HOLDOFF_X(x) (((x) & GENMASK(7, 1)) >> 1)
+#define DEV_GMII_PORT_MODE_EEE_CFG_PORT_LPI BIT(0)
+
+#define DEV_GMII_PORT_MODE_RX_PATH_DELAY 0x10
+
+#define DEV_GMII_PORT_MODE_TX_PATH_DELAY 0x14
+
+#define DEV_GMII_PORT_MODE_PTP_PREDICT_CFG 0x18
+
+#define DEV_GMII_MAC_CFG_STATUS_MAC_ENA_CFG 0x1c
+
+#define DEV_GMII_MAC_CFG_STATUS_MAC_ENA_CFG_RX_ENA BIT(4)
+#define DEV_GMII_MAC_CFG_STATUS_MAC_ENA_CFG_TX_ENA BIT(0)
+
+#define DEV_GMII_MAC_CFG_STATUS_MAC_MODE_CFG 0x20
+
+#define DEV_GMII_MAC_CFG_STATUS_MAC_MODE_CFG_FC_WORD_SYNC_ENA BIT(8)
+#define DEV_GMII_MAC_CFG_STATUS_MAC_MODE_CFG_GIGA_MODE_ENA BIT(4)
+#define DEV_GMII_MAC_CFG_STATUS_MAC_MODE_CFG_FDX_ENA BIT(0)
+
+#define DEV_GMII_MAC_CFG_STATUS_MAC_MAXLEN_CFG 0x24
+
+#define DEV_GMII_MAC_CFG_STATUS_MAC_TAGS_CFG 0x28
+
+#define DEV_GMII_MAC_CFG_STATUS_MAC_TAGS_CFG_TAG_ID(x) (((x) << 16) & GENMASK(31, 16))
+#define DEV_GMII_MAC_CFG_STATUS_MAC_TAGS_CFG_TAG_ID_M GENMASK(31, 16)
+#define DEV_GMII_MAC_CFG_STATUS_MAC_TAGS_CFG_TAG_ID_X(x) (((x) & GENMASK(31, 16)) >> 16)
+#define DEV_GMII_MAC_CFG_STATUS_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA BIT(2)
+#define DEV_GMII_MAC_CFG_STATUS_MAC_TAGS_CFG_PB_ENA BIT(1)
+#define DEV_GMII_MAC_CFG_STATUS_MAC_TAGS_CFG_VLAN_AWR_ENA BIT(0)
+
+#define DEV_GMII_MAC_CFG_STATUS_MAC_ADV_CHK_CFG 0x2c
+
+#define DEV_GMII_MAC_CFG_STATUS_MAC_ADV_CHK_CFG_LEN_DROP_ENA BIT(0)
+
+#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG 0x30
+
+#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG_RESTORE_OLD_IPG_CHECK BIT(17)
+#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG_REDUCED_TX_IFG BIT(16)
+#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG_TX_IFG(x) (((x) << 8) & GENMASK(12, 8))
+#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG_TX_IFG_M GENMASK(12, 8)
+#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG_TX_IFG_X(x) (((x) & GENMASK(12, 8)) >> 8)
+#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG_RX_IFG2(x) (((x) << 4) & GENMASK(7, 4))
+#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG_RX_IFG2_M GENMASK(7, 4)
+#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG_RX_IFG2_X(x) (((x) & GENMASK(7, 4)) >> 4)
+#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG_RX_IFG1(x) ((x) & GENMASK(3, 0))
+#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG_RX_IFG1_M GENMASK(3, 0)
+
+#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG 0x34
+
+#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG_BYPASS_COL_SYNC BIT(26)
+#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG_OB_ENA BIT(25)
+#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG_WEXC_DIS BIT(24)
+#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG_SEED(x) (((x) << 16) & GENMASK(23, 16))
+#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG_SEED_M GENMASK(23, 16)
+#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG_SEED_X(x) (((x) & GENMASK(23, 16)) >> 16)
+#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG_SEED_LOAD BIT(12)
+#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG_RETRY_AFTER_EXC_COL_ENA BIT(8)
+#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG_LATE_COL_POS(x) ((x) & GENMASK(6, 0))
+#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG_LATE_COL_POS_M GENMASK(6, 0)
+
+#define DEV_GMII_MAC_CFG_STATUS_MAC_DBG_CFG 0x38
+
+#define DEV_GMII_MAC_CFG_STATUS_MAC_DBG_CFG_TBI_MODE BIT(4)
+#define DEV_GMII_MAC_CFG_STATUS_MAC_DBG_CFG_IFG_CRS_EXT_CHK_ENA BIT(0)
+
+#define DEV_GMII_MAC_CFG_STATUS_MAC_FC_MAC_LOW_CFG 0x3c
+
+#define DEV_GMII_MAC_CFG_STATUS_MAC_FC_MAC_HIGH_CFG 0x40
+
+#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY 0x44
+
+#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY_RX_IPG_SHRINK_STICKY BIT(9)
+#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY_RX_PREAM_SHRINK_STICKY BIT(8)
+#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY_RX_CARRIER_EXT_STICKY BIT(7)
+#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY_RX_CARRIER_EXT_ERR_STICKY BIT(6)
+#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY_RX_JUNK_STICKY BIT(5)
+#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY_TX_RETRANSMIT_STICKY BIT(4)
+#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY_TX_JAM_STICKY BIT(3)
+#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY_TX_FIFO_OFLW_STICKY BIT(2)
+#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY_TX_FRM_LEN_OVR_STICKY BIT(1)
+#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY_TX_ABORT_STICKY BIT(0)
+
+#define DEV_GMII_MM_CONFIG_ENABLE_CONFIG 0x48
+
+#define DEV_GMII_MM_CONFIG_ENABLE_CONFIG_MM_RX_ENA BIT(0)
+#define DEV_GMII_MM_CONFIG_ENABLE_CONFIG_MM_TX_ENA BIT(4)
+#define DEV_GMII_MM_CONFIG_ENABLE_CONFIG_KEEP_S_AFTER_D BIT(8)
+
+#define DEV_GMII_MM_CONFIG_VERIF_CONFIG 0x4c
+
+#define DEV_GMII_MM_CONFIG_VERIF_CONFIG_PRM_VERIFY_DIS BIT(0)
+#define DEV_GMII_MM_CONFIG_VERIF_CONFIG_PRM_VERIFY_TIME(x) (((x) << 4) & GENMASK(11, 4))
+#define DEV_GMII_MM_CONFIG_VERIF_CONFIG_PRM_VERIFY_TIME_M GENMASK(11, 4)
+#define DEV_GMII_MM_CONFIG_VERIF_CONFIG_PRM_VERIFY_TIME_X(x) (((x) & GENMASK(11, 4)) >> 4)
+#define DEV_GMII_MM_CONFIG_VERIF_CONFIG_VERIF_TIMER_UNITS(x) (((x) << 12) & GENMASK(13, 12))
+#define DEV_GMII_MM_CONFIG_VERIF_CONFIG_VERIF_TIMER_UNITS_M GENMASK(13, 12)
+#define DEV_GMII_MM_CONFIG_VERIF_CONFIG_VERIF_TIMER_UNITS_X(x) (((x) & GENMASK(13, 12)) >> 12)
+
+#define DEV_GMII_MM_STATISTICS_MM_STATUS 0x50
+
+#define DEV_GMII_MM_STATISTICS_MM_STATUS_PRMPT_ACTIVE_STATUS BIT(0)
+#define DEV_GMII_MM_STATISTICS_MM_STATUS_PRMPT_ACTIVE_STICKY BIT(4)
+#define DEV_GMII_MM_STATISTICS_MM_STATUS_PRMPT_VERIFY_STATE(x) (((x) << 8) & GENMASK(10, 8))
+#define DEV_GMII_MM_STATISTICS_MM_STATUS_PRMPT_VERIFY_STATE_M GENMASK(10, 8)
+#define DEV_GMII_MM_STATISTICS_MM_STATUS_PRMPT_VERIFY_STATE_X(x) (((x) & GENMASK(10, 8)) >> 8)
+#define DEV_GMII_MM_STATISTICS_MM_STATUS_UNEXP_RX_PFRM_STICKY BIT(12)
+#define DEV_GMII_MM_STATISTICS_MM_STATUS_UNEXP_TX_PFRM_STICKY BIT(16)
+#define DEV_GMII_MM_STATISTICS_MM_STATUS_MM_RX_FRAME_STATUS BIT(20)
+#define DEV_GMII_MM_STATISTICS_MM_STATUS_MM_TX_FRAME_STATUS BIT(24)
+#define DEV_GMII_MM_STATISTICS_MM_STATUS_MM_TX_PRMPT_STATUS BIT(28)
+
+#endif
diff --git a/drivers/net/ethernet/mscc/ocelot_hsio.h b/drivers/net/ethernet/mscc/ocelot_hsio.h
new file mode 100644
index 000000000000..d93ddec3931b
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_hsio.h
@@ -0,0 +1,785 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Microsemi Ocelot Switch driver
+ *
+ * Copyright (c) 2017 Microsemi Corporation
+ */
+
+#ifndef _MSCC_OCELOT_HSIO_H_
+#define _MSCC_OCELOT_HSIO_H_
+
+#define HSIO_PLL5G_CFG0_ENA_ROT BIT(31)
+#define HSIO_PLL5G_CFG0_ENA_LANE BIT(30)
+#define HSIO_PLL5G_CFG0_ENA_CLKTREE BIT(29)
+#define HSIO_PLL5G_CFG0_DIV4 BIT(28)
+#define HSIO_PLL5G_CFG0_ENA_LOCK_FINE BIT(27)
+#define HSIO_PLL5G_CFG0_SELBGV820(x) (((x) << 23) & GENMASK(26, 23))
+#define HSIO_PLL5G_CFG0_SELBGV820_M GENMASK(26, 23)
+#define HSIO_PLL5G_CFG0_SELBGV820_X(x) (((x) & GENMASK(26, 23)) >> 23)
+#define HSIO_PLL5G_CFG0_LOOP_BW_RES(x) (((x) << 18) & GENMASK(22, 18))
+#define HSIO_PLL5G_CFG0_LOOP_BW_RES_M GENMASK(22, 18)
+#define HSIO_PLL5G_CFG0_LOOP_BW_RES_X(x) (((x) & GENMASK(22, 18)) >> 18)
+#define HSIO_PLL5G_CFG0_SELCPI(x) (((x) << 16) & GENMASK(17, 16))
+#define HSIO_PLL5G_CFG0_SELCPI_M GENMASK(17, 16)
+#define HSIO_PLL5G_CFG0_SELCPI_X(x) (((x) & GENMASK(17, 16)) >> 16)
+#define HSIO_PLL5G_CFG0_ENA_VCO_CONTRH BIT(15)
+#define HSIO_PLL5G_CFG0_ENA_CP1 BIT(14)
+#define HSIO_PLL5G_CFG0_ENA_VCO_BUF BIT(13)
+#define HSIO_PLL5G_CFG0_ENA_BIAS BIT(12)
+#define HSIO_PLL5G_CFG0_CPU_CLK_DIV(x) (((x) << 6) & GENMASK(11, 6))
+#define HSIO_PLL5G_CFG0_CPU_CLK_DIV_M GENMASK(11, 6)
+#define HSIO_PLL5G_CFG0_CPU_CLK_DIV_X(x) (((x) & GENMASK(11, 6)) >> 6)
+#define HSIO_PLL5G_CFG0_CORE_CLK_DIV(x) ((x) & GENMASK(5, 0))
+#define HSIO_PLL5G_CFG0_CORE_CLK_DIV_M GENMASK(5, 0)
+
+#define HSIO_PLL5G_CFG1_ENA_DIRECT BIT(18)
+#define HSIO_PLL5G_CFG1_ROT_SPEED BIT(17)
+#define HSIO_PLL5G_CFG1_ROT_DIR BIT(16)
+#define HSIO_PLL5G_CFG1_READBACK_DATA_SEL BIT(15)
+#define HSIO_PLL5G_CFG1_RC_ENABLE BIT(14)
+#define HSIO_PLL5G_CFG1_RC_CTRL_DATA(x) (((x) << 6) & GENMASK(13, 6))
+#define HSIO_PLL5G_CFG1_RC_CTRL_DATA_M GENMASK(13, 6)
+#define HSIO_PLL5G_CFG1_RC_CTRL_DATA_X(x) (((x) & GENMASK(13, 6)) >> 6)
+#define HSIO_PLL5G_CFG1_QUARTER_RATE BIT(5)
+#define HSIO_PLL5G_CFG1_PWD_TX BIT(4)
+#define HSIO_PLL5G_CFG1_PWD_RX BIT(3)
+#define HSIO_PLL5G_CFG1_OUT_OF_RANGE_RECAL_ENA BIT(2)
+#define HSIO_PLL5G_CFG1_HALF_RATE BIT(1)
+#define HSIO_PLL5G_CFG1_FORCE_SET_ENA BIT(0)
+
+#define HSIO_PLL5G_CFG2_ENA_TEST_MODE BIT(30)
+#define HSIO_PLL5G_CFG2_ENA_PFD_IN_FLIP BIT(29)
+#define HSIO_PLL5G_CFG2_ENA_VCO_NREF_TESTOUT BIT(28)
+#define HSIO_PLL5G_CFG2_ENA_FBTESTOUT BIT(27)
+#define HSIO_PLL5G_CFG2_ENA_RCPLL BIT(26)
+#define HSIO_PLL5G_CFG2_ENA_CP2 BIT(25)
+#define HSIO_PLL5G_CFG2_ENA_CLK_BYPASS1 BIT(24)
+#define HSIO_PLL5G_CFG2_AMPC_SEL(x) (((x) << 16) & GENMASK(23, 16))
+#define HSIO_PLL5G_CFG2_AMPC_SEL_M GENMASK(23, 16)
+#define HSIO_PLL5G_CFG2_AMPC_SEL_X(x) (((x) & GENMASK(23, 16)) >> 16)
+#define HSIO_PLL5G_CFG2_ENA_CLK_BYPASS BIT(15)
+#define HSIO_PLL5G_CFG2_PWD_AMPCTRL_N BIT(14)
+#define HSIO_PLL5G_CFG2_ENA_AMPCTRL BIT(13)
+#define HSIO_PLL5G_CFG2_ENA_AMP_CTRL_FORCE BIT(12)
+#define HSIO_PLL5G_CFG2_FRC_FSM_POR BIT(11)
+#define HSIO_PLL5G_CFG2_DISABLE_FSM_POR BIT(10)
+#define HSIO_PLL5G_CFG2_GAIN_TEST(x) (((x) << 5) & GENMASK(9, 5))
+#define HSIO_PLL5G_CFG2_GAIN_TEST_M GENMASK(9, 5)
+#define HSIO_PLL5G_CFG2_GAIN_TEST_X(x) (((x) & GENMASK(9, 5)) >> 5)
+#define HSIO_PLL5G_CFG2_EN_RESET_OVERRUN BIT(4)
+#define HSIO_PLL5G_CFG2_EN_RESET_LIM_DET BIT(3)
+#define HSIO_PLL5G_CFG2_EN_RESET_FRQ_DET BIT(2)
+#define HSIO_PLL5G_CFG2_DISABLE_FSM BIT(1)
+#define HSIO_PLL5G_CFG2_ENA_GAIN_TEST BIT(0)
+
+#define HSIO_PLL5G_CFG3_TEST_ANA_OUT_SEL(x) (((x) << 22) & GENMASK(23, 22))
+#define HSIO_PLL5G_CFG3_TEST_ANA_OUT_SEL_M GENMASK(23, 22)
+#define HSIO_PLL5G_CFG3_TEST_ANA_OUT_SEL_X(x) (((x) & GENMASK(23, 22)) >> 22)
+#define HSIO_PLL5G_CFG3_TESTOUT_SEL(x) (((x) << 19) & GENMASK(21, 19))
+#define HSIO_PLL5G_CFG3_TESTOUT_SEL_M GENMASK(21, 19)
+#define HSIO_PLL5G_CFG3_TESTOUT_SEL_X(x) (((x) & GENMASK(21, 19)) >> 19)
+#define HSIO_PLL5G_CFG3_ENA_ANA_TEST_OUT BIT(18)
+#define HSIO_PLL5G_CFG3_ENA_TEST_OUT BIT(17)
+#define HSIO_PLL5G_CFG3_SEL_FBDCLK BIT(16)
+#define HSIO_PLL5G_CFG3_SEL_CML_CMOS_PFD BIT(15)
+#define HSIO_PLL5G_CFG3_RST_FB_N BIT(14)
+#define HSIO_PLL5G_CFG3_FORCE_VCO_CONTRH BIT(13)
+#define HSIO_PLL5G_CFG3_FORCE_LO BIT(12)
+#define HSIO_PLL5G_CFG3_FORCE_HI BIT(11)
+#define HSIO_PLL5G_CFG3_FORCE_ENA BIT(10)
+#define HSIO_PLL5G_CFG3_FORCE_CP BIT(9)
+#define HSIO_PLL5G_CFG3_FBDIVSEL_TST_ENA BIT(8)
+#define HSIO_PLL5G_CFG3_FBDIVSEL(x) ((x) & GENMASK(7, 0))
+#define HSIO_PLL5G_CFG3_FBDIVSEL_M GENMASK(7, 0)
+
+#define HSIO_PLL5G_CFG4_IB_BIAS_CTRL(x) (((x) << 16) & GENMASK(23, 16))
+#define HSIO_PLL5G_CFG4_IB_BIAS_CTRL_M GENMASK(23, 16)
+#define HSIO_PLL5G_CFG4_IB_BIAS_CTRL_X(x) (((x) & GENMASK(23, 16)) >> 16)
+#define HSIO_PLL5G_CFG4_IB_CTRL(x) ((x) & GENMASK(15, 0))
+#define HSIO_PLL5G_CFG4_IB_CTRL_M GENMASK(15, 0)
+
+#define HSIO_PLL5G_CFG5_OB_BIAS_CTRL(x) (((x) << 16) & GENMASK(23, 16))
+#define HSIO_PLL5G_CFG5_OB_BIAS_CTRL_M GENMASK(23, 16)
+#define HSIO_PLL5G_CFG5_OB_BIAS_CTRL_X(x) (((x) & GENMASK(23, 16)) >> 16)
+#define HSIO_PLL5G_CFG5_OB_CTRL(x) ((x) & GENMASK(15, 0))
+#define HSIO_PLL5G_CFG5_OB_CTRL_M GENMASK(15, 0)
+
+#define HSIO_PLL5G_CFG6_REFCLK_SEL_SRC BIT(23)
+#define HSIO_PLL5G_CFG6_REFCLK_SEL(x) (((x) << 20) & GENMASK(22, 20))
+#define HSIO_PLL5G_CFG6_REFCLK_SEL_M GENMASK(22, 20)
+#define HSIO_PLL5G_CFG6_REFCLK_SEL_X(x) (((x) & GENMASK(22, 20)) >> 20)
+#define HSIO_PLL5G_CFG6_REFCLK_SRC BIT(19)
+#define HSIO_PLL5G_CFG6_POR_DEL_SEL(x) (((x) << 16) & GENMASK(17, 16))
+#define HSIO_PLL5G_CFG6_POR_DEL_SEL_M GENMASK(17, 16)
+#define HSIO_PLL5G_CFG6_POR_DEL_SEL_X(x) (((x) & GENMASK(17, 16)) >> 16)
+#define HSIO_PLL5G_CFG6_DIV125REF_SEL(x) (((x) << 8) & GENMASK(15, 8))
+#define HSIO_PLL5G_CFG6_DIV125REF_SEL_M GENMASK(15, 8)
+#define HSIO_PLL5G_CFG6_DIV125REF_SEL_X(x) (((x) & GENMASK(15, 8)) >> 8)
+#define HSIO_PLL5G_CFG6_ENA_REFCLKC2 BIT(7)
+#define HSIO_PLL5G_CFG6_ENA_FBCLKC2 BIT(6)
+#define HSIO_PLL5G_CFG6_DDR_CLK_DIV(x) ((x) & GENMASK(5, 0))
+#define HSIO_PLL5G_CFG6_DDR_CLK_DIV_M GENMASK(5, 0)
+
+#define HSIO_PLL5G_STATUS0_RANGE_LIM BIT(12)
+#define HSIO_PLL5G_STATUS0_OUT_OF_RANGE_ERR BIT(11)
+#define HSIO_PLL5G_STATUS0_CALIBRATION_ERR BIT(10)
+#define HSIO_PLL5G_STATUS0_CALIBRATION_DONE BIT(9)
+#define HSIO_PLL5G_STATUS0_READBACK_DATA(x) (((x) << 1) & GENMASK(8, 1))
+#define HSIO_PLL5G_STATUS0_READBACK_DATA_M GENMASK(8, 1)
+#define HSIO_PLL5G_STATUS0_READBACK_DATA_X(x) (((x) & GENMASK(8, 1)) >> 1)
+#define HSIO_PLL5G_STATUS0_LOCK_STATUS BIT(0)
+
+#define HSIO_PLL5G_STATUS1_SIG_DEL(x) (((x) << 21) & GENMASK(28, 21))
+#define HSIO_PLL5G_STATUS1_SIG_DEL_M GENMASK(28, 21)
+#define HSIO_PLL5G_STATUS1_SIG_DEL_X(x) (((x) & GENMASK(28, 21)) >> 21)
+#define HSIO_PLL5G_STATUS1_GAIN_STAT(x) (((x) << 16) & GENMASK(20, 16))
+#define HSIO_PLL5G_STATUS1_GAIN_STAT_M GENMASK(20, 16)
+#define HSIO_PLL5G_STATUS1_GAIN_STAT_X(x) (((x) & GENMASK(20, 16)) >> 16)
+#define HSIO_PLL5G_STATUS1_FBCNT_DIF(x) (((x) << 4) & GENMASK(13, 4))
+#define HSIO_PLL5G_STATUS1_FBCNT_DIF_M GENMASK(13, 4)
+#define HSIO_PLL5G_STATUS1_FBCNT_DIF_X(x) (((x) & GENMASK(13, 4)) >> 4)
+#define HSIO_PLL5G_STATUS1_FSM_STAT(x) (((x) << 1) & GENMASK(3, 1))
+#define HSIO_PLL5G_STATUS1_FSM_STAT_M GENMASK(3, 1)
+#define HSIO_PLL5G_STATUS1_FSM_STAT_X(x) (((x) & GENMASK(3, 1)) >> 1)
+#define HSIO_PLL5G_STATUS1_FSM_LOCK BIT(0)
+
+#define HSIO_PLL5G_BIST_CFG0_PLLB_START_BIST BIT(31)
+#define HSIO_PLL5G_BIST_CFG0_PLLB_MEAS_MODE BIT(30)
+#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_REPEAT(x) (((x) << 20) & GENMASK(23, 20))
+#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_REPEAT_M GENMASK(23, 20)
+#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_REPEAT_X(x) (((x) & GENMASK(23, 20)) >> 20)
+#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_UNCERT(x) (((x) << 16) & GENMASK(19, 16))
+#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_UNCERT_M GENMASK(19, 16)
+#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_UNCERT_X(x) (((x) & GENMASK(19, 16)) >> 16)
+#define HSIO_PLL5G_BIST_CFG0_PLLB_DIV_FACTOR_PRE(x) ((x) & GENMASK(15, 0))
+#define HSIO_PLL5G_BIST_CFG0_PLLB_DIV_FACTOR_PRE_M GENMASK(15, 0)
+
+#define HSIO_PLL5G_BIST_STAT0_PLLB_FSM_STAT(x) (((x) << 4) & GENMASK(7, 4))
+#define HSIO_PLL5G_BIST_STAT0_PLLB_FSM_STAT_M GENMASK(7, 4)
+#define HSIO_PLL5G_BIST_STAT0_PLLB_FSM_STAT_X(x) (((x) & GENMASK(7, 4)) >> 4)
+#define HSIO_PLL5G_BIST_STAT0_PLLB_BUSY BIT(2)
+#define HSIO_PLL5G_BIST_STAT0_PLLB_DONE_N BIT(1)
+#define HSIO_PLL5G_BIST_STAT0_PLLB_FAIL BIT(0)
+
+#define HSIO_PLL5G_BIST_STAT1_PLLB_CNT_OUT(x) (((x) << 16) & GENMASK(31, 16))
+#define HSIO_PLL5G_BIST_STAT1_PLLB_CNT_OUT_M GENMASK(31, 16)
+#define HSIO_PLL5G_BIST_STAT1_PLLB_CNT_OUT_X(x) (((x) & GENMASK(31, 16)) >> 16)
+#define HSIO_PLL5G_BIST_STAT1_PLLB_CNT_REF_DIFF(x) ((x) & GENMASK(15, 0))
+#define HSIO_PLL5G_BIST_STAT1_PLLB_CNT_REF_DIFF_M GENMASK(15, 0)
+
+#define HSIO_RCOMP_CFG0_PWD_ENA BIT(13)
+#define HSIO_RCOMP_CFG0_RUN_CAL BIT(12)
+#define HSIO_RCOMP_CFG0_SPEED_SEL(x) (((x) << 10) & GENMASK(11, 10))
+#define HSIO_RCOMP_CFG0_SPEED_SEL_M GENMASK(11, 10)
+#define HSIO_RCOMP_CFG0_SPEED_SEL_X(x) (((x) & GENMASK(11, 10)) >> 10)
+#define HSIO_RCOMP_CFG0_MODE_SEL(x) (((x) << 8) & GENMASK(9, 8))
+#define HSIO_RCOMP_CFG0_MODE_SEL_M GENMASK(9, 8)
+#define HSIO_RCOMP_CFG0_MODE_SEL_X(x) (((x) & GENMASK(9, 8)) >> 8)
+#define HSIO_RCOMP_CFG0_FORCE_ENA BIT(4)
+#define HSIO_RCOMP_CFG0_RCOMP_VAL(x) ((x) & GENMASK(3, 0))
+#define HSIO_RCOMP_CFG0_RCOMP_VAL_M GENMASK(3, 0)
+
+#define HSIO_RCOMP_STATUS_BUSY BIT(12)
+#define HSIO_RCOMP_STATUS_DELTA_ALERT BIT(7)
+#define HSIO_RCOMP_STATUS_RCOMP(x) ((x) & GENMASK(3, 0))
+#define HSIO_RCOMP_STATUS_RCOMP_M GENMASK(3, 0)
+
+#define HSIO_SYNC_ETH_CFG_RSZ 0x4
+
+#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_SRC(x) (((x) << 4) & GENMASK(7, 4))
+#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_SRC_M GENMASK(7, 4)
+#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_SRC_X(x) (((x) & GENMASK(7, 4)) >> 4)
+#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_DIV(x) (((x) << 1) & GENMASK(3, 1))
+#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_DIV_M GENMASK(3, 1)
+#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_DIV_X(x) (((x) & GENMASK(3, 1)) >> 1)
+#define HSIO_SYNC_ETH_CFG_RECO_CLK_ENA BIT(0)
+
+#define HSIO_SYNC_ETH_PLL_CFG_PLL_AUTO_SQUELCH_ENA BIT(0)
+
+#define HSIO_S1G_DES_CFG_DES_PHS_CTRL(x) (((x) << 13) & GENMASK(16, 13))
+#define HSIO_S1G_DES_CFG_DES_PHS_CTRL_M GENMASK(16, 13)
+#define HSIO_S1G_DES_CFG_DES_PHS_CTRL_X(x) (((x) & GENMASK(16, 13)) >> 13)
+#define HSIO_S1G_DES_CFG_DES_CPMD_SEL(x) (((x) << 11) & GENMASK(12, 11))
+#define HSIO_S1G_DES_CFG_DES_CPMD_SEL_M GENMASK(12, 11)
+#define HSIO_S1G_DES_CFG_DES_CPMD_SEL_X(x) (((x) & GENMASK(12, 11)) >> 11)
+#define HSIO_S1G_DES_CFG_DES_MBTR_CTRL(x) (((x) << 8) & GENMASK(10, 8))
+#define HSIO_S1G_DES_CFG_DES_MBTR_CTRL_M GENMASK(10, 8)
+#define HSIO_S1G_DES_CFG_DES_MBTR_CTRL_X(x) (((x) & GENMASK(10, 8)) >> 8)
+#define HSIO_S1G_DES_CFG_DES_BW_ANA(x) (((x) << 5) & GENMASK(7, 5))
+#define HSIO_S1G_DES_CFG_DES_BW_ANA_M GENMASK(7, 5)
+#define HSIO_S1G_DES_CFG_DES_BW_ANA_X(x) (((x) & GENMASK(7, 5)) >> 5)
+#define HSIO_S1G_DES_CFG_DES_SWAP_ANA BIT(4)
+#define HSIO_S1G_DES_CFG_DES_BW_HYST(x) (((x) << 1) & GENMASK(3, 1))
+#define HSIO_S1G_DES_CFG_DES_BW_HYST_M GENMASK(3, 1)
+#define HSIO_S1G_DES_CFG_DES_BW_HYST_X(x) (((x) & GENMASK(3, 1)) >> 1)
+#define HSIO_S1G_DES_CFG_DES_SWAP_HYST BIT(0)
+
+#define HSIO_S1G_IB_CFG_IB_FX100_ENA BIT(27)
+#define HSIO_S1G_IB_CFG_ACJTAG_HYST(x) (((x) << 24) & GENMASK(26, 24))
+#define HSIO_S1G_IB_CFG_ACJTAG_HYST_M GENMASK(26, 24)
+#define HSIO_S1G_IB_CFG_ACJTAG_HYST_X(x) (((x) & GENMASK(26, 24)) >> 24)
+#define HSIO_S1G_IB_CFG_IB_DET_LEV(x) (((x) << 19) & GENMASK(21, 19))
+#define HSIO_S1G_IB_CFG_IB_DET_LEV_M GENMASK(21, 19)
+#define HSIO_S1G_IB_CFG_IB_DET_LEV_X(x) (((x) & GENMASK(21, 19)) >> 19)
+#define HSIO_S1G_IB_CFG_IB_HYST_LEV BIT(14)
+#define HSIO_S1G_IB_CFG_IB_ENA_CMV_TERM BIT(13)
+#define HSIO_S1G_IB_CFG_IB_ENA_DC_COUPLING BIT(12)
+#define HSIO_S1G_IB_CFG_IB_ENA_DETLEV BIT(11)
+#define HSIO_S1G_IB_CFG_IB_ENA_HYST BIT(10)
+#define HSIO_S1G_IB_CFG_IB_ENA_OFFSET_COMP BIT(9)
+#define HSIO_S1G_IB_CFG_IB_EQ_GAIN(x) (((x) << 6) & GENMASK(8, 6))
+#define HSIO_S1G_IB_CFG_IB_EQ_GAIN_M GENMASK(8, 6)
+#define HSIO_S1G_IB_CFG_IB_EQ_GAIN_X(x) (((x) & GENMASK(8, 6)) >> 6)
+#define HSIO_S1G_IB_CFG_IB_SEL_CORNER_FREQ(x) (((x) << 4) & GENMASK(5, 4))
+#define HSIO_S1G_IB_CFG_IB_SEL_CORNER_FREQ_M GENMASK(5, 4)
+#define HSIO_S1G_IB_CFG_IB_SEL_CORNER_FREQ_X(x) (((x) & GENMASK(5, 4)) >> 4)
+#define HSIO_S1G_IB_CFG_IB_RESISTOR_CTRL(x) ((x) & GENMASK(3, 0))
+#define HSIO_S1G_IB_CFG_IB_RESISTOR_CTRL_M GENMASK(3, 0)
+
+#define HSIO_S1G_OB_CFG_OB_SLP(x) (((x) << 17) & GENMASK(18, 17))
+#define HSIO_S1G_OB_CFG_OB_SLP_M GENMASK(18, 17)
+#define HSIO_S1G_OB_CFG_OB_SLP_X(x) (((x) & GENMASK(18, 17)) >> 17)
+#define HSIO_S1G_OB_CFG_OB_AMP_CTRL(x) (((x) << 13) & GENMASK(16, 13))
+#define HSIO_S1G_OB_CFG_OB_AMP_CTRL_M GENMASK(16, 13)
+#define HSIO_S1G_OB_CFG_OB_AMP_CTRL_X(x) (((x) & GENMASK(16, 13)) >> 13)
+#define HSIO_S1G_OB_CFG_OB_CMM_BIAS_CTRL(x) (((x) << 10) & GENMASK(12, 10))
+#define HSIO_S1G_OB_CFG_OB_CMM_BIAS_CTRL_M GENMASK(12, 10)
+#define HSIO_S1G_OB_CFG_OB_CMM_BIAS_CTRL_X(x) (((x) & GENMASK(12, 10)) >> 10)
+#define HSIO_S1G_OB_CFG_OB_DIS_VCM_CTRL BIT(9)
+#define HSIO_S1G_OB_CFG_OB_EN_MEAS_VREG BIT(8)
+#define HSIO_S1G_OB_CFG_OB_VCM_CTRL(x) (((x) << 4) & GENMASK(7, 4))
+#define HSIO_S1G_OB_CFG_OB_VCM_CTRL_M GENMASK(7, 4)
+#define HSIO_S1G_OB_CFG_OB_VCM_CTRL_X(x) (((x) & GENMASK(7, 4)) >> 4)
+#define HSIO_S1G_OB_CFG_OB_RESISTOR_CTRL(x) ((x) & GENMASK(3, 0))
+#define HSIO_S1G_OB_CFG_OB_RESISTOR_CTRL_M GENMASK(3, 0)
+
+#define HSIO_S1G_SER_CFG_SER_IDLE BIT(9)
+#define HSIO_S1G_SER_CFG_SER_DEEMPH BIT(8)
+#define HSIO_S1G_SER_CFG_SER_CPMD_SEL BIT(7)
+#define HSIO_S1G_SER_CFG_SER_SWAP_CPMD BIT(6)
+#define HSIO_S1G_SER_CFG_SER_ALISEL(x) (((x) << 4) & GENMASK(5, 4))
+#define HSIO_S1G_SER_CFG_SER_ALISEL_M GENMASK(5, 4)
+#define HSIO_S1G_SER_CFG_SER_ALISEL_X(x) (((x) & GENMASK(5, 4)) >> 4)
+#define HSIO_S1G_SER_CFG_SER_ENHYS BIT(3)
+#define HSIO_S1G_SER_CFG_SER_BIG_WIN BIT(2)
+#define HSIO_S1G_SER_CFG_SER_EN_WIN BIT(1)
+#define HSIO_S1G_SER_CFG_SER_ENALI BIT(0)
+
+#define HSIO_S1G_COMMON_CFG_SYS_RST BIT(31)
+#define HSIO_S1G_COMMON_CFG_SE_AUTO_SQUELCH_ENA BIT(21)
+#define HSIO_S1G_COMMON_CFG_ENA_LANE BIT(18)
+#define HSIO_S1G_COMMON_CFG_PWD_RX BIT(17)
+#define HSIO_S1G_COMMON_CFG_PWD_TX BIT(16)
+#define HSIO_S1G_COMMON_CFG_LANE_CTRL(x) (((x) << 13) & GENMASK(15, 13))
+#define HSIO_S1G_COMMON_CFG_LANE_CTRL_M GENMASK(15, 13)
+#define HSIO_S1G_COMMON_CFG_LANE_CTRL_X(x) (((x) & GENMASK(15, 13)) >> 13)
+#define HSIO_S1G_COMMON_CFG_ENA_DIRECT BIT(12)
+#define HSIO_S1G_COMMON_CFG_ENA_ELOOP BIT(11)
+#define HSIO_S1G_COMMON_CFG_ENA_FLOOP BIT(10)
+#define HSIO_S1G_COMMON_CFG_ENA_ILOOP BIT(9)
+#define HSIO_S1G_COMMON_CFG_ENA_PLOOP BIT(8)
+#define HSIO_S1G_COMMON_CFG_HRATE BIT(7)
+#define HSIO_S1G_COMMON_CFG_IF_MODE BIT(0)
+
+#define HSIO_S1G_PLL_CFG_PLL_ENA_FB_DIV2 BIT(22)
+#define HSIO_S1G_PLL_CFG_PLL_ENA_RC_DIV2 BIT(21)
+#define HSIO_S1G_PLL_CFG_PLL_FSM_CTRL_DATA(x) (((x) << 8) & GENMASK(15, 8))
+#define HSIO_S1G_PLL_CFG_PLL_FSM_CTRL_DATA_M GENMASK(15, 8)
+#define HSIO_S1G_PLL_CFG_PLL_FSM_CTRL_DATA_X(x) (((x) & GENMASK(15, 8)) >> 8)
+#define HSIO_S1G_PLL_CFG_PLL_FSM_ENA BIT(7)
+#define HSIO_S1G_PLL_CFG_PLL_FSM_FORCE_SET_ENA BIT(6)
+#define HSIO_S1G_PLL_CFG_PLL_FSM_OOR_RECAL_ENA BIT(5)
+#define HSIO_S1G_PLL_CFG_PLL_RB_DATA_SEL BIT(3)
+
+#define HSIO_S1G_PLL_STATUS_PLL_CAL_NOT_DONE BIT(12)
+#define HSIO_S1G_PLL_STATUS_PLL_CAL_ERR BIT(11)
+#define HSIO_S1G_PLL_STATUS_PLL_OUT_OF_RANGE_ERR BIT(10)
+#define HSIO_S1G_PLL_STATUS_PLL_RB_DATA(x) ((x) & GENMASK(7, 0))
+#define HSIO_S1G_PLL_STATUS_PLL_RB_DATA_M GENMASK(7, 0)
+
+#define HSIO_S1G_DFT_CFG0_LAZYBIT BIT(31)
+#define HSIO_S1G_DFT_CFG0_INV_DIS BIT(23)
+#define HSIO_S1G_DFT_CFG0_PRBS_SEL(x) (((x) << 20) & GENMASK(21, 20))
+#define HSIO_S1G_DFT_CFG0_PRBS_SEL_M GENMASK(21, 20)
+#define HSIO_S1G_DFT_CFG0_PRBS_SEL_X(x) (((x) & GENMASK(21, 20)) >> 20)
+#define HSIO_S1G_DFT_CFG0_TEST_MODE(x) (((x) << 16) & GENMASK(18, 16))
+#define HSIO_S1G_DFT_CFG0_TEST_MODE_M GENMASK(18, 16)
+#define HSIO_S1G_DFT_CFG0_TEST_MODE_X(x) (((x) & GENMASK(18, 16)) >> 16)
+#define HSIO_S1G_DFT_CFG0_RX_PHS_CORR_DIS BIT(4)
+#define HSIO_S1G_DFT_CFG0_RX_PDSENS_ENA BIT(3)
+#define HSIO_S1G_DFT_CFG0_RX_DFT_ENA BIT(2)
+#define HSIO_S1G_DFT_CFG0_TX_DFT_ENA BIT(0)
+
+#define HSIO_S1G_DFT_CFG1_TX_JITTER_AMPL(x) (((x) << 8) & GENMASK(17, 8))
+#define HSIO_S1G_DFT_CFG1_TX_JITTER_AMPL_M GENMASK(17, 8)
+#define HSIO_S1G_DFT_CFG1_TX_JITTER_AMPL_X(x) (((x) & GENMASK(17, 8)) >> 8)
+#define HSIO_S1G_DFT_CFG1_TX_STEP_FREQ(x) (((x) << 4) & GENMASK(7, 4))
+#define HSIO_S1G_DFT_CFG1_TX_STEP_FREQ_M GENMASK(7, 4)
+#define HSIO_S1G_DFT_CFG1_TX_STEP_FREQ_X(x) (((x) & GENMASK(7, 4)) >> 4)
+#define HSIO_S1G_DFT_CFG1_TX_JI_ENA BIT(3)
+#define HSIO_S1G_DFT_CFG1_TX_WAVEFORM_SEL BIT(2)
+#define HSIO_S1G_DFT_CFG1_TX_FREQOFF_DIR BIT(1)
+#define HSIO_S1G_DFT_CFG1_TX_FREQOFF_ENA BIT(0)
+
+#define HSIO_S1G_DFT_CFG2_RX_JITTER_AMPL(x) (((x) << 8) & GENMASK(17, 8))
+#define HSIO_S1G_DFT_CFG2_RX_JITTER_AMPL_M GENMASK(17, 8)
+#define HSIO_S1G_DFT_CFG2_RX_JITTER_AMPL_X(x) (((x) & GENMASK(17, 8)) >> 8)
+#define HSIO_S1G_DFT_CFG2_RX_STEP_FREQ(x) (((x) << 4) & GENMASK(7, 4))
+#define HSIO_S1G_DFT_CFG2_RX_STEP_FREQ_M GENMASK(7, 4)
+#define HSIO_S1G_DFT_CFG2_RX_STEP_FREQ_X(x) (((x) & GENMASK(7, 4)) >> 4)
+#define HSIO_S1G_DFT_CFG2_RX_JI_ENA BIT(3)
+#define HSIO_S1G_DFT_CFG2_RX_WAVEFORM_SEL BIT(2)
+#define HSIO_S1G_DFT_CFG2_RX_FREQOFF_DIR BIT(1)
+#define HSIO_S1G_DFT_CFG2_RX_FREQOFF_ENA BIT(0)
+
+#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_ENA BIT(20)
+#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH(x) (((x) << 16) & GENMASK(17, 16))
+#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH_M GENMASK(17, 16)
+#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH_X(x) (((x) & GENMASK(17, 16)) >> 16)
+#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_HIGH(x) (((x) << 8) & GENMASK(15, 8))
+#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_HIGH_M GENMASK(15, 8)
+#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_HIGH_X(x) (((x) & GENMASK(15, 8)) >> 8)
+#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_LOW(x) ((x) & GENMASK(7, 0))
+#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_LOW_M GENMASK(7, 0)
+
+#define HSIO_S1G_MISC_CFG_DES_100FX_KICK_MODE(x) (((x) << 11) & GENMASK(12, 11))
+#define HSIO_S1G_MISC_CFG_DES_100FX_KICK_MODE_M GENMASK(12, 11)
+#define HSIO_S1G_MISC_CFG_DES_100FX_KICK_MODE_X(x) (((x) & GENMASK(12, 11)) >> 11)
+#define HSIO_S1G_MISC_CFG_DES_100FX_CPMD_SWAP BIT(10)
+#define HSIO_S1G_MISC_CFG_DES_100FX_CPMD_MODE BIT(9)
+#define HSIO_S1G_MISC_CFG_DES_100FX_CPMD_ENA BIT(8)
+#define HSIO_S1G_MISC_CFG_RX_LPI_MODE_ENA BIT(5)
+#define HSIO_S1G_MISC_CFG_TX_LPI_MODE_ENA BIT(4)
+#define HSIO_S1G_MISC_CFG_RX_DATA_INV_ENA BIT(3)
+#define HSIO_S1G_MISC_CFG_TX_DATA_INV_ENA BIT(2)
+#define HSIO_S1G_MISC_CFG_LANE_RST BIT(0)
+
+#define HSIO_S1G_DFT_STATUS_PLL_BIST_NOT_DONE BIT(7)
+#define HSIO_S1G_DFT_STATUS_PLL_BIST_FAILED BIT(6)
+#define HSIO_S1G_DFT_STATUS_PLL_BIST_TIMEOUT_ERR BIT(5)
+#define HSIO_S1G_DFT_STATUS_BIST_ACTIVE BIT(3)
+#define HSIO_S1G_DFT_STATUS_BIST_NOSYNC BIT(2)
+#define HSIO_S1G_DFT_STATUS_BIST_COMPLETE_N BIT(1)
+#define HSIO_S1G_DFT_STATUS_BIST_ERROR BIT(0)
+
+#define HSIO_S1G_MISC_STATUS_DES_100FX_PHASE_SEL BIT(0)
+
+#define HSIO_MCB_S1G_ADDR_CFG_SERDES1G_WR_ONE_SHOT BIT(31)
+#define HSIO_MCB_S1G_ADDR_CFG_SERDES1G_RD_ONE_SHOT BIT(30)
+#define HSIO_MCB_S1G_ADDR_CFG_SERDES1G_ADDR(x) ((x) & GENMASK(8, 0))
+#define HSIO_MCB_S1G_ADDR_CFG_SERDES1G_ADDR_M GENMASK(8, 0)
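
Usage note: the MCB (macro configuration bus) address register picks which serdes1g instances the staged configuration is transferred to, and the one-shot bits self-clear when the transfer completes. A hedged sketch — the ocelot_read()/ocelot_write() wrappers and the HSIO_MCB_S1G_ADDR_CFG register id are assumed from elsewhere in this series, and the address is treated as a one-hot instance mask:

        /* Push staged settings to serdes1g instance 2, then wait for
         * the self-clearing one-shot bit.
         */
        ocelot_write(ocelot, HSIO_MCB_S1G_ADDR_CFG_SERDES1G_WR_ONE_SHOT |
                     HSIO_MCB_S1G_ADDR_CFG_SERDES1G_ADDR(BIT(2)),
                     HSIO_MCB_S1G_ADDR_CFG);
        while (ocelot_read(ocelot, HSIO_MCB_S1G_ADDR_CFG) &
               HSIO_MCB_S1G_ADDR_CFG_SERDES1G_WR_ONE_SHOT)
                cpu_relax();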
+
+#define HSIO_S6G_DIG_CFG_GP(x) (((x) << 16) & GENMASK(18, 16))
+#define HSIO_S6G_DIG_CFG_GP_M GENMASK(18, 16)
+#define HSIO_S6G_DIG_CFG_GP_X(x) (((x) & GENMASK(18, 16)) >> 16)
+#define HSIO_S6G_DIG_CFG_TX_BIT_DOUBLING_MODE_ENA BIT(7)
+#define HSIO_S6G_DIG_CFG_SIGDET_TESTMODE BIT(6)
+#define HSIO_S6G_DIG_CFG_SIGDET_AST(x) (((x) << 3) & GENMASK(5, 3))
+#define HSIO_S6G_DIG_CFG_SIGDET_AST_M GENMASK(5, 3)
+#define HSIO_S6G_DIG_CFG_SIGDET_AST_X(x) (((x) & GENMASK(5, 3)) >> 3)
+#define HSIO_S6G_DIG_CFG_SIGDET_DST(x) ((x) & GENMASK(2, 0))
+#define HSIO_S6G_DIG_CFG_SIGDET_DST_M GENMASK(2, 0)
+
+#define HSIO_S6G_DFT_CFG0_LAZYBIT BIT(31)
+#define HSIO_S6G_DFT_CFG0_INV_DIS BIT(23)
+#define HSIO_S6G_DFT_CFG0_PRBS_SEL(x) (((x) << 20) & GENMASK(21, 20))
+#define HSIO_S6G_DFT_CFG0_PRBS_SEL_M GENMASK(21, 20)
+#define HSIO_S6G_DFT_CFG0_PRBS_SEL_X(x) (((x) & GENMASK(21, 20)) >> 20)
+#define HSIO_S6G_DFT_CFG0_TEST_MODE(x) (((x) << 16) & GENMASK(18, 16))
+#define HSIO_S6G_DFT_CFG0_TEST_MODE_M GENMASK(18, 16)
+#define HSIO_S6G_DFT_CFG0_TEST_MODE_X(x) (((x) & GENMASK(18, 16)) >> 16)
+#define HSIO_S6G_DFT_CFG0_RX_PHS_CORR_DIS BIT(4)
+#define HSIO_S6G_DFT_CFG0_RX_PDSENS_ENA BIT(3)
+#define HSIO_S6G_DFT_CFG0_RX_DFT_ENA BIT(2)
+#define HSIO_S6G_DFT_CFG0_TX_DFT_ENA BIT(0)
+
+#define HSIO_S6G_DFT_CFG1_TX_JITTER_AMPL(x) (((x) << 8) & GENMASK(17, 8))
+#define HSIO_S6G_DFT_CFG1_TX_JITTER_AMPL_M GENMASK(17, 8)
+#define HSIO_S6G_DFT_CFG1_TX_JITTER_AMPL_X(x) (((x) & GENMASK(17, 8)) >> 8)
+#define HSIO_S6G_DFT_CFG1_TX_STEP_FREQ(x) (((x) << 4) & GENMASK(7, 4))
+#define HSIO_S6G_DFT_CFG1_TX_STEP_FREQ_M GENMASK(7, 4)
+#define HSIO_S6G_DFT_CFG1_TX_STEP_FREQ_X(x) (((x) & GENMASK(7, 4)) >> 4)
+#define HSIO_S6G_DFT_CFG1_TX_JI_ENA BIT(3)
+#define HSIO_S6G_DFT_CFG1_TX_WAVEFORM_SEL BIT(2)
+#define HSIO_S6G_DFT_CFG1_TX_FREQOFF_DIR BIT(1)
+#define HSIO_S6G_DFT_CFG1_TX_FREQOFF_ENA BIT(0)
+
+#define HSIO_S6G_DFT_CFG2_RX_JITTER_AMPL(x) (((x) << 8) & GENMASK(17, 8))
+#define HSIO_S6G_DFT_CFG2_RX_JITTER_AMPL_M GENMASK(17, 8)
+#define HSIO_S6G_DFT_CFG2_RX_JITTER_AMPL_X(x) (((x) & GENMASK(17, 8)) >> 8)
+#define HSIO_S6G_DFT_CFG2_RX_STEP_FREQ(x) (((x) << 4) & GENMASK(7, 4))
+#define HSIO_S6G_DFT_CFG2_RX_STEP_FREQ_M GENMASK(7, 4)
+#define HSIO_S6G_DFT_CFG2_RX_STEP_FREQ_X(x) (((x) & GENMASK(7, 4)) >> 4)
+#define HSIO_S6G_DFT_CFG2_RX_JI_ENA BIT(3)
+#define HSIO_S6G_DFT_CFG2_RX_WAVEFORM_SEL BIT(2)
+#define HSIO_S6G_DFT_CFG2_RX_FREQOFF_DIR BIT(1)
+#define HSIO_S6G_DFT_CFG2_RX_FREQOFF_ENA BIT(0)
+
+#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_ENA BIT(20)
+#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH(x) (((x) << 16) & GENMASK(19, 16))
+#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH_M GENMASK(19, 16)
+#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH_X(x) (((x) & GENMASK(19, 16)) >> 16)
+#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_HIGH(x) (((x) << 8) & GENMASK(15, 8))
+#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_HIGH_M GENMASK(15, 8)
+#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_HIGH_X(x) (((x) & GENMASK(15, 8)) >> 8)
+#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_LOW(x) ((x) & GENMASK(7, 0))
+#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_LOW_M GENMASK(7, 0)
+
+#define HSIO_S6G_MISC_CFG_SEL_RECO_CLK(x) (((x) << 13) & GENMASK(14, 13))
+#define HSIO_S6G_MISC_CFG_SEL_RECO_CLK_M GENMASK(14, 13)
+#define HSIO_S6G_MISC_CFG_SEL_RECO_CLK_X(x) (((x) & GENMASK(14, 13)) >> 13)
+#define HSIO_S6G_MISC_CFG_DES_100FX_KICK_MODE(x) (((x) << 11) & GENMASK(12, 11))
+#define HSIO_S6G_MISC_CFG_DES_100FX_KICK_MODE_M GENMASK(12, 11)
+#define HSIO_S6G_MISC_CFG_DES_100FX_KICK_MODE_X(x) (((x) & GENMASK(12, 11)) >> 11)
+#define HSIO_S6G_MISC_CFG_DES_100FX_CPMD_SWAP BIT(10)
+#define HSIO_S6G_MISC_CFG_DES_100FX_CPMD_MODE BIT(9)
+#define HSIO_S6G_MISC_CFG_DES_100FX_CPMD_ENA BIT(8)
+#define HSIO_S6G_MISC_CFG_RX_BUS_FLIP_ENA BIT(7)
+#define HSIO_S6G_MISC_CFG_TX_BUS_FLIP_ENA BIT(6)
+#define HSIO_S6G_MISC_CFG_RX_LPI_MODE_ENA BIT(5)
+#define HSIO_S6G_MISC_CFG_TX_LPI_MODE_ENA BIT(4)
+#define HSIO_S6G_MISC_CFG_RX_DATA_INV_ENA BIT(3)
+#define HSIO_S6G_MISC_CFG_TX_DATA_INV_ENA BIT(2)
+#define HSIO_S6G_MISC_CFG_LANE_RST BIT(0)
+
+#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST0(x) (((x) << 23) & GENMASK(28, 23))
+#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST0_M GENMASK(28, 23)
+#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST0_X(x) (((x) & GENMASK(28, 23)) >> 23)
+#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST1(x) (((x) << 18) & GENMASK(22, 18))
+#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST1_M GENMASK(22, 18)
+#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST1_X(x) (((x) & GENMASK(22, 18)) >> 18)
+#define HSIO_S6G_OB_ANEG_CFG_AN_OB_PREC(x) (((x) << 13) & GENMASK(17, 13))
+#define HSIO_S6G_OB_ANEG_CFG_AN_OB_PREC_M GENMASK(17, 13)
+#define HSIO_S6G_OB_ANEG_CFG_AN_OB_PREC_X(x) (((x) & GENMASK(17, 13)) >> 13)
+#define HSIO_S6G_OB_ANEG_CFG_AN_OB_ENA_CAS(x) (((x) << 6) & GENMASK(8, 6))
+#define HSIO_S6G_OB_ANEG_CFG_AN_OB_ENA_CAS_M GENMASK(8, 6)
+#define HSIO_S6G_OB_ANEG_CFG_AN_OB_ENA_CAS_X(x) (((x) & GENMASK(8, 6)) >> 6)
+#define HSIO_S6G_OB_ANEG_CFG_AN_OB_LEV(x) ((x) & GENMASK(5, 0))
+#define HSIO_S6G_OB_ANEG_CFG_AN_OB_LEV_M GENMASK(5, 0)
+
+#define HSIO_S6G_DFT_STATUS_PRBS_SYNC_STAT BIT(8)
+#define HSIO_S6G_DFT_STATUS_PLL_BIST_NOT_DONE BIT(7)
+#define HSIO_S6G_DFT_STATUS_PLL_BIST_FAILED BIT(6)
+#define HSIO_S6G_DFT_STATUS_PLL_BIST_TIMEOUT_ERR BIT(5)
+#define HSIO_S6G_DFT_STATUS_BIST_ACTIVE BIT(3)
+#define HSIO_S6G_DFT_STATUS_BIST_NOSYNC BIT(2)
+#define HSIO_S6G_DFT_STATUS_BIST_COMPLETE_N BIT(1)
+#define HSIO_S6G_DFT_STATUS_BIST_ERROR BIT(0)
+
+#define HSIO_S6G_MISC_STATUS_DES_100FX_PHASE_SEL BIT(0)
+
+#define HSIO_S6G_DES_CFG_DES_PHS_CTRL(x) (((x) << 13) & GENMASK(16, 13))
+#define HSIO_S6G_DES_CFG_DES_PHS_CTRL_M GENMASK(16, 13)
+#define HSIO_S6G_DES_CFG_DES_PHS_CTRL_X(x) (((x) & GENMASK(16, 13)) >> 13)
+#define HSIO_S6G_DES_CFG_DES_MBTR_CTRL(x) (((x) << 10) & GENMASK(12, 10))
+#define HSIO_S6G_DES_CFG_DES_MBTR_CTRL_M GENMASK(12, 10)
+#define HSIO_S6G_DES_CFG_DES_MBTR_CTRL_X(x) (((x) & GENMASK(12, 10)) >> 10)
+#define HSIO_S6G_DES_CFG_DES_CPMD_SEL(x) (((x) << 8) & GENMASK(9, 8))
+#define HSIO_S6G_DES_CFG_DES_CPMD_SEL_M GENMASK(9, 8)
+#define HSIO_S6G_DES_CFG_DES_CPMD_SEL_X(x) (((x) & GENMASK(9, 8)) >> 8)
+#define HSIO_S6G_DES_CFG_DES_BW_HYST(x) (((x) << 5) & GENMASK(7, 5))
+#define HSIO_S6G_DES_CFG_DES_BW_HYST_M GENMASK(7, 5)
+#define HSIO_S6G_DES_CFG_DES_BW_HYST_X(x) (((x) & GENMASK(7, 5)) >> 5)
+#define HSIO_S6G_DES_CFG_DES_SWAP_HYST BIT(4)
+#define HSIO_S6G_DES_CFG_DES_BW_ANA(x) (((x) << 1) & GENMASK(3, 1))
+#define HSIO_S6G_DES_CFG_DES_BW_ANA_M GENMASK(3, 1)
+#define HSIO_S6G_DES_CFG_DES_BW_ANA_X(x) (((x) & GENMASK(3, 1)) >> 1)
+#define HSIO_S6G_DES_CFG_DES_SWAP_ANA BIT(0)
+
+#define HSIO_S6G_IB_CFG_IB_SOFSI(x) (((x) << 29) & GENMASK(30, 29))
+#define HSIO_S6G_IB_CFG_IB_SOFSI_M GENMASK(30, 29)
+#define HSIO_S6G_IB_CFG_IB_SOFSI_X(x) (((x) & GENMASK(30, 29)) >> 29)
+#define HSIO_S6G_IB_CFG_IB_VBULK_SEL BIT(28)
+#define HSIO_S6G_IB_CFG_IB_RTRM_ADJ(x) (((x) << 24) & GENMASK(27, 24))
+#define HSIO_S6G_IB_CFG_IB_RTRM_ADJ_M GENMASK(27, 24)
+#define HSIO_S6G_IB_CFG_IB_RTRM_ADJ_X(x) (((x) & GENMASK(27, 24)) >> 24)
+#define HSIO_S6G_IB_CFG_IB_ICML_ADJ(x) (((x) << 20) & GENMASK(23, 20))
+#define HSIO_S6G_IB_CFG_IB_ICML_ADJ_M GENMASK(23, 20)
+#define HSIO_S6G_IB_CFG_IB_ICML_ADJ_X(x) (((x) & GENMASK(23, 20)) >> 20)
+#define HSIO_S6G_IB_CFG_IB_TERM_MODE_SEL(x) (((x) << 18) & GENMASK(19, 18))
+#define HSIO_S6G_IB_CFG_IB_TERM_MODE_SEL_M GENMASK(19, 18)
+#define HSIO_S6G_IB_CFG_IB_TERM_MODE_SEL_X(x) (((x) & GENMASK(19, 18)) >> 18)
+#define HSIO_S6G_IB_CFG_IB_SIG_DET_CLK_SEL(x) (((x) << 15) & GENMASK(17, 15))
+#define HSIO_S6G_IB_CFG_IB_SIG_DET_CLK_SEL_M GENMASK(17, 15)
+#define HSIO_S6G_IB_CFG_IB_SIG_DET_CLK_SEL_X(x) (((x) & GENMASK(17, 15)) >> 15)
+#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_HP(x) (((x) << 13) & GENMASK(14, 13))
+#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_HP_M GENMASK(14, 13)
+#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_HP_X(x) (((x) & GENMASK(14, 13)) >> 13)
+#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_MID(x) (((x) << 11) & GENMASK(12, 11))
+#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_MID_M GENMASK(12, 11)
+#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_MID_X(x) (((x) & GENMASK(12, 11)) >> 11)
+#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_LP(x) (((x) << 9) & GENMASK(10, 9))
+#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_LP_M GENMASK(10, 9)
+#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_LP_X(x) (((x) & GENMASK(10, 9)) >> 9)
+#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_OFFSET(x) (((x) << 7) & GENMASK(8, 7))
+#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_OFFSET_M GENMASK(8, 7)
+#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_OFFSET_X(x) (((x) & GENMASK(8, 7)) >> 7)
+#define HSIO_S6G_IB_CFG_IB_ANA_TEST_ENA BIT(6)
+#define HSIO_S6G_IB_CFG_IB_SIG_DET_ENA BIT(5)
+#define HSIO_S6G_IB_CFG_IB_CONCUR BIT(4)
+#define HSIO_S6G_IB_CFG_IB_CAL_ENA BIT(3)
+#define HSIO_S6G_IB_CFG_IB_SAM_ENA BIT(2)
+#define HSIO_S6G_IB_CFG_IB_EQZ_ENA BIT(1)
+#define HSIO_S6G_IB_CFG_IB_REG_ENA BIT(0)
+
+#define HSIO_S6G_IB_CFG1_IB_TJTAG(x) (((x) << 17) & GENMASK(21, 17))
+#define HSIO_S6G_IB_CFG1_IB_TJTAG_M GENMASK(21, 17)
+#define HSIO_S6G_IB_CFG1_IB_TJTAG_X(x) (((x) & GENMASK(21, 17)) >> 17)
+#define HSIO_S6G_IB_CFG1_IB_TSDET(x) (((x) << 12) & GENMASK(16, 12))
+#define HSIO_S6G_IB_CFG1_IB_TSDET_M GENMASK(16, 12)
+#define HSIO_S6G_IB_CFG1_IB_TSDET_X(x) (((x) & GENMASK(16, 12)) >> 12)
+#define HSIO_S6G_IB_CFG1_IB_SCALY(x) (((x) << 8) & GENMASK(11, 8))
+#define HSIO_S6G_IB_CFG1_IB_SCALY_M GENMASK(11, 8)
+#define HSIO_S6G_IB_CFG1_IB_SCALY_X(x) (((x) & GENMASK(11, 8)) >> 8)
+#define HSIO_S6G_IB_CFG1_IB_FILT_HP BIT(7)
+#define HSIO_S6G_IB_CFG1_IB_FILT_MID BIT(6)
+#define HSIO_S6G_IB_CFG1_IB_FILT_LP BIT(5)
+#define HSIO_S6G_IB_CFG1_IB_FILT_OFFSET BIT(4)
+#define HSIO_S6G_IB_CFG1_IB_FRC_HP BIT(3)
+#define HSIO_S6G_IB_CFG1_IB_FRC_MID BIT(2)
+#define HSIO_S6G_IB_CFG1_IB_FRC_LP BIT(1)
+#define HSIO_S6G_IB_CFG1_IB_FRC_OFFSET BIT(0)
+
+#define HSIO_S6G_IB_CFG2_IB_TINFV(x) (((x) << 27) & GENMASK(29, 27))
+#define HSIO_S6G_IB_CFG2_IB_TINFV_M GENMASK(29, 27)
+#define HSIO_S6G_IB_CFG2_IB_TINFV_X(x) (((x) & GENMASK(29, 27)) >> 27)
+#define HSIO_S6G_IB_CFG2_IB_OINFI(x) (((x) << 22) & GENMASK(26, 22))
+#define HSIO_S6G_IB_CFG2_IB_OINFI_M GENMASK(26, 22)
+#define HSIO_S6G_IB_CFG2_IB_OINFI_X(x) (((x) & GENMASK(26, 22)) >> 22)
+#define HSIO_S6G_IB_CFG2_IB_TAUX(x) (((x) << 19) & GENMASK(21, 19))
+#define HSIO_S6G_IB_CFG2_IB_TAUX_M GENMASK(21, 19)
+#define HSIO_S6G_IB_CFG2_IB_TAUX_X(x) (((x) & GENMASK(21, 19)) >> 19)
+#define HSIO_S6G_IB_CFG2_IB_OINFS(x) (((x) << 16) & GENMASK(18, 16))
+#define HSIO_S6G_IB_CFG2_IB_OINFS_M GENMASK(18, 16)
+#define HSIO_S6G_IB_CFG2_IB_OINFS_X(x) (((x) & GENMASK(18, 16)) >> 16)
+#define HSIO_S6G_IB_CFG2_IB_OCALS(x) (((x) << 10) & GENMASK(15, 10))
+#define HSIO_S6G_IB_CFG2_IB_OCALS_M GENMASK(15, 10)
+#define HSIO_S6G_IB_CFG2_IB_OCALS_X(x) (((x) & GENMASK(15, 10)) >> 10)
+#define HSIO_S6G_IB_CFG2_IB_TCALV(x) (((x) << 5) & GENMASK(9, 5))
+#define HSIO_S6G_IB_CFG2_IB_TCALV_M GENMASK(9, 5)
+#define HSIO_S6G_IB_CFG2_IB_TCALV_X(x) (((x) & GENMASK(9, 5)) >> 5)
+#define HSIO_S6G_IB_CFG2_IB_UMAX(x) (((x) << 3) & GENMASK(4, 3))
+#define HSIO_S6G_IB_CFG2_IB_UMAX_M GENMASK(4, 3)
+#define HSIO_S6G_IB_CFG2_IB_UMAX_X(x) (((x) & GENMASK(4, 3)) >> 3)
+#define HSIO_S6G_IB_CFG2_IB_UREG(x) ((x) & GENMASK(2, 0))
+#define HSIO_S6G_IB_CFG2_IB_UREG_M GENMASK(2, 0)
+
+#define HSIO_S6G_IB_CFG3_IB_INI_HP(x) (((x) << 18) & GENMASK(23, 18))
+#define HSIO_S6G_IB_CFG3_IB_INI_HP_M GENMASK(23, 18)
+#define HSIO_S6G_IB_CFG3_IB_INI_HP_X(x) (((x) & GENMASK(23, 18)) >> 18)
+#define HSIO_S6G_IB_CFG3_IB_INI_MID(x) (((x) << 12) & GENMASK(17, 12))
+#define HSIO_S6G_IB_CFG3_IB_INI_MID_M GENMASK(17, 12)
+#define HSIO_S6G_IB_CFG3_IB_INI_MID_X(x) (((x) & GENMASK(17, 12)) >> 12)
+#define HSIO_S6G_IB_CFG3_IB_INI_LP(x) (((x) << 6) & GENMASK(11, 6))
+#define HSIO_S6G_IB_CFG3_IB_INI_LP_M GENMASK(11, 6)
+#define HSIO_S6G_IB_CFG3_IB_INI_LP_X(x) (((x) & GENMASK(11, 6)) >> 6)
+#define HSIO_S6G_IB_CFG3_IB_INI_OFFSET(x) ((x) & GENMASK(5, 0))
+#define HSIO_S6G_IB_CFG3_IB_INI_OFFSET_M GENMASK(5, 0)
+
+#define HSIO_S6G_IB_CFG4_IB_MAX_HP(x) (((x) << 18) & GENMASK(23, 18))
+#define HSIO_S6G_IB_CFG4_IB_MAX_HP_M GENMASK(23, 18)
+#define HSIO_S6G_IB_CFG4_IB_MAX_HP_X(x) (((x) & GENMASK(23, 18)) >> 18)
+#define HSIO_S6G_IB_CFG4_IB_MAX_MID(x) (((x) << 12) & GENMASK(17, 12))
+#define HSIO_S6G_IB_CFG4_IB_MAX_MID_M GENMASK(17, 12)
+#define HSIO_S6G_IB_CFG4_IB_MAX_MID_X(x) (((x) & GENMASK(17, 12)) >> 12)
+#define HSIO_S6G_IB_CFG4_IB_MAX_LP(x) (((x) << 6) & GENMASK(11, 6))
+#define HSIO_S6G_IB_CFG4_IB_MAX_LP_M GENMASK(11, 6)
+#define HSIO_S6G_IB_CFG4_IB_MAX_LP_X(x) (((x) & GENMASK(11, 6)) >> 6)
+#define HSIO_S6G_IB_CFG4_IB_MAX_OFFSET(x) ((x) & GENMASK(5, 0))
+#define HSIO_S6G_IB_CFG4_IB_MAX_OFFSET_M GENMASK(5, 0)
+
+#define HSIO_S6G_IB_CFG5_IB_MIN_HP(x) (((x) << 18) & GENMASK(23, 18))
+#define HSIO_S6G_IB_CFG5_IB_MIN_HP_M GENMASK(23, 18)
+#define HSIO_S6G_IB_CFG5_IB_MIN_HP_X(x) (((x) & GENMASK(23, 18)) >> 18)
+#define HSIO_S6G_IB_CFG5_IB_MIN_MID(x) (((x) << 12) & GENMASK(17, 12))
+#define HSIO_S6G_IB_CFG5_IB_MIN_MID_M GENMASK(17, 12)
+#define HSIO_S6G_IB_CFG5_IB_MIN_MID_X(x) (((x) & GENMASK(17, 12)) >> 12)
+#define HSIO_S6G_IB_CFG5_IB_MIN_LP(x) (((x) << 6) & GENMASK(11, 6))
+#define HSIO_S6G_IB_CFG5_IB_MIN_LP_M GENMASK(11, 6)
+#define HSIO_S6G_IB_CFG5_IB_MIN_LP_X(x) (((x) & GENMASK(11, 6)) >> 6)
+#define HSIO_S6G_IB_CFG5_IB_MIN_OFFSET(x) ((x) & GENMASK(5, 0))
+#define HSIO_S6G_IB_CFG5_IB_MIN_OFFSET_M GENMASK(5, 0)
+
+#define HSIO_S6G_OB_CFG_OB_IDLE BIT(31)
+#define HSIO_S6G_OB_CFG_OB_ENA1V_MODE BIT(30)
+#define HSIO_S6G_OB_CFG_OB_POL BIT(29)
+#define HSIO_S6G_OB_CFG_OB_POST0(x) (((x) << 23) & GENMASK(28, 23))
+#define HSIO_S6G_OB_CFG_OB_POST0_M GENMASK(28, 23)
+#define HSIO_S6G_OB_CFG_OB_POST0_X(x) (((x) & GENMASK(28, 23)) >> 23)
+#define HSIO_S6G_OB_CFG_OB_PREC(x) (((x) << 18) & GENMASK(22, 18))
+#define HSIO_S6G_OB_CFG_OB_PREC_M GENMASK(22, 18)
+#define HSIO_S6G_OB_CFG_OB_PREC_X(x) (((x) & GENMASK(22, 18)) >> 18)
+#define HSIO_S6G_OB_CFG_OB_R_ADJ_MUX BIT(17)
+#define HSIO_S6G_OB_CFG_OB_R_ADJ_PDR BIT(16)
+#define HSIO_S6G_OB_CFG_OB_POST1(x) (((x) << 11) & GENMASK(15, 11))
+#define HSIO_S6G_OB_CFG_OB_POST1_M GENMASK(15, 11)
+#define HSIO_S6G_OB_CFG_OB_POST1_X(x) (((x) & GENMASK(15, 11)) >> 11)
+#define HSIO_S6G_OB_CFG_OB_R_COR BIT(10)
+#define HSIO_S6G_OB_CFG_OB_SEL_RCTRL BIT(9)
+#define HSIO_S6G_OB_CFG_OB_SR_H BIT(8)
+#define HSIO_S6G_OB_CFG_OB_SR(x) (((x) << 4) & GENMASK(7, 4))
+#define HSIO_S6G_OB_CFG_OB_SR_M GENMASK(7, 4)
+#define HSIO_S6G_OB_CFG_OB_SR_X(x) (((x) & GENMASK(7, 4)) >> 4)
+#define HSIO_S6G_OB_CFG_OB_RESISTOR_CTRL(x) ((x) & GENMASK(3, 0))
+#define HSIO_S6G_OB_CFG_OB_RESISTOR_CTRL_M GENMASK(3, 0)
+
+#define HSIO_S6G_OB_CFG1_OB_ENA_CAS(x) (((x) << 6) & GENMASK(8, 6))
+#define HSIO_S6G_OB_CFG1_OB_ENA_CAS_M GENMASK(8, 6)
+#define HSIO_S6G_OB_CFG1_OB_ENA_CAS_X(x) (((x) & GENMASK(8, 6)) >> 6)
+#define HSIO_S6G_OB_CFG1_OB_LEV(x) ((x) & GENMASK(5, 0))
+#define HSIO_S6G_OB_CFG1_OB_LEV_M GENMASK(5, 0)
+
+#define HSIO_S6G_SER_CFG_SER_4TAP_ENA BIT(8)
+#define HSIO_S6G_SER_CFG_SER_CPMD_SEL BIT(7)
+#define HSIO_S6G_SER_CFG_SER_SWAP_CPMD BIT(6)
+#define HSIO_S6G_SER_CFG_SER_ALISEL(x) (((x) << 4) & GENMASK(5, 4))
+#define HSIO_S6G_SER_CFG_SER_ALISEL_M GENMASK(5, 4)
+#define HSIO_S6G_SER_CFG_SER_ALISEL_X(x) (((x) & GENMASK(5, 4)) >> 4)
+#define HSIO_S6G_SER_CFG_SER_ENHYS BIT(3)
+#define HSIO_S6G_SER_CFG_SER_BIG_WIN BIT(2)
+#define HSIO_S6G_SER_CFG_SER_EN_WIN BIT(1)
+#define HSIO_S6G_SER_CFG_SER_ENALI BIT(0)
+
+#define HSIO_S6G_COMMON_CFG_SYS_RST BIT(17)
+#define HSIO_S6G_COMMON_CFG_SE_DIV2_ENA BIT(16)
+#define HSIO_S6G_COMMON_CFG_SE_AUTO_SQUELCH_ENA BIT(15)
+#define HSIO_S6G_COMMON_CFG_ENA_LANE BIT(14)
+#define HSIO_S6G_COMMON_CFG_PWD_RX BIT(13)
+#define HSIO_S6G_COMMON_CFG_PWD_TX BIT(12)
+#define HSIO_S6G_COMMON_CFG_LANE_CTRL(x) (((x) << 9) & GENMASK(11, 9))
+#define HSIO_S6G_COMMON_CFG_LANE_CTRL_M GENMASK(11, 9)
+#define HSIO_S6G_COMMON_CFG_LANE_CTRL_X(x) (((x) & GENMASK(11, 9)) >> 9)
+#define HSIO_S6G_COMMON_CFG_ENA_DIRECT BIT(8)
+#define HSIO_S6G_COMMON_CFG_ENA_ELOOP BIT(7)
+#define HSIO_S6G_COMMON_CFG_ENA_FLOOP BIT(6)
+#define HSIO_S6G_COMMON_CFG_ENA_ILOOP BIT(5)
+#define HSIO_S6G_COMMON_CFG_ENA_PLOOP BIT(4)
+#define HSIO_S6G_COMMON_CFG_HRATE BIT(3)
+#define HSIO_S6G_COMMON_CFG_QRATE BIT(2)
+#define HSIO_S6G_COMMON_CFG_IF_MODE(x) ((x) & GENMASK(1, 0))
+#define HSIO_S6G_COMMON_CFG_IF_MODE_M GENMASK(1, 0)
+
+#define HSIO_S6G_PLL_CFG_PLL_ENA_OFFS(x) (((x) << 16) & GENMASK(17, 16))
+#define HSIO_S6G_PLL_CFG_PLL_ENA_OFFS_M GENMASK(17, 16)
+#define HSIO_S6G_PLL_CFG_PLL_ENA_OFFS_X(x) (((x) & GENMASK(17, 16)) >> 16)
+#define HSIO_S6G_PLL_CFG_PLL_DIV4 BIT(15)
+#define HSIO_S6G_PLL_CFG_PLL_ENA_ROT BIT(14)
+#define HSIO_S6G_PLL_CFG_PLL_FSM_CTRL_DATA(x) (((x) << 6) & GENMASK(13, 6))
+#define HSIO_S6G_PLL_CFG_PLL_FSM_CTRL_DATA_M GENMASK(13, 6)
+#define HSIO_S6G_PLL_CFG_PLL_FSM_CTRL_DATA_X(x) (((x) & GENMASK(13, 6)) >> 6)
+#define HSIO_S6G_PLL_CFG_PLL_FSM_ENA BIT(5)
+#define HSIO_S6G_PLL_CFG_PLL_FSM_FORCE_SET_ENA BIT(4)
+#define HSIO_S6G_PLL_CFG_PLL_FSM_OOR_RECAL_ENA BIT(3)
+#define HSIO_S6G_PLL_CFG_PLL_RB_DATA_SEL BIT(2)
+#define HSIO_S6G_PLL_CFG_PLL_ROT_DIR BIT(1)
+#define HSIO_S6G_PLL_CFG_PLL_ROT_FRQ BIT(0)
+
+#define HSIO_S6G_ACJTAG_CFG_ACJTAG_INIT_DATA_N BIT(5)
+#define HSIO_S6G_ACJTAG_CFG_ACJTAG_INIT_DATA_P BIT(4)
+#define HSIO_S6G_ACJTAG_CFG_ACJTAG_INIT_CLK BIT(3)
+#define HSIO_S6G_ACJTAG_CFG_OB_DIRECT BIT(2)
+#define HSIO_S6G_ACJTAG_CFG_ACJTAG_ENA BIT(1)
+#define HSIO_S6G_ACJTAG_CFG_JTAG_CTRL_ENA BIT(0)
+
+#define HSIO_S6G_GP_CFG_GP_MSB(x) (((x) << 16) & GENMASK(31, 16))
+#define HSIO_S6G_GP_CFG_GP_MSB_M GENMASK(31, 16)
+#define HSIO_S6G_GP_CFG_GP_MSB_X(x) (((x) & GENMASK(31, 16)) >> 16)
+#define HSIO_S6G_GP_CFG_GP_LSB(x) ((x) & GENMASK(15, 0))
+#define HSIO_S6G_GP_CFG_GP_LSB_M GENMASK(15, 0)
+
+#define HSIO_S6G_IB_STATUS0_IB_CAL_DONE BIT(8)
+#define HSIO_S6G_IB_STATUS0_IB_HP_GAIN_ACT BIT(7)
+#define HSIO_S6G_IB_STATUS0_IB_MID_GAIN_ACT BIT(6)
+#define HSIO_S6G_IB_STATUS0_IB_LP_GAIN_ACT BIT(5)
+#define HSIO_S6G_IB_STATUS0_IB_OFFSET_ACT BIT(4)
+#define HSIO_S6G_IB_STATUS0_IB_OFFSET_VLD BIT(3)
+#define HSIO_S6G_IB_STATUS0_IB_OFFSET_ERR BIT(2)
+#define HSIO_S6G_IB_STATUS0_IB_OFFSDIR BIT(1)
+#define HSIO_S6G_IB_STATUS0_IB_SIG_DET BIT(0)
+
+#define HSIO_S6G_IB_STATUS1_IB_HP_GAIN_STAT(x) (((x) << 18) & GENMASK(23, 18))
+#define HSIO_S6G_IB_STATUS1_IB_HP_GAIN_STAT_M GENMASK(23, 18)
+#define HSIO_S6G_IB_STATUS1_IB_HP_GAIN_STAT_X(x) (((x) & GENMASK(23, 18)) >> 18)
+#define HSIO_S6G_IB_STATUS1_IB_MID_GAIN_STAT(x) (((x) << 12) & GENMASK(17, 12))
+#define HSIO_S6G_IB_STATUS1_IB_MID_GAIN_STAT_M GENMASK(17, 12)
+#define HSIO_S6G_IB_STATUS1_IB_MID_GAIN_STAT_X(x) (((x) & GENMASK(17, 12)) >> 12)
+#define HSIO_S6G_IB_STATUS1_IB_LP_GAIN_STAT(x) (((x) << 6) & GENMASK(11, 6))
+#define HSIO_S6G_IB_STATUS1_IB_LP_GAIN_STAT_M GENMASK(11, 6)
+#define HSIO_S6G_IB_STATUS1_IB_LP_GAIN_STAT_X(x) (((x) & GENMASK(11, 6)) >> 6)
+#define HSIO_S6G_IB_STATUS1_IB_OFFSET_STAT(x) ((x) & GENMASK(5, 0))
+#define HSIO_S6G_IB_STATUS1_IB_OFFSET_STAT_M GENMASK(5, 0)
+
+#define HSIO_S6G_ACJTAG_STATUS_ACJTAG_CAPT_DATA_N BIT(2)
+#define HSIO_S6G_ACJTAG_STATUS_ACJTAG_CAPT_DATA_P BIT(1)
+#define HSIO_S6G_ACJTAG_STATUS_IB_DIRECT BIT(0)
+
+#define HSIO_S6G_PLL_STATUS_PLL_CAL_NOT_DONE BIT(10)
+#define HSIO_S6G_PLL_STATUS_PLL_CAL_ERR BIT(9)
+#define HSIO_S6G_PLL_STATUS_PLL_OUT_OF_RANGE_ERR BIT(8)
+#define HSIO_S6G_PLL_STATUS_PLL_RB_DATA(x) ((x) & GENMASK(7, 0))
+#define HSIO_S6G_PLL_STATUS_PLL_RB_DATA_M GENMASK(7, 0)
+
+#define HSIO_S6G_REVID_SERDES_REV(x) (((x) << 26) & GENMASK(31, 26))
+#define HSIO_S6G_REVID_SERDES_REV_M GENMASK(31, 26)
+#define HSIO_S6G_REVID_SERDES_REV_X(x) (((x) & GENMASK(31, 26)) >> 26)
+#define HSIO_S6G_REVID_RCPLL_REV(x) (((x) << 21) & GENMASK(25, 21))
+#define HSIO_S6G_REVID_RCPLL_REV_M GENMASK(25, 21)
+#define HSIO_S6G_REVID_RCPLL_REV_X(x) (((x) & GENMASK(25, 21)) >> 21)
+#define HSIO_S6G_REVID_SER_REV(x) (((x) << 16) & GENMASK(20, 16))
+#define HSIO_S6G_REVID_SER_REV_M GENMASK(20, 16)
+#define HSIO_S6G_REVID_SER_REV_X(x) (((x) & GENMASK(20, 16)) >> 16)
+#define HSIO_S6G_REVID_DES_REV(x) (((x) << 10) & GENMASK(15, 10))
+#define HSIO_S6G_REVID_DES_REV_M GENMASK(15, 10)
+#define HSIO_S6G_REVID_DES_REV_X(x) (((x) & GENMASK(15, 10)) >> 10)
+#define HSIO_S6G_REVID_OB_REV(x) (((x) << 5) & GENMASK(9, 5))
+#define HSIO_S6G_REVID_OB_REV_M GENMASK(9, 5)
+#define HSIO_S6G_REVID_OB_REV_X(x) (((x) & GENMASK(9, 5)) >> 5)
+#define HSIO_S6G_REVID_IB_REV(x) ((x) & GENMASK(4, 0))
+#define HSIO_S6G_REVID_IB_REV_M GENMASK(4, 0)
+
+#define HSIO_MCB_S6G_ADDR_CFG_SERDES6G_WR_ONE_SHOT BIT(31)
+#define HSIO_MCB_S6G_ADDR_CFG_SERDES6G_RD_ONE_SHOT BIT(30)
+#define HSIO_MCB_S6G_ADDR_CFG_SERDES6G_ADDR(x) ((x) & GENMASK(24, 0))
+#define HSIO_MCB_S6G_ADDR_CFG_SERDES6G_ADDR_M GENMASK(24, 0)
+
+#define HSIO_HW_CFG_DEV2G5_10_MODE BIT(6)
+#define HSIO_HW_CFG_DEV1G_9_MODE BIT(5)
+#define HSIO_HW_CFG_DEV1G_6_MODE BIT(4)
+#define HSIO_HW_CFG_DEV1G_5_MODE BIT(3)
+#define HSIO_HW_CFG_DEV1G_4_MODE BIT(2)
+#define HSIO_HW_CFG_PCIE_ENA BIT(1)
+#define HSIO_HW_CFG_QSGMII_ENA BIT(0)
+
+#define HSIO_HW_QSGMII_CFG_SHYST_DIS BIT(3)
+#define HSIO_HW_QSGMII_CFG_E_DET_ENA BIT(2)
+#define HSIO_HW_QSGMII_CFG_USE_I1_ENA BIT(1)
+#define HSIO_HW_QSGMII_CFG_FLIP_LANES BIT(0)
+
+#define HSIO_HW_QSGMII_STAT_DELAY_VAR_X200PS(x) (((x) << 1) & GENMASK(6, 1))
+#define HSIO_HW_QSGMII_STAT_DELAY_VAR_X200PS_M GENMASK(6, 1)
+#define HSIO_HW_QSGMII_STAT_DELAY_VAR_X200PS_X(x) (((x) & GENMASK(6, 1)) >> 1)
+#define HSIO_HW_QSGMII_STAT_SYNC BIT(0)
+
+#define HSIO_CLK_CFG_CLKDIV_PHY(x) (((x) << 1) & GENMASK(8, 1))
+#define HSIO_CLK_CFG_CLKDIV_PHY_M GENMASK(8, 1)
+#define HSIO_CLK_CFG_CLKDIV_PHY_X(x) (((x) & GENMASK(8, 1)) >> 1)
+#define HSIO_CLK_CFG_CLKDIV_PHY_DIS BIT(0)
+
+#define HSIO_TEMP_SENSOR_CTRL_FORCE_TEMP_RD BIT(5)
+#define HSIO_TEMP_SENSOR_CTRL_FORCE_RUN BIT(4)
+#define HSIO_TEMP_SENSOR_CTRL_FORCE_NO_RST BIT(3)
+#define HSIO_TEMP_SENSOR_CTRL_FORCE_POWER_UP BIT(2)
+#define HSIO_TEMP_SENSOR_CTRL_FORCE_CLK BIT(1)
+#define HSIO_TEMP_SENSOR_CTRL_SAMPLE_ENA BIT(0)
+
+#define HSIO_TEMP_SENSOR_CFG_RUN_WID(x) (((x) << 8) & GENMASK(15, 8))
+#define HSIO_TEMP_SENSOR_CFG_RUN_WID_M GENMASK(15, 8)
+#define HSIO_TEMP_SENSOR_CFG_RUN_WID_X(x) (((x) & GENMASK(15, 8)) >> 8)
+#define HSIO_TEMP_SENSOR_CFG_SAMPLE_PER(x) ((x) & GENMASK(7, 0))
+#define HSIO_TEMP_SENSOR_CFG_SAMPLE_PER_M GENMASK(7, 0)
+
+#define HSIO_TEMP_SENSOR_STAT_TEMP_VALID BIT(8)
+#define HSIO_TEMP_SENSOR_STAT_TEMP(x) ((x) & GENMASK(7, 0))
+#define HSIO_TEMP_SENSOR_STAT_TEMP_M GENMASK(7, 0)
+
+#endif
diff --git a/drivers/net/ethernet/mscc/ocelot_io.c b/drivers/net/ethernet/mscc/ocelot_io.c
new file mode 100644
index 000000000000..c6db8ad31fdf
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_io.c
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Microsemi Ocelot Switch driver
+ *
+ * Copyright (c) 2017 Microsemi Corporation
+ */
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+
+#include "ocelot.h"
+
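+/* Register ids encode their target block in the bits above
+ * TARGET_OFFSET and an index into that target's offset table in the
+ * bits below REG_MASK.
+ */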
+u32 __ocelot_read_ix(struct ocelot *ocelot, u32 reg, u32 offset)
+{
+ u16 target = reg >> TARGET_OFFSET;
+ u32 val;
+
+ WARN_ON(!target);
+
+ regmap_read(ocelot->targets[target],
+ ocelot->map[target][reg & REG_MASK] + offset, &val);
+ return val;
+}
+EXPORT_SYMBOL(__ocelot_read_ix);
+
+void __ocelot_write_ix(struct ocelot *ocelot, u32 val, u32 reg, u32 offset)
+{
+ u16 target = reg >> TARGET_OFFSET;
+
+ WARN_ON(!target);
+
+ regmap_write(ocelot->targets[target],
+ ocelot->map[target][reg & REG_MASK] + offset, val);
+}
+EXPORT_SYMBOL(__ocelot_write_ix);
+
+void __ocelot_rmw_ix(struct ocelot *ocelot, u32 val, u32 mask, u32 reg,
+ u32 offset)
+{
+ u16 target = reg >> TARGET_OFFSET;
+
+ WARN_ON(!target);
+
+ regmap_update_bits(ocelot->targets[target],
+ ocelot->map[target][reg & REG_MASK] + offset,
+ mask, val);
+}
+EXPORT_SYMBOL(__ocelot_rmw_ix);
+
+u32 ocelot_port_readl(struct ocelot_port *port, u32 reg)
+{
+ return readl(port->regs + reg);
+}
+EXPORT_SYMBOL(ocelot_port_readl);
+
+void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg)
+{
+ writel(val, port->regs + reg);
+}
+EXPORT_SYMBOL(ocelot_port_writel);
+
+int ocelot_regfields_init(struct ocelot *ocelot,
+ const struct reg_field *const regfields)
+{
+ unsigned int i;
+ u16 target;
+
+ for (i = 0; i < REGFIELD_MAX; i++) {
+ struct reg_field regfield = {};
+ u32 reg = regfields[i].reg;
+
+ if (!reg)
+ continue;
+
+ target = regfields[i].reg >> TARGET_OFFSET;
+
+ regfield.reg = ocelot->map[target][reg & REG_MASK];
+ regfield.lsb = regfields[i].lsb;
+ regfield.msb = regfields[i].msb;
+
+ ocelot->regfields[i] =
+ devm_regmap_field_alloc(ocelot->dev,
+ ocelot->targets[target],
+ regfield);
+
+ if (IS_ERR(ocelot->regfields[i]))
+ return PTR_ERR(ocelot->regfields[i]);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_regfields_init);
+
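+/* Shared MMIO regmap template; .name is filled in with the resource
+ * name just before each devm_regmap_init_mmio() call below.
+ */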
+static struct regmap_config ocelot_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+};
+
+struct regmap *ocelot_io_platform_init(struct ocelot *ocelot,
+ struct platform_device *pdev,
+ const char *name)
+{
+ struct resource *res;
+ void __iomem *regs;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
+ regs = devm_ioremap_resource(ocelot->dev, res);
+ if (IS_ERR(regs))
+ return ERR_CAST(regs);
+
+ ocelot_regmap_config.name = name;
+ return devm_regmap_init_mmio(ocelot->dev, regs,
+ &ocelot_regmap_config);
+}
+EXPORT_SYMBOL(ocelot_io_platform_init);
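
Usage note: a sketch of the expected probe-time call sequence, assuming a SYS target index from the driver's ocelot.h (names illustrative):

        struct regmap *map;

        map = ocelot_io_platform_init(ocelot, pdev, "sys");
        if (IS_ERR(map))
                return PTR_ERR(map);
        ocelot->targets[SYS] = map;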
diff --git a/drivers/net/ethernet/mscc/ocelot_qs.h b/drivers/net/ethernet/mscc/ocelot_qs.h
new file mode 100644
index 000000000000..d18ae726c01d
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_qs.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Microsemi Ocelot Switch driver
+ *
+ * Copyright (c) 2017 Microsemi Corporation
+ */
+
+#ifndef _MSCC_OCELOT_QS_H_
+#define _MSCC_OCELOT_QS_H_
+
+/* TODO handle BE */
+#define XTR_EOF_0 0x00000080U
+#define XTR_EOF_1 0x01000080U
+#define XTR_EOF_2 0x02000080U
+#define XTR_EOF_3 0x03000080U
+#define XTR_PRUNED 0x04000080U
+#define XTR_ABORT 0x05000080U
+#define XTR_ESCAPE 0x06000080U
+#define XTR_NOT_READY 0x07000080U
+#define XTR_VALID_BYTES(x) (4 - (((x) >> 24) & 3))
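
Usage note: these constants are status words that the extraction FIFO interleaves with frame data; XTR_VALID_BYTES() recovers how many bytes of an EOF word are payload. A sketch of classifying one word, assuming a QS_XTR_RD register id from elsewhere in the series (its QS_XTR_RD_RSZ stride is defined just below):

        u32 val = __ocelot_read_ix(ocelot, QS_XTR_RD, grp * QS_XTR_RD_RSZ);

        switch (val) {
        case XTR_NOT_READY:             /* FIFO empty, poll again */
                break;
        case XTR_ABORT:                 /* frame aborted mid-extraction */
                return -EIO;
        case XTR_EOF_0:
        case XTR_EOF_1:
        case XTR_EOF_2:
        case XTR_EOF_3:
                bytes = XTR_VALID_BYTES(val);   /* 4, 3, 2 or 1 */
                break;
        default:                        /* plain frame data word */
                break;
        }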
+
+#define QS_XTR_GRP_CFG_RSZ 0x4
+
+#define QS_XTR_GRP_CFG_MODE(x) (((x) << 2) & GENMASK(3, 2))
+#define QS_XTR_GRP_CFG_MODE_M GENMASK(3, 2)
+#define QS_XTR_GRP_CFG_MODE_X(x) (((x) & GENMASK(3, 2)) >> 2)
+#define QS_XTR_GRP_CFG_STATUS_WORD_POS BIT(1)
+#define QS_XTR_GRP_CFG_BYTE_SWAP BIT(0)
+
+#define QS_XTR_RD_RSZ 0x4
+
+#define QS_XTR_FRM_PRUNING_RSZ 0x4
+
+#define QS_XTR_CFG_DP_WM(x) (((x) << 5) & GENMASK(7, 5))
+#define QS_XTR_CFG_DP_WM_M GENMASK(7, 5)
+#define QS_XTR_CFG_DP_WM_X(x) (((x) & GENMASK(7, 5)) >> 5)
+#define QS_XTR_CFG_SCH_WM(x) (((x) << 2) & GENMASK(4, 2))
+#define QS_XTR_CFG_SCH_WM_M GENMASK(4, 2)
+#define QS_XTR_CFG_SCH_WM_X(x) (((x) & GENMASK(4, 2)) >> 2)
+#define QS_XTR_CFG_OFLW_ERR_STICKY(x) ((x) & GENMASK(1, 0))
+#define QS_XTR_CFG_OFLW_ERR_STICKY_M GENMASK(1, 0)
+
+#define QS_INJ_GRP_CFG_RSZ 0x4
+
+#define QS_INJ_GRP_CFG_MODE(x) (((x) << 2) & GENMASK(3, 2))
+#define QS_INJ_GRP_CFG_MODE_M GENMASK(3, 2)
+#define QS_INJ_GRP_CFG_MODE_X(x) (((x) & GENMASK(3, 2)) >> 2)
+#define QS_INJ_GRP_CFG_BYTE_SWAP BIT(0)
+
+#define QS_INJ_WR_RSZ 0x4
+
+#define QS_INJ_CTRL_RSZ 0x4
+
+#define QS_INJ_CTRL_GAP_SIZE(x) (((x) << 21) & GENMASK(24, 21))
+#define QS_INJ_CTRL_GAP_SIZE_M GENMASK(24, 21)
+#define QS_INJ_CTRL_GAP_SIZE_X(x) (((x) & GENMASK(24, 21)) >> 21)
+#define QS_INJ_CTRL_ABORT BIT(20)
+#define QS_INJ_CTRL_EOF BIT(19)
+#define QS_INJ_CTRL_SOF BIT(18)
+#define QS_INJ_CTRL_VLD_BYTES(x) (((x) << 16) & GENMASK(17, 16))
+#define QS_INJ_CTRL_VLD_BYTES_M GENMASK(17, 16)
+#define QS_INJ_CTRL_VLD_BYTES_X(x) (((x) & GENMASK(17, 16)) >> 16)
+
+#define QS_INJ_STATUS_WMARK_REACHED(x) (((x) << 4) & GENMASK(5, 4))
+#define QS_INJ_STATUS_WMARK_REACHED_M GENMASK(5, 4)
+#define QS_INJ_STATUS_WMARK_REACHED_X(x) (((x) & GENMASK(5, 4)) >> 4)
+#define QS_INJ_STATUS_FIFO_RDY(x) (((x) << 2) & GENMASK(3, 2))
+#define QS_INJ_STATUS_FIFO_RDY_M GENMASK(3, 2)
+#define QS_INJ_STATUS_FIFO_RDY_X(x) (((x) & GENMASK(3, 2)) >> 2)
+#define QS_INJ_STATUS_INJ_IN_PROGRESS(x) ((x) & GENMASK(1, 0))
+#define QS_INJ_STATUS_INJ_IN_PROGRESS_M GENMASK(1, 0)
+
+#define QS_INJ_ERR_RSZ 0x4
+
+#define QS_INJ_ERR_ABORT_ERR_STICKY BIT(1)
+#define QS_INJ_ERR_WR_ERR_STICKY BIT(0)
+
+#endif
diff --git a/drivers/net/ethernet/mscc/ocelot_qsys.h b/drivers/net/ethernet/mscc/ocelot_qsys.h
new file mode 100644
index 000000000000..d8c63aa761be
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_qsys.h
@@ -0,0 +1,270 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Microsemi Ocelot Switch driver
+ *
+ * Copyright (c) 2017 Microsemi Corporation
+ */
+
+#ifndef _MSCC_OCELOT_QSYS_H_
+#define _MSCC_OCELOT_QSYS_H_
+
+#define QSYS_PORT_MODE_RSZ 0x4
+
+#define QSYS_PORT_MODE_DEQUEUE_DIS BIT(1)
+#define QSYS_PORT_MODE_DEQUEUE_LATE BIT(0)
+
+#define QSYS_SWITCH_PORT_MODE_RSZ 0x4
+
+#define QSYS_SWITCH_PORT_MODE_PORT_ENA BIT(14)
+#define QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG(x) (((x) << 11) & GENMASK(13, 11))
+#define QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG_M GENMASK(13, 11)
+#define QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG_X(x) (((x) & GENMASK(13, 11)) >> 11)
+#define QSYS_SWITCH_PORT_MODE_YEL_RSRVD BIT(10)
+#define QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE BIT(9)
+#define QSYS_SWITCH_PORT_MODE_TX_PFC_ENA(x) (((x) << 1) & GENMASK(8, 1))
+#define QSYS_SWITCH_PORT_MODE_TX_PFC_ENA_M GENMASK(8, 1)
+#define QSYS_SWITCH_PORT_MODE_TX_PFC_ENA_X(x) (((x) & GENMASK(8, 1)) >> 1)
+#define QSYS_SWITCH_PORT_MODE_TX_PFC_MODE BIT(0)
+
+#define QSYS_STAT_CNT_CFG_TX_GREEN_CNT_MODE BIT(5)
+#define QSYS_STAT_CNT_CFG_TX_YELLOW_CNT_MODE BIT(4)
+#define QSYS_STAT_CNT_CFG_DROP_GREEN_CNT_MODE BIT(3)
+#define QSYS_STAT_CNT_CFG_DROP_YELLOW_CNT_MODE BIT(2)
+#define QSYS_STAT_CNT_CFG_DROP_COUNT_ONCE BIT(1)
+#define QSYS_STAT_CNT_CFG_DROP_COUNT_EGRESS BIT(0)
+
+#define QSYS_EEE_CFG_RSZ 0x4
+
+#define QSYS_EEE_THRES_EEE_HIGH_BYTES(x) (((x) << 8) & GENMASK(15, 8))
+#define QSYS_EEE_THRES_EEE_HIGH_BYTES_M GENMASK(15, 8)
+#define QSYS_EEE_THRES_EEE_HIGH_BYTES_X(x) (((x) & GENMASK(15, 8)) >> 8)
+#define QSYS_EEE_THRES_EEE_HIGH_FRAMES(x) ((x) & GENMASK(7, 0))
+#define QSYS_EEE_THRES_EEE_HIGH_FRAMES_M GENMASK(7, 0)
+
+#define QSYS_SW_STATUS_RSZ 0x4
+
+#define QSYS_EXT_CPU_CFG_EXT_CPU_PORT(x) (((x) << 8) & GENMASK(12, 8))
+#define QSYS_EXT_CPU_CFG_EXT_CPU_PORT_M GENMASK(12, 8)
+#define QSYS_EXT_CPU_CFG_EXT_CPU_PORT_X(x) (((x) & GENMASK(12, 8)) >> 8)
+#define QSYS_EXT_CPU_CFG_EXT_CPUQ_MSK(x) ((x) & GENMASK(7, 0))
+#define QSYS_EXT_CPU_CFG_EXT_CPUQ_MSK_M GENMASK(7, 0)
+
+#define QSYS_QMAP_GSZ 0x4
+
+#define QSYS_QMAP_SE_BASE(x) (((x) << 5) & GENMASK(12, 5))
+#define QSYS_QMAP_SE_BASE_M GENMASK(12, 5)
+#define QSYS_QMAP_SE_BASE_X(x) (((x) & GENMASK(12, 5)) >> 5)
+#define QSYS_QMAP_SE_IDX_SEL(x) (((x) << 2) & GENMASK(4, 2))
+#define QSYS_QMAP_SE_IDX_SEL_M GENMASK(4, 2)
+#define QSYS_QMAP_SE_IDX_SEL_X(x) (((x) & GENMASK(4, 2)) >> 2)
+#define QSYS_QMAP_SE_INP_SEL(x) ((x) & GENMASK(1, 0))
+#define QSYS_QMAP_SE_INP_SEL_M GENMASK(1, 0)
+
+#define QSYS_ISDX_SGRP_GSZ 0x4
+
+#define QSYS_TIMED_FRAME_ENTRY_GSZ 0x4
+
+#define QSYS_TFRM_MISC_TIMED_CANCEL_SLOT(x) (((x) << 9) & GENMASK(18, 9))
+#define QSYS_TFRM_MISC_TIMED_CANCEL_SLOT_M GENMASK(18, 9)
+#define QSYS_TFRM_MISC_TIMED_CANCEL_SLOT_X(x) (((x) & GENMASK(18, 9)) >> 9)
+#define QSYS_TFRM_MISC_TIMED_CANCEL_1SHOT BIT(8)
+#define QSYS_TFRM_MISC_TIMED_SLOT_MODE_MC BIT(7)
+#define QSYS_TFRM_MISC_TIMED_ENTRY_FAST_CNT(x) ((x) & GENMASK(6, 0))
+#define QSYS_TFRM_MISC_TIMED_ENTRY_FAST_CNT_M GENMASK(6, 0)
+
+#define QSYS_RED_PROFILE_RSZ 0x4
+
+#define QSYS_RED_PROFILE_WM_RED_LOW(x) (((x) << 8) & GENMASK(15, 8))
+#define QSYS_RED_PROFILE_WM_RED_LOW_M GENMASK(15, 8)
+#define QSYS_RED_PROFILE_WM_RED_LOW_X(x) (((x) & GENMASK(15, 8)) >> 8)
+#define QSYS_RED_PROFILE_WM_RED_HIGH(x) ((x) & GENMASK(7, 0))
+#define QSYS_RED_PROFILE_WM_RED_HIGH_M GENMASK(7, 0)
+
+#define QSYS_RES_CFG_GSZ 0x8
+
+#define QSYS_RES_STAT_GSZ 0x8
+
+#define QSYS_RES_STAT_INUSE(x) (((x) << 12) & GENMASK(23, 12))
+#define QSYS_RES_STAT_INUSE_M GENMASK(23, 12)
+#define QSYS_RES_STAT_INUSE_X(x) (((x) & GENMASK(23, 12)) >> 12)
+#define QSYS_RES_STAT_MAXUSE(x) ((x) & GENMASK(11, 0))
+#define QSYS_RES_STAT_MAXUSE_M GENMASK(11, 0)
+
+#define QSYS_EVENTS_CORE_EV_FDC(x) (((x) << 2) & GENMASK(4, 2))
+#define QSYS_EVENTS_CORE_EV_FDC_M GENMASK(4, 2)
+#define QSYS_EVENTS_CORE_EV_FDC_X(x) (((x) & GENMASK(4, 2)) >> 2)
+#define QSYS_EVENTS_CORE_EV_FRD(x) ((x) & GENMASK(1, 0))
+#define QSYS_EVENTS_CORE_EV_FRD_M GENMASK(1, 0)
+
+#define QSYS_QMAXSDU_CFG_0_RSZ 0x4
+
+#define QSYS_QMAXSDU_CFG_1_RSZ 0x4
+
+#define QSYS_QMAXSDU_CFG_2_RSZ 0x4
+
+#define QSYS_QMAXSDU_CFG_3_RSZ 0x4
+
+#define QSYS_QMAXSDU_CFG_4_RSZ 0x4
+
+#define QSYS_QMAXSDU_CFG_5_RSZ 0x4
+
+#define QSYS_QMAXSDU_CFG_6_RSZ 0x4
+
+#define QSYS_QMAXSDU_CFG_7_RSZ 0x4
+
+#define QSYS_PREEMPTION_CFG_RSZ 0x4
+
+#define QSYS_PREEMPTION_CFG_P_QUEUES(x) ((x) & GENMASK(7, 0))
+#define QSYS_PREEMPTION_CFG_P_QUEUES_M GENMASK(7, 0)
+#define QSYS_PREEMPTION_CFG_MM_ADD_FRAG_SIZE(x) (((x) << 8) & GENMASK(9, 8))
+#define QSYS_PREEMPTION_CFG_MM_ADD_FRAG_SIZE_M GENMASK(9, 8)
+#define QSYS_PREEMPTION_CFG_MM_ADD_FRAG_SIZE_X(x) (((x) & GENMASK(9, 8)) >> 8)
+#define QSYS_PREEMPTION_CFG_STRICT_IPG(x) (((x) << 12) & GENMASK(13, 12))
+#define QSYS_PREEMPTION_CFG_STRICT_IPG_M GENMASK(13, 12)
+#define QSYS_PREEMPTION_CFG_STRICT_IPG_X(x) (((x) & GENMASK(13, 12)) >> 12)
+#define QSYS_PREEMPTION_CFG_HOLD_ADVANCE(x) (((x) << 16) & GENMASK(31, 16))
+#define QSYS_PREEMPTION_CFG_HOLD_ADVANCE_M GENMASK(31, 16)
+#define QSYS_PREEMPTION_CFG_HOLD_ADVANCE_X(x) (((x) & GENMASK(31, 16)) >> 16)
+
+#define QSYS_CIR_CFG_GSZ 0x80
+
+#define QSYS_CIR_CFG_CIR_RATE(x) (((x) << 6) & GENMASK(20, 6))
+#define QSYS_CIR_CFG_CIR_RATE_M GENMASK(20, 6)
+#define QSYS_CIR_CFG_CIR_RATE_X(x) (((x) & GENMASK(20, 6)) >> 6)
+#define QSYS_CIR_CFG_CIR_BURST(x) ((x) & GENMASK(5, 0))
+#define QSYS_CIR_CFG_CIR_BURST_M GENMASK(5, 0)
+
+#define QSYS_EIR_CFG_GSZ 0x80
+
+#define QSYS_EIR_CFG_EIR_RATE(x) (((x) << 7) & GENMASK(21, 7))
+#define QSYS_EIR_CFG_EIR_RATE_M GENMASK(21, 7)
+#define QSYS_EIR_CFG_EIR_RATE_X(x) (((x) & GENMASK(21, 7)) >> 7)
+#define QSYS_EIR_CFG_EIR_BURST(x) (((x) << 1) & GENMASK(6, 1))
+#define QSYS_EIR_CFG_EIR_BURST_M GENMASK(6, 1)
+#define QSYS_EIR_CFG_EIR_BURST_X(x) (((x) & GENMASK(6, 1)) >> 1)
+#define QSYS_EIR_CFG_EIR_MARK_ENA BIT(0)
+
+#define QSYS_SE_CFG_GSZ 0x80
+
+#define QSYS_SE_CFG_SE_DWRR_CNT(x) (((x) << 6) & GENMASK(9, 6))
+#define QSYS_SE_CFG_SE_DWRR_CNT_M GENMASK(9, 6)
+#define QSYS_SE_CFG_SE_DWRR_CNT_X(x) (((x) & GENMASK(9, 6)) >> 6)
+#define QSYS_SE_CFG_SE_RR_ENA BIT(5)
+#define QSYS_SE_CFG_SE_AVB_ENA BIT(4)
+#define QSYS_SE_CFG_SE_FRM_MODE(x) (((x) << 2) & GENMASK(3, 2))
+#define QSYS_SE_CFG_SE_FRM_MODE_M GENMASK(3, 2)
+#define QSYS_SE_CFG_SE_FRM_MODE_X(x) (((x) & GENMASK(3, 2)) >> 2)
+#define QSYS_SE_CFG_SE_EXC_ENA BIT(1)
+#define QSYS_SE_CFG_SE_EXC_FWD BIT(0)
+
+#define QSYS_SE_DWRR_CFG_GSZ 0x80
+#define QSYS_SE_DWRR_CFG_RSZ 0x4
+
+#define QSYS_SE_CONNECT_GSZ 0x80
+
+#define QSYS_SE_CONNECT_SE_OUTP_IDX(x) (((x) << 17) & GENMASK(24, 17))
+#define QSYS_SE_CONNECT_SE_OUTP_IDX_M GENMASK(24, 17)
+#define QSYS_SE_CONNECT_SE_OUTP_IDX_X(x) (((x) & GENMASK(24, 17)) >> 17)
+#define QSYS_SE_CONNECT_SE_INP_IDX(x) (((x) << 9) & GENMASK(16, 9))
+#define QSYS_SE_CONNECT_SE_INP_IDX_M GENMASK(16, 9)
+#define QSYS_SE_CONNECT_SE_INP_IDX_X(x) (((x) & GENMASK(16, 9)) >> 9)
+#define QSYS_SE_CONNECT_SE_OUTP_CON(x) (((x) << 5) & GENMASK(8, 5))
+#define QSYS_SE_CONNECT_SE_OUTP_CON_M GENMASK(8, 5)
+#define QSYS_SE_CONNECT_SE_OUTP_CON_X(x) (((x) & GENMASK(8, 5)) >> 5)
+#define QSYS_SE_CONNECT_SE_INP_CNT(x) (((x) << 1) & GENMASK(4, 1))
+#define QSYS_SE_CONNECT_SE_INP_CNT_M GENMASK(4, 1)
+#define QSYS_SE_CONNECT_SE_INP_CNT_X(x) (((x) & GENMASK(4, 1)) >> 1)
+#define QSYS_SE_CONNECT_SE_TERMINAL BIT(0)
+
+#define QSYS_SE_DLB_SENSE_GSZ 0x80
+
+#define QSYS_SE_DLB_SENSE_SE_DLB_PRIO(x) (((x) << 11) & GENMASK(13, 11))
+#define QSYS_SE_DLB_SENSE_SE_DLB_PRIO_M GENMASK(13, 11)
+#define QSYS_SE_DLB_SENSE_SE_DLB_PRIO_X(x) (((x) & GENMASK(13, 11)) >> 11)
+#define QSYS_SE_DLB_SENSE_SE_DLB_SPORT(x) (((x) << 7) & GENMASK(10, 7))
+#define QSYS_SE_DLB_SENSE_SE_DLB_SPORT_M GENMASK(10, 7)
+#define QSYS_SE_DLB_SENSE_SE_DLB_SPORT_X(x) (((x) & GENMASK(10, 7)) >> 7)
+#define QSYS_SE_DLB_SENSE_SE_DLB_DPORT(x) (((x) << 3) & GENMASK(6, 3))
+#define QSYS_SE_DLB_SENSE_SE_DLB_DPORT_M GENMASK(6, 3)
+#define QSYS_SE_DLB_SENSE_SE_DLB_DPORT_X(x) (((x) & GENMASK(6, 3)) >> 3)
+#define QSYS_SE_DLB_SENSE_SE_DLB_PRIO_ENA BIT(2)
+#define QSYS_SE_DLB_SENSE_SE_DLB_SPORT_ENA BIT(1)
+#define QSYS_SE_DLB_SENSE_SE_DLB_DPORT_ENA BIT(0)
+
+#define QSYS_CIR_STATE_GSZ 0x80
+
+#define QSYS_CIR_STATE_CIR_LVL(x) (((x) << 4) & GENMASK(25, 4))
+#define QSYS_CIR_STATE_CIR_LVL_M GENMASK(25, 4)
+#define QSYS_CIR_STATE_CIR_LVL_X(x) (((x) & GENMASK(25, 4)) >> 4)
+#define QSYS_CIR_STATE_SHP_TIME(x) ((x) & GENMASK(3, 0))
+#define QSYS_CIR_STATE_SHP_TIME_M GENMASK(3, 0)
+
+#define QSYS_EIR_STATE_GSZ 0x80
+
+#define QSYS_SE_STATE_GSZ 0x80
+
+#define QSYS_SE_STATE_SE_OUTP_LVL(x) (((x) << 1) & GENMASK(2, 1))
+#define QSYS_SE_STATE_SE_OUTP_LVL_M GENMASK(2, 1)
+#define QSYS_SE_STATE_SE_OUTP_LVL_X(x) (((x) & GENMASK(2, 1)) >> 1)
+#define QSYS_SE_STATE_SE_WAS_YEL BIT(0)
+
+#define QSYS_HSCH_MISC_CFG_SE_CONNECT_VLD BIT(8)
+#define QSYS_HSCH_MISC_CFG_FRM_ADJ(x) (((x) << 3) & GENMASK(7, 3))
+#define QSYS_HSCH_MISC_CFG_FRM_ADJ_M GENMASK(7, 3)
+#define QSYS_HSCH_MISC_CFG_FRM_ADJ_X(x) (((x) & GENMASK(7, 3)) >> 3)
+#define QSYS_HSCH_MISC_CFG_LEAK_DIS BIT(2)
+#define QSYS_HSCH_MISC_CFG_QSHP_EXC_ENA BIT(1)
+#define QSYS_HSCH_MISC_CFG_PFC_BYP_UPD BIT(0)
+
+#define QSYS_TAG_CONFIG_RSZ 0x4
+
+#define QSYS_TAG_CONFIG_ENABLE BIT(0)
+#define QSYS_TAG_CONFIG_LINK_SPEED(x) (((x) << 4) & GENMASK(5, 4))
+#define QSYS_TAG_CONFIG_LINK_SPEED_M GENMASK(5, 4)
+#define QSYS_TAG_CONFIG_LINK_SPEED_X(x) (((x) & GENMASK(5, 4)) >> 4)
+#define QSYS_TAG_CONFIG_INIT_GATE_STATE(x) (((x) << 8) & GENMASK(15, 8))
+#define QSYS_TAG_CONFIG_INIT_GATE_STATE_M GENMASK(15, 8)
+#define QSYS_TAG_CONFIG_INIT_GATE_STATE_X(x) (((x) & GENMASK(15, 8)) >> 8)
+#define QSYS_TAG_CONFIG_SCH_TRAFFIC_QUEUES(x) (((x) << 16) & GENMASK(23, 16))
+#define QSYS_TAG_CONFIG_SCH_TRAFFIC_QUEUES_M GENMASK(23, 16)
+#define QSYS_TAG_CONFIG_SCH_TRAFFIC_QUEUES_X(x) (((x) & GENMASK(23, 16)) >> 16)
+
+#define QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM(x) ((x) & GENMASK(7, 0))
+#define QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM_M GENMASK(7, 0)
+#define QSYS_TAS_PARAM_CFG_CTRL_ALWAYS_GUARD_BAND_SCH_Q BIT(8)
+#define QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE BIT(16)
+
+#define QSYS_PORT_MAX_SDU_RSZ 0x4
+
+#define QSYS_PARAM_CFG_REG_3_BASE_TIME_SEC_MSB(x) ((x) & GENMASK(15, 0))
+#define QSYS_PARAM_CFG_REG_3_BASE_TIME_SEC_MSB_M GENMASK(15, 0)
+#define QSYS_PARAM_CFG_REG_3_LIST_LENGTH(x) (((x) << 16) & GENMASK(31, 16))
+#define QSYS_PARAM_CFG_REG_3_LIST_LENGTH_M GENMASK(31, 16)
+#define QSYS_PARAM_CFG_REG_3_LIST_LENGTH_X(x) (((x) & GENMASK(31, 16)) >> 16)
+
+#define QSYS_GCL_CFG_REG_1_GCL_ENTRY_NUM(x) ((x) & GENMASK(5, 0))
+#define QSYS_GCL_CFG_REG_1_GCL_ENTRY_NUM_M GENMASK(5, 0)
+#define QSYS_GCL_CFG_REG_1_GATE_STATE(x) (((x) << 8) & GENMASK(15, 8))
+#define QSYS_GCL_CFG_REG_1_GATE_STATE_M GENMASK(15, 8)
+#define QSYS_GCL_CFG_REG_1_GATE_STATE_X(x) (((x) & GENMASK(15, 8)) >> 8)
+
+#define QSYS_PARAM_STATUS_REG_3_BASE_TIME_SEC_MSB(x) ((x) & GENMASK(15, 0))
+#define QSYS_PARAM_STATUS_REG_3_BASE_TIME_SEC_MSB_M GENMASK(15, 0)
+#define QSYS_PARAM_STATUS_REG_3_LIST_LENGTH(x) (((x) << 16) & GENMASK(31, 16))
+#define QSYS_PARAM_STATUS_REG_3_LIST_LENGTH_M GENMASK(31, 16)
+#define QSYS_PARAM_STATUS_REG_3_LIST_LENGTH_X(x) (((x) & GENMASK(31, 16)) >> 16)
+
+#define QSYS_PARAM_STATUS_REG_8_CFG_CHG_TIME_SEC_MSB(x) ((x) & GENMASK(15, 0))
+#define QSYS_PARAM_STATUS_REG_8_CFG_CHG_TIME_SEC_MSB_M GENMASK(15, 0)
+#define QSYS_PARAM_STATUS_REG_8_OPER_GATE_STATE(x) (((x) << 16) & GENMASK(23, 16))
+#define QSYS_PARAM_STATUS_REG_8_OPER_GATE_STATE_M GENMASK(23, 16)
+#define QSYS_PARAM_STATUS_REG_8_OPER_GATE_STATE_X(x) (((x) & GENMASK(23, 16)) >> 16)
+#define QSYS_PARAM_STATUS_REG_8_CONFIG_PENDING BIT(24)
+
+#define QSYS_GCL_STATUS_REG_1_GCL_ENTRY_NUM(x) ((x) & GENMASK(5, 0))
+#define QSYS_GCL_STATUS_REG_1_GCL_ENTRY_NUM_M GENMASK(5, 0)
+#define QSYS_GCL_STATUS_REG_1_GATE_STATE(x) (((x) << 8) & GENMASK(15, 8))
+#define QSYS_GCL_STATUS_REG_1_GATE_STATE_M GENMASK(15, 8)
+#define QSYS_GCL_STATUS_REG_1_GATE_STATE_X(x) (((x) & GENMASK(15, 8)) >> 8)
+
+#endif
diff --git a/drivers/net/ethernet/mscc/ocelot_regs.c b/drivers/net/ethernet/mscc/ocelot_regs.c
new file mode 100644
index 000000000000..e334b406c40c
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_regs.c
@@ -0,0 +1,497 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Microsemi Ocelot Switch driver
+ *
+ * Copyright (c) 2017 Microsemi Corporation
+ */
+#include "ocelot.h"
+
+static const u32 ocelot_ana_regmap[] = {
+ REG(ANA_ADVLEARN, 0x009000),
+ REG(ANA_VLANMASK, 0x009004),
+ REG(ANA_PORT_B_DOMAIN, 0x009008),
+ REG(ANA_ANAGEFIL, 0x00900c),
+ REG(ANA_ANEVENTS, 0x009010),
+ REG(ANA_STORMLIMIT_BURST, 0x009014),
+ REG(ANA_STORMLIMIT_CFG, 0x009018),
+ REG(ANA_ISOLATED_PORTS, 0x009028),
+ REG(ANA_COMMUNITY_PORTS, 0x00902c),
+ REG(ANA_AUTOAGE, 0x009030),
+ REG(ANA_MACTOPTIONS, 0x009034),
+ REG(ANA_LEARNDISC, 0x009038),
+ REG(ANA_AGENCTRL, 0x00903c),
+ REG(ANA_MIRRORPORTS, 0x009040),
+ REG(ANA_EMIRRORPORTS, 0x009044),
+ REG(ANA_FLOODING, 0x009048),
+ REG(ANA_FLOODING_IPMC, 0x00904c),
+ REG(ANA_SFLOW_CFG, 0x009050),
+ REG(ANA_PORT_MODE, 0x009080),
+ REG(ANA_PGID_PGID, 0x008c00),
+ REG(ANA_TABLES_ANMOVED, 0x008b30),
+ REG(ANA_TABLES_MACHDATA, 0x008b34),
+ REG(ANA_TABLES_MACLDATA, 0x008b38),
+ REG(ANA_TABLES_MACACCESS, 0x008b3c),
+ REG(ANA_TABLES_MACTINDX, 0x008b40),
+ REG(ANA_TABLES_VLANACCESS, 0x008b44),
+ REG(ANA_TABLES_VLANTIDX, 0x008b48),
+ REG(ANA_TABLES_ISDXACCESS, 0x008b4c),
+ REG(ANA_TABLES_ISDXTIDX, 0x008b50),
+ REG(ANA_TABLES_ENTRYLIM, 0x008b00),
+ REG(ANA_TABLES_PTP_ID_HIGH, 0x008b54),
+ REG(ANA_TABLES_PTP_ID_LOW, 0x008b58),
+ REG(ANA_MSTI_STATE, 0x008e00),
+ REG(ANA_PORT_VLAN_CFG, 0x007000),
+ REG(ANA_PORT_DROP_CFG, 0x007004),
+ REG(ANA_PORT_QOS_CFG, 0x007008),
+ REG(ANA_PORT_VCAP_CFG, 0x00700c),
+ REG(ANA_PORT_VCAP_S1_KEY_CFG, 0x007010),
+ REG(ANA_PORT_VCAP_S2_CFG, 0x00701c),
+ REG(ANA_PORT_PCP_DEI_MAP, 0x007020),
+ REG(ANA_PORT_CPU_FWD_CFG, 0x007060),
+ REG(ANA_PORT_CPU_FWD_BPDU_CFG, 0x007064),
+ REG(ANA_PORT_CPU_FWD_GARP_CFG, 0x007068),
+ REG(ANA_PORT_CPU_FWD_CCM_CFG, 0x00706c),
+ REG(ANA_PORT_PORT_CFG, 0x007070),
+ REG(ANA_PORT_POL_CFG, 0x007074),
+ REG(ANA_PORT_PTP_CFG, 0x007078),
+ REG(ANA_PORT_PTP_DLY1_CFG, 0x00707c),
+ REG(ANA_OAM_UPM_LM_CNT, 0x007c00),
+ REG(ANA_PORT_PTP_DLY2_CFG, 0x007080),
+ REG(ANA_PFC_PFC_CFG, 0x008800),
+ REG(ANA_PFC_PFC_TIMER, 0x008804),
+ REG(ANA_IPT_OAM_MEP_CFG, 0x008000),
+ REG(ANA_IPT_IPT, 0x008004),
+ REG(ANA_PPT_PPT, 0x008ac0),
+ REG(ANA_FID_MAP_FID_MAP, 0x000000),
+ REG(ANA_AGGR_CFG, 0x0090b4),
+ REG(ANA_CPUQ_CFG, 0x0090b8),
+ REG(ANA_CPUQ_CFG2, 0x0090bc),
+ REG(ANA_CPUQ_8021_CFG, 0x0090c0),
+ REG(ANA_DSCP_CFG, 0x009100),
+ REG(ANA_DSCP_REWR_CFG, 0x009200),
+ REG(ANA_VCAP_RNG_TYPE_CFG, 0x009240),
+ REG(ANA_VCAP_RNG_VAL_CFG, 0x009260),
+ REG(ANA_VRAP_CFG, 0x009280),
+ REG(ANA_VRAP_HDR_DATA, 0x009284),
+ REG(ANA_VRAP_HDR_MASK, 0x009288),
+ REG(ANA_DISCARD_CFG, 0x00928c),
+ REG(ANA_FID_CFG, 0x009290),
+ REG(ANA_POL_PIR_CFG, 0x004000),
+ REG(ANA_POL_CIR_CFG, 0x004004),
+ REG(ANA_POL_MODE_CFG, 0x004008),
+ REG(ANA_POL_PIR_STATE, 0x00400c),
+ REG(ANA_POL_CIR_STATE, 0x004010),
+ REG(ANA_POL_STATE, 0x004014),
+ REG(ANA_POL_FLOWC, 0x008b80),
+ REG(ANA_POL_HYST, 0x008bec),
+ REG(ANA_POL_MISC_CFG, 0x008bf0),
+};
+
+static const u32 ocelot_qs_regmap[] = {
+ REG(QS_XTR_GRP_CFG, 0x000000),
+ REG(QS_XTR_RD, 0x000008),
+ REG(QS_XTR_FRM_PRUNING, 0x000010),
+ REG(QS_XTR_FLUSH, 0x000018),
+ REG(QS_XTR_DATA_PRESENT, 0x00001c),
+ REG(QS_XTR_CFG, 0x000020),
+ REG(QS_INJ_GRP_CFG, 0x000024),
+ REG(QS_INJ_WR, 0x00002c),
+ REG(QS_INJ_CTRL, 0x000034),
+ REG(QS_INJ_STATUS, 0x00003c),
+ REG(QS_INJ_ERR, 0x000040),
+ REG(QS_INH_DBG, 0x000048),
+};
+
+static const u32 ocelot_hsio_regmap[] = {
+ REG(HSIO_PLL5G_CFG0, 0x000000),
+ REG(HSIO_PLL5G_CFG1, 0x000004),
+ REG(HSIO_PLL5G_CFG2, 0x000008),
+ REG(HSIO_PLL5G_CFG3, 0x00000c),
+ REG(HSIO_PLL5G_CFG4, 0x000010),
+ REG(HSIO_PLL5G_CFG5, 0x000014),
+ REG(HSIO_PLL5G_CFG6, 0x000018),
+ REG(HSIO_PLL5G_STATUS0, 0x00001c),
+ REG(HSIO_PLL5G_STATUS1, 0x000020),
+ REG(HSIO_PLL5G_BIST_CFG0, 0x000024),
+ REG(HSIO_PLL5G_BIST_CFG1, 0x000028),
+ REG(HSIO_PLL5G_BIST_CFG2, 0x00002c),
+ REG(HSIO_PLL5G_BIST_STAT0, 0x000030),
+ REG(HSIO_PLL5G_BIST_STAT1, 0x000034),
+ REG(HSIO_RCOMP_CFG0, 0x000038),
+ REG(HSIO_RCOMP_STATUS, 0x00003c),
+ REG(HSIO_SYNC_ETH_CFG, 0x000040),
+ REG(HSIO_SYNC_ETH_PLL_CFG, 0x000048),
+ REG(HSIO_S1G_DES_CFG, 0x00004c),
+ REG(HSIO_S1G_IB_CFG, 0x000050),
+ REG(HSIO_S1G_OB_CFG, 0x000054),
+ REG(HSIO_S1G_SER_CFG, 0x000058),
+ REG(HSIO_S1G_COMMON_CFG, 0x00005c),
+ REG(HSIO_S1G_PLL_CFG, 0x000060),
+ REG(HSIO_S1G_PLL_STATUS, 0x000064),
+ REG(HSIO_S1G_DFT_CFG0, 0x000068),
+ REG(HSIO_S1G_DFT_CFG1, 0x00006c),
+ REG(HSIO_S1G_DFT_CFG2, 0x000070),
+ REG(HSIO_S1G_TP_CFG, 0x000074),
+ REG(HSIO_S1G_RC_PLL_BIST_CFG, 0x000078),
+ REG(HSIO_S1G_MISC_CFG, 0x00007c),
+ REG(HSIO_S1G_DFT_STATUS, 0x000080),
+ REG(HSIO_S1G_MISC_STATUS, 0x000084),
+ REG(HSIO_MCB_S1G_ADDR_CFG, 0x000088),
+ REG(HSIO_S6G_DIG_CFG, 0x00008c),
+ REG(HSIO_S6G_DFT_CFG0, 0x000090),
+ REG(HSIO_S6G_DFT_CFG1, 0x000094),
+ REG(HSIO_S6G_DFT_CFG2, 0x000098),
+ REG(HSIO_S6G_TP_CFG0, 0x00009c),
+ REG(HSIO_S6G_TP_CFG1, 0x0000a0),
+ REG(HSIO_S6G_RC_PLL_BIST_CFG, 0x0000a4),
+ REG(HSIO_S6G_MISC_CFG, 0x0000a8),
+ REG(HSIO_S6G_OB_ANEG_CFG, 0x0000ac),
+ REG(HSIO_S6G_DFT_STATUS, 0x0000b0),
+ REG(HSIO_S6G_ERR_CNT, 0x0000b4),
+ REG(HSIO_S6G_MISC_STATUS, 0x0000b8),
+ REG(HSIO_S6G_DES_CFG, 0x0000bc),
+ REG(HSIO_S6G_IB_CFG, 0x0000c0),
+ REG(HSIO_S6G_IB_CFG1, 0x0000c4),
+ REG(HSIO_S6G_IB_CFG2, 0x0000c8),
+ REG(HSIO_S6G_IB_CFG3, 0x0000cc),
+ REG(HSIO_S6G_IB_CFG4, 0x0000d0),
+ REG(HSIO_S6G_IB_CFG5, 0x0000d4),
+ REG(HSIO_S6G_OB_CFG, 0x0000d8),
+ REG(HSIO_S6G_OB_CFG1, 0x0000dc),
+ REG(HSIO_S6G_SER_CFG, 0x0000e0),
+ REG(HSIO_S6G_COMMON_CFG, 0x0000e4),
+ REG(HSIO_S6G_PLL_CFG, 0x0000e8),
+ REG(HSIO_S6G_ACJTAG_CFG, 0x0000ec),
+ REG(HSIO_S6G_GP_CFG, 0x0000f0),
+ REG(HSIO_S6G_IB_STATUS0, 0x0000f4),
+ REG(HSIO_S6G_IB_STATUS1, 0x0000f8),
+ REG(HSIO_S6G_ACJTAG_STATUS, 0x0000fc),
+ REG(HSIO_S6G_PLL_STATUS, 0x000100),
+ REG(HSIO_S6G_REVID, 0x000104),
+ REG(HSIO_MCB_S6G_ADDR_CFG, 0x000108),
+ REG(HSIO_HW_CFG, 0x00010c),
+ REG(HSIO_HW_QSGMII_CFG, 0x000110),
+ REG(HSIO_HW_QSGMII_STAT, 0x000114),
+ REG(HSIO_CLK_CFG, 0x000118),
+ REG(HSIO_TEMP_SENSOR_CTRL, 0x00011c),
+ REG(HSIO_TEMP_SENSOR_CFG, 0x000120),
+ REG(HSIO_TEMP_SENSOR_STAT, 0x000124),
+};
+
+static const u32 ocelot_qsys_regmap[] = {
+ REG(QSYS_PORT_MODE, 0x011200),
+ REG(QSYS_SWITCH_PORT_MODE, 0x011234),
+ REG(QSYS_STAT_CNT_CFG, 0x011264),
+ REG(QSYS_EEE_CFG, 0x011268),
+ REG(QSYS_EEE_THRES, 0x011294),
+ REG(QSYS_IGR_NO_SHARING, 0x011298),
+ REG(QSYS_EGR_NO_SHARING, 0x01129c),
+ REG(QSYS_SW_STATUS, 0x0112a0),
+ REG(QSYS_EXT_CPU_CFG, 0x0112d0),
+ REG(QSYS_PAD_CFG, 0x0112d4),
+ REG(QSYS_CPU_GROUP_MAP, 0x0112d8),
+ REG(QSYS_QMAP, 0x0112dc),
+ REG(QSYS_ISDX_SGRP, 0x011400),
+ REG(QSYS_TIMED_FRAME_ENTRY, 0x014000),
+ REG(QSYS_TFRM_MISC, 0x011310),
+ REG(QSYS_TFRM_PORT_DLY, 0x011314),
+ REG(QSYS_TFRM_TIMER_CFG_1, 0x011318),
+ REG(QSYS_TFRM_TIMER_CFG_2, 0x01131c),
+ REG(QSYS_TFRM_TIMER_CFG_3, 0x011320),
+ REG(QSYS_TFRM_TIMER_CFG_4, 0x011324),
+ REG(QSYS_TFRM_TIMER_CFG_5, 0x011328),
+ REG(QSYS_TFRM_TIMER_CFG_6, 0x01132c),
+ REG(QSYS_TFRM_TIMER_CFG_7, 0x011330),
+ REG(QSYS_TFRM_TIMER_CFG_8, 0x011334),
+ REG(QSYS_RED_PROFILE, 0x011338),
+ REG(QSYS_RES_QOS_MODE, 0x011378),
+ REG(QSYS_RES_CFG, 0x012000),
+ REG(QSYS_RES_STAT, 0x012004),
+ REG(QSYS_EGR_DROP_MODE, 0x01137c),
+ REG(QSYS_EQ_CTRL, 0x011380),
+ REG(QSYS_EVENTS_CORE, 0x011384),
+ REG(QSYS_CIR_CFG, 0x000000),
+ REG(QSYS_EIR_CFG, 0x000004),
+ REG(QSYS_SE_CFG, 0x000008),
+ REG(QSYS_SE_DWRR_CFG, 0x00000c),
+ REG(QSYS_SE_CONNECT, 0x00003c),
+ REG(QSYS_SE_DLB_SENSE, 0x000040),
+ REG(QSYS_CIR_STATE, 0x000044),
+ REG(QSYS_EIR_STATE, 0x000048),
+ REG(QSYS_SE_STATE, 0x00004c),
+ REG(QSYS_HSCH_MISC_CFG, 0x011388),
+};
+
+static const u32 ocelot_rew_regmap[] = {
+ REG(REW_PORT_VLAN_CFG, 0x000000),
+ REG(REW_TAG_CFG, 0x000004),
+ REG(REW_PORT_CFG, 0x000008),
+ REG(REW_DSCP_CFG, 0x00000c),
+ REG(REW_PCP_DEI_QOS_MAP_CFG, 0x000010),
+ REG(REW_PTP_CFG, 0x000050),
+ REG(REW_PTP_DLY1_CFG, 0x000054),
+ REG(REW_DSCP_REMAP_DP1_CFG, 0x000690),
+ REG(REW_DSCP_REMAP_CFG, 0x000790),
+ REG(REW_STAT_CFG, 0x000890),
+ REG(REW_PPT, 0x000680),
+};
+
+static const u32 ocelot_sys_regmap[] = {
+ REG(SYS_COUNT_RX_OCTETS, 0x000000),
+ REG(SYS_COUNT_RX_UNICAST, 0x000004),
+ REG(SYS_COUNT_RX_MULTICAST, 0x000008),
+ REG(SYS_COUNT_RX_BROADCAST, 0x00000c),
+ REG(SYS_COUNT_RX_SHORTS, 0x000010),
+ REG(SYS_COUNT_RX_FRAGMENTS, 0x000014),
+ REG(SYS_COUNT_RX_JABBERS, 0x000018),
+ REG(SYS_COUNT_RX_CRC_ALIGN_ERRS, 0x00001c),
+ REG(SYS_COUNT_RX_SYM_ERRS, 0x000020),
+ REG(SYS_COUNT_RX_64, 0x000024),
+ REG(SYS_COUNT_RX_65_127, 0x000028),
+ REG(SYS_COUNT_RX_128_255, 0x00002c),
+ REG(SYS_COUNT_RX_256_1023, 0x000030),
+ REG(SYS_COUNT_RX_1024_1526, 0x000034),
+ REG(SYS_COUNT_RX_1527_MAX, 0x000038),
+ REG(SYS_COUNT_RX_PAUSE, 0x00003c),
+ REG(SYS_COUNT_RX_CONTROL, 0x000040),
+ REG(SYS_COUNT_RX_LONGS, 0x000044),
+ REG(SYS_COUNT_RX_CLASSIFIED_DROPS, 0x000048),
+ REG(SYS_COUNT_TX_OCTETS, 0x000100),
+ REG(SYS_COUNT_TX_UNICAST, 0x000104),
+ REG(SYS_COUNT_TX_MULTICAST, 0x000108),
+ REG(SYS_COUNT_TX_BROADCAST, 0x00010c),
+ REG(SYS_COUNT_TX_COLLISION, 0x000110),
+ REG(SYS_COUNT_TX_DROPS, 0x000114),
+ REG(SYS_COUNT_TX_PAUSE, 0x000118),
+ REG(SYS_COUNT_TX_64, 0x00011c),
+ REG(SYS_COUNT_TX_65_127, 0x000120),
+ REG(SYS_COUNT_TX_128_511, 0x000124),
+ REG(SYS_COUNT_TX_512_1023, 0x000128),
+ REG(SYS_COUNT_TX_1024_1526, 0x00012c),
+ REG(SYS_COUNT_TX_1527_MAX, 0x000130),
+ REG(SYS_COUNT_TX_AGING, 0x000170),
+ REG(SYS_RESET_CFG, 0x000508),
+ REG(SYS_CMID, 0x00050c),
+ REG(SYS_VLAN_ETYPE_CFG, 0x000510),
+ REG(SYS_PORT_MODE, 0x000514),
+ REG(SYS_FRONT_PORT_MODE, 0x000548),
+ REG(SYS_FRM_AGING, 0x000574),
+ REG(SYS_STAT_CFG, 0x000578),
+ REG(SYS_SW_STATUS, 0x00057c),
+ REG(SYS_MISC_CFG, 0x0005ac),
+ REG(SYS_REW_MAC_HIGH_CFG, 0x0005b0),
+ REG(SYS_REW_MAC_LOW_CFG, 0x0005dc),
+ REG(SYS_CM_ADDR, 0x000500),
+ REG(SYS_CM_DATA, 0x000504),
+ REG(SYS_PAUSE_CFG, 0x000608),
+ REG(SYS_PAUSE_TOT_CFG, 0x000638),
+ REG(SYS_ATOP, 0x00063c),
+ REG(SYS_ATOP_TOT_CFG, 0x00066c),
+ REG(SYS_MAC_FC_CFG, 0x000670),
+ REG(SYS_MMGT, 0x00069c),
+ REG(SYS_MMGT_FAST, 0x0006a0),
+ REG(SYS_EVENTS_DIF, 0x0006a4),
+ REG(SYS_EVENTS_CORE, 0x0006b4),
+ REG(SYS_CNT, 0x000000),
+ REG(SYS_PTP_STATUS, 0x0006b8),
+ REG(SYS_PTP_TXSTAMP, 0x0006bc),
+ REG(SYS_PTP_NXT, 0x0006c0),
+ REG(SYS_PTP_CFG, 0x0006c4),
+};
+
+static const u32 *ocelot_regmap[] = {
+ [ANA] = ocelot_ana_regmap,
+ [QS] = ocelot_qs_regmap,
+ [HSIO] = ocelot_hsio_regmap,
+ [QSYS] = ocelot_qsys_regmap,
+ [REW] = ocelot_rew_regmap,
+ [SYS] = ocelot_sys_regmap,
+};
+
+static const struct reg_field ocelot_regfields[] = {
+ [ANA_ADVLEARN_VLAN_CHK] = REG_FIELD(ANA_ADVLEARN, 11, 11),
+ [ANA_ADVLEARN_LEARN_MIRROR] = REG_FIELD(ANA_ADVLEARN, 0, 10),
+ [ANA_ANEVENTS_MSTI_DROP] = REG_FIELD(ANA_ANEVENTS, 27, 27),
+ [ANA_ANEVENTS_ACLKILL] = REG_FIELD(ANA_ANEVENTS, 26, 26),
+ [ANA_ANEVENTS_ACLUSED] = REG_FIELD(ANA_ANEVENTS, 25, 25),
+ [ANA_ANEVENTS_AUTOAGE] = REG_FIELD(ANA_ANEVENTS, 24, 24),
+ [ANA_ANEVENTS_VS2TTL1] = REG_FIELD(ANA_ANEVENTS, 23, 23),
+ [ANA_ANEVENTS_STORM_DROP] = REG_FIELD(ANA_ANEVENTS, 22, 22),
+ [ANA_ANEVENTS_LEARN_DROP] = REG_FIELD(ANA_ANEVENTS, 21, 21),
+ [ANA_ANEVENTS_AGED_ENTRY] = REG_FIELD(ANA_ANEVENTS, 20, 20),
+ [ANA_ANEVENTS_CPU_LEARN_FAILED] = REG_FIELD(ANA_ANEVENTS, 19, 19),
+ [ANA_ANEVENTS_AUTO_LEARN_FAILED] = REG_FIELD(ANA_ANEVENTS, 18, 18),
+ [ANA_ANEVENTS_LEARN_REMOVE] = REG_FIELD(ANA_ANEVENTS, 17, 17),
+ [ANA_ANEVENTS_AUTO_LEARNED] = REG_FIELD(ANA_ANEVENTS, 16, 16),
+ [ANA_ANEVENTS_AUTO_MOVED] = REG_FIELD(ANA_ANEVENTS, 15, 15),
+ [ANA_ANEVENTS_DROPPED] = REG_FIELD(ANA_ANEVENTS, 14, 14),
+ [ANA_ANEVENTS_CLASSIFIED_DROP] = REG_FIELD(ANA_ANEVENTS, 13, 13),
+ [ANA_ANEVENTS_CLASSIFIED_COPY] = REG_FIELD(ANA_ANEVENTS, 12, 12),
+ [ANA_ANEVENTS_VLAN_DISCARD] = REG_FIELD(ANA_ANEVENTS, 11, 11),
+ [ANA_ANEVENTS_FWD_DISCARD] = REG_FIELD(ANA_ANEVENTS, 10, 10),
+ [ANA_ANEVENTS_MULTICAST_FLOOD] = REG_FIELD(ANA_ANEVENTS, 9, 9),
+ [ANA_ANEVENTS_UNICAST_FLOOD] = REG_FIELD(ANA_ANEVENTS, 8, 8),
+ [ANA_ANEVENTS_DEST_KNOWN] = REG_FIELD(ANA_ANEVENTS, 7, 7),
+ [ANA_ANEVENTS_BUCKET3_MATCH] = REG_FIELD(ANA_ANEVENTS, 6, 6),
+ [ANA_ANEVENTS_BUCKET2_MATCH] = REG_FIELD(ANA_ANEVENTS, 5, 5),
+ [ANA_ANEVENTS_BUCKET1_MATCH] = REG_FIELD(ANA_ANEVENTS, 4, 4),
+ [ANA_ANEVENTS_BUCKET0_MATCH] = REG_FIELD(ANA_ANEVENTS, 3, 3),
+ [ANA_ANEVENTS_CPU_OPERATION] = REG_FIELD(ANA_ANEVENTS, 2, 2),
+ [ANA_ANEVENTS_DMAC_LOOKUP] = REG_FIELD(ANA_ANEVENTS, 1, 1),
+ [ANA_ANEVENTS_SMAC_LOOKUP] = REG_FIELD(ANA_ANEVENTS, 0, 0),
+ [ANA_TABLES_MACACCESS_B_DOM] = REG_FIELD(ANA_TABLES_MACACCESS, 18, 18),
+ [ANA_TABLES_MACTINDX_BUCKET] = REG_FIELD(ANA_TABLES_MACTINDX, 10, 11),
+ [ANA_TABLES_MACTINDX_M_INDEX] = REG_FIELD(ANA_TABLES_MACTINDX, 0, 9),
+ [QSYS_TIMED_FRAME_ENTRY_TFRM_VLD] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 20, 20),
+ [QSYS_TIMED_FRAME_ENTRY_TFRM_FP] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 8, 19),
+ [QSYS_TIMED_FRAME_ENTRY_TFRM_PORTNO] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 4, 7),
+ [QSYS_TIMED_FRAME_ENTRY_TFRM_TM_SEL] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 1, 3),
+ [QSYS_TIMED_FRAME_ENTRY_TFRM_TM_T] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 0, 0),
+ [SYS_RESET_CFG_CORE_ENA] = REG_FIELD(SYS_RESET_CFG, 2, 2),
+ [SYS_RESET_CFG_MEM_ENA] = REG_FIELD(SYS_RESET_CFG, 1, 1),
+ [SYS_RESET_CFG_MEM_INIT] = REG_FIELD(SYS_RESET_CFG, 0, 0),
+};
+
+static const struct ocelot_stat_layout ocelot_stats_layout[] = {
+ { .name = "rx_octets", .offset = 0x00, },
+ { .name = "rx_unicast", .offset = 0x01, },
+ { .name = "rx_multicast", .offset = 0x02, },
+ { .name = "rx_broadcast", .offset = 0x03, },
+ { .name = "rx_shorts", .offset = 0x04, },
+ { .name = "rx_fragments", .offset = 0x05, },
+ { .name = "rx_jabbers", .offset = 0x06, },
+ { .name = "rx_crc_align_errs", .offset = 0x07, },
+ { .name = "rx_sym_errs", .offset = 0x08, },
+ { .name = "rx_frames_below_65_octets", .offset = 0x09, },
+ { .name = "rx_frames_65_to_127_octets", .offset = 0x0A, },
+ { .name = "rx_frames_128_to_255_octets", .offset = 0x0B, },
+ { .name = "rx_frames_256_to_511_octets", .offset = 0x0C, },
+ { .name = "rx_frames_512_to_1023_octets", .offset = 0x0D, },
+ { .name = "rx_frames_1024_to_1526_octets", .offset = 0x0E, },
+ { .name = "rx_frames_over_1526_octets", .offset = 0x0F, },
+ { .name = "rx_pause", .offset = 0x10, },
+ { .name = "rx_control", .offset = 0x11, },
+ { .name = "rx_longs", .offset = 0x12, },
+ { .name = "rx_classified_drops", .offset = 0x13, },
+ { .name = "rx_red_prio_0", .offset = 0x14, },
+ { .name = "rx_red_prio_1", .offset = 0x15, },
+ { .name = "rx_red_prio_2", .offset = 0x16, },
+ { .name = "rx_red_prio_3", .offset = 0x17, },
+ { .name = "rx_red_prio_4", .offset = 0x18, },
+ { .name = "rx_red_prio_5", .offset = 0x19, },
+ { .name = "rx_red_prio_6", .offset = 0x1A, },
+ { .name = "rx_red_prio_7", .offset = 0x1B, },
+ { .name = "rx_yellow_prio_0", .offset = 0x1C, },
+ { .name = "rx_yellow_prio_1", .offset = 0x1D, },
+ { .name = "rx_yellow_prio_2", .offset = 0x1E, },
+ { .name = "rx_yellow_prio_3", .offset = 0x1F, },
+ { .name = "rx_yellow_prio_4", .offset = 0x20, },
+ { .name = "rx_yellow_prio_5", .offset = 0x21, },
+ { .name = "rx_yellow_prio_6", .offset = 0x22, },
+ { .name = "rx_yellow_prio_7", .offset = 0x23, },
+ { .name = "rx_green_prio_0", .offset = 0x24, },
+ { .name = "rx_green_prio_1", .offset = 0x25, },
+ { .name = "rx_green_prio_2", .offset = 0x26, },
+ { .name = "rx_green_prio_3", .offset = 0x27, },
+ { .name = "rx_green_prio_4", .offset = 0x28, },
+ { .name = "rx_green_prio_5", .offset = 0x29, },
+ { .name = "rx_green_prio_6", .offset = 0x2A, },
+ { .name = "rx_green_prio_7", .offset = 0x2B, },
+ { .name = "tx_octets", .offset = 0x40, },
+ { .name = "tx_unicast", .offset = 0x41, },
+ { .name = "tx_multicast", .offset = 0x42, },
+ { .name = "tx_broadcast", .offset = 0x43, },
+ { .name = "tx_collision", .offset = 0x44, },
+ { .name = "tx_drops", .offset = 0x45, },
+ { .name = "tx_pause", .offset = 0x46, },
+ { .name = "tx_frames_below_65_octets", .offset = 0x47, },
+ { .name = "tx_frames_65_to_127_octets", .offset = 0x48, },
+ { .name = "tx_frames_128_255_octets", .offset = 0x49, },
+ { .name = "tx_frames_256_511_octets", .offset = 0x4A, },
+ { .name = "tx_frames_512_1023_octets", .offset = 0x4B, },
+ { .name = "tx_frames_1024_1526_octets", .offset = 0x4C, },
+ { .name = "tx_frames_over_1526_octets", .offset = 0x4D, },
+ { .name = "tx_yellow_prio_0", .offset = 0x4E, },
+ { .name = "tx_yellow_prio_1", .offset = 0x4F, },
+ { .name = "tx_yellow_prio_2", .offset = 0x50, },
+ { .name = "tx_yellow_prio_3", .offset = 0x51, },
+ { .name = "tx_yellow_prio_4", .offset = 0x52, },
+ { .name = "tx_yellow_prio_5", .offset = 0x53, },
+ { .name = "tx_yellow_prio_6", .offset = 0x54, },
+ { .name = "tx_yellow_prio_7", .offset = 0x55, },
+ { .name = "tx_green_prio_0", .offset = 0x56, },
+ { .name = "tx_green_prio_1", .offset = 0x57, },
+ { .name = "tx_green_prio_2", .offset = 0x58, },
+ { .name = "tx_green_prio_3", .offset = 0x59, },
+ { .name = "tx_green_prio_4", .offset = 0x5A, },
+ { .name = "tx_green_prio_5", .offset = 0x5B, },
+ { .name = "tx_green_prio_6", .offset = 0x5C, },
+ { .name = "tx_green_prio_7", .offset = 0x5D, },
+ { .name = "tx_aged", .offset = 0x5E, },
+ { .name = "drop_local", .offset = 0x80, },
+ { .name = "drop_tail", .offset = 0x81, },
+ { .name = "drop_yellow_prio_0", .offset = 0x82, },
+ { .name = "drop_yellow_prio_1", .offset = 0x83, },
+ { .name = "drop_yellow_prio_2", .offset = 0x84, },
+ { .name = "drop_yellow_prio_3", .offset = 0x85, },
+ { .name = "drop_yellow_prio_4", .offset = 0x86, },
+ { .name = "drop_yellow_prio_5", .offset = 0x87, },
+ { .name = "drop_yellow_prio_6", .offset = 0x88, },
+ { .name = "drop_yellow_prio_7", .offset = 0x89, },
+ { .name = "drop_green_prio_0", .offset = 0x8A, },
+ { .name = "drop_green_prio_1", .offset = 0x8B, },
+ { .name = "drop_green_prio_2", .offset = 0x8C, },
+ { .name = "drop_green_prio_3", .offset = 0x8D, },
+ { .name = "drop_green_prio_4", .offset = 0x8E, },
+ { .name = "drop_green_prio_5", .offset = 0x8F, },
+ { .name = "drop_green_prio_6", .offset = 0x90, },
+ { .name = "drop_green_prio_7", .offset = 0x91, },
+};
+
+static void ocelot_pll5_init(struct ocelot *ocelot)
+{
+ /* Configure PLL5. This will need a proper CCF driver.
+ * The values come from the VTSS API for Ocelot.
+ */
+ ocelot_write(ocelot, HSIO_PLL5G_CFG4_IB_CTRL(0x7600) |
+ HSIO_PLL5G_CFG4_IB_BIAS_CTRL(0x8), HSIO_PLL5G_CFG4);
+ ocelot_write(ocelot, HSIO_PLL5G_CFG0_CORE_CLK_DIV(0x11) |
+ HSIO_PLL5G_CFG0_CPU_CLK_DIV(2) |
+ HSIO_PLL5G_CFG0_ENA_BIAS |
+ HSIO_PLL5G_CFG0_ENA_VCO_BUF |
+ HSIO_PLL5G_CFG0_ENA_CP1 |
+ HSIO_PLL5G_CFG0_SELCPI(2) |
+ HSIO_PLL5G_CFG0_LOOP_BW_RES(0xe) |
+ HSIO_PLL5G_CFG0_SELBGV820(4) |
+ HSIO_PLL5G_CFG0_DIV4 |
+ HSIO_PLL5G_CFG0_ENA_CLKTREE |
+ HSIO_PLL5G_CFG0_ENA_LANE, HSIO_PLL5G_CFG0);
+ ocelot_write(ocelot, HSIO_PLL5G_CFG2_EN_RESET_FRQ_DET |
+ HSIO_PLL5G_CFG2_EN_RESET_OVERRUN |
+ HSIO_PLL5G_CFG2_GAIN_TEST(0x8) |
+ HSIO_PLL5G_CFG2_ENA_AMPCTRL |
+ HSIO_PLL5G_CFG2_PWD_AMPCTRL_N |
+ HSIO_PLL5G_CFG2_AMPC_SEL(0x10), HSIO_PLL5G_CFG2);
+}
+
+int ocelot_chip_init(struct ocelot *ocelot)
+{
+ int ret;
+
+ ocelot->map = ocelot_regmap;
+ ocelot->stats_layout = ocelot_stats_layout;
+ ocelot->num_stats = ARRAY_SIZE(ocelot_stats_layout);
+ ocelot->shared_queue_sz = 224 * 1024;
+
+ ret = ocelot_regfields_init(ocelot, ocelot_regfields);
+ if (ret)
+ return ret;
+
+ ocelot_pll5_init(ocelot);
+
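+ /* No MAC address is provisioned by the hardware here; pick a random
+ * base address and clear its low nibble so per-port addresses can be
+ * derived from it.
+ */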
+ eth_random_addr(ocelot->base_mac);
+ ocelot->base_mac[5] &= 0xf0;
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_chip_init);
diff --git a/drivers/net/ethernet/mscc/ocelot_rew.h b/drivers/net/ethernet/mscc/ocelot_rew.h
new file mode 100644
index 000000000000..210914b7e20f
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_rew.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Microsemi Ocelot Switch driver
+ *
+ * Copyright (c) 2017 Microsemi Corporation
+ */
+
+#ifndef _MSCC_OCELOT_REW_H_
+#define _MSCC_OCELOT_REW_H_
+
+#define REW_PORT_VLAN_CFG_GSZ 0x80
+
+#define REW_PORT_VLAN_CFG_PORT_TPID(x) (((x) << 16) & GENMASK(31, 16))
+#define REW_PORT_VLAN_CFG_PORT_TPID_M GENMASK(31, 16)
+#define REW_PORT_VLAN_CFG_PORT_TPID_X(x) (((x) & GENMASK(31, 16)) >> 16)
+#define REW_PORT_VLAN_CFG_PORT_DEI BIT(15)
+#define REW_PORT_VLAN_CFG_PORT_PCP(x) (((x) << 12) & GENMASK(14, 12))
+#define REW_PORT_VLAN_CFG_PORT_PCP_M GENMASK(14, 12)
+#define REW_PORT_VLAN_CFG_PORT_PCP_X(x) (((x) & GENMASK(14, 12)) >> 12)
+#define REW_PORT_VLAN_CFG_PORT_VID(x) ((x) & GENMASK(11, 0))
+#define REW_PORT_VLAN_CFG_PORT_VID_M GENMASK(11, 0)
+
+#define REW_TAG_CFG_GSZ 0x80
+
+#define REW_TAG_CFG_TAG_CFG(x) (((x) << 7) & GENMASK(8, 7))
+#define REW_TAG_CFG_TAG_CFG_M GENMASK(8, 7)
+#define REW_TAG_CFG_TAG_CFG_X(x) (((x) & GENMASK(8, 7)) >> 7)
+#define REW_TAG_CFG_TAG_TPID_CFG(x) (((x) << 5) & GENMASK(6, 5))
+#define REW_TAG_CFG_TAG_TPID_CFG_M GENMASK(6, 5)
+#define REW_TAG_CFG_TAG_TPID_CFG_X(x) (((x) & GENMASK(6, 5)) >> 5)
+#define REW_TAG_CFG_TAG_VID_CFG BIT(4)
+#define REW_TAG_CFG_TAG_PCP_CFG(x) (((x) << 2) & GENMASK(3, 2))
+#define REW_TAG_CFG_TAG_PCP_CFG_M GENMASK(3, 2)
+#define REW_TAG_CFG_TAG_PCP_CFG_X(x) (((x) & GENMASK(3, 2)) >> 2)
+#define REW_TAG_CFG_TAG_DEI_CFG(x) ((x) & GENMASK(1, 0))
+#define REW_TAG_CFG_TAG_DEI_CFG_M GENMASK(1, 0)
+
+#define REW_PORT_CFG_GSZ 0x80
+
+#define REW_PORT_CFG_ES0_EN BIT(5)
+#define REW_PORT_CFG_FCS_UPDATE_NONCPU_CFG(x) (((x) << 3) & GENMASK(4, 3))
+#define REW_PORT_CFG_FCS_UPDATE_NONCPU_CFG_M GENMASK(4, 3)
+#define REW_PORT_CFG_FCS_UPDATE_NONCPU_CFG_X(x) (((x) & GENMASK(4, 3)) >> 3)
+#define REW_PORT_CFG_FCS_UPDATE_CPU_ENA BIT(2)
+#define REW_PORT_CFG_FLUSH_ENA BIT(1)
+#define REW_PORT_CFG_AGE_DIS BIT(0)
+
+#define REW_DSCP_CFG_GSZ 0x80
+
+#define REW_PCP_DEI_QOS_MAP_CFG_GSZ 0x80
+#define REW_PCP_DEI_QOS_MAP_CFG_RSZ 0x4
+
+#define REW_PCP_DEI_QOS_MAP_CFG_DEI_QOS_VAL BIT(3)
+#define REW_PCP_DEI_QOS_MAP_CFG_PCP_QOS_VAL(x) ((x) & GENMASK(2, 0))
+#define REW_PCP_DEI_QOS_MAP_CFG_PCP_QOS_VAL_M GENMASK(2, 0)
+
+#define REW_PTP_CFG_GSZ 0x80
+
+#define REW_PTP_CFG_PTP_BACKPLANE_MODE BIT(7)
+#define REW_PTP_CFG_GP_CFG_UNUSED(x) (((x) << 3) & GENMASK(6, 3))
+#define REW_PTP_CFG_GP_CFG_UNUSED_M GENMASK(6, 3)
+#define REW_PTP_CFG_GP_CFG_UNUSED_X(x) (((x) & GENMASK(6, 3)) >> 3)
+#define REW_PTP_CFG_PTP_1STEP_DIS BIT(2)
+#define REW_PTP_CFG_PTP_2STEP_DIS BIT(1)
+#define REW_PTP_CFG_PTP_UDP_KEEP BIT(0)
+
+#define REW_PTP_DLY1_CFG_GSZ 0x80
+
+#define REW_RED_TAG_CFG_GSZ 0x80
+
+#define REW_RED_TAG_CFG_RED_TAG_CFG BIT(0)
+
+#define REW_DSCP_REMAP_DP1_CFG_RSZ 0x4
+
+#define REW_DSCP_REMAP_CFG_RSZ 0x4
+
+#define REW_REW_STICKY_ES0_TAGB_PUSH_FAILED BIT(0)
+
+#define REW_PPT_RSZ 0x4
+
+#endif
diff --git a/drivers/net/ethernet/mscc/ocelot_sys.h b/drivers/net/ethernet/mscc/ocelot_sys.h
new file mode 100644
index 000000000000..16f91e172bcb
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_sys.h
@@ -0,0 +1,144 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Microsemi Ocelot Switch driver
+ *
+ * Copyright (c) 2017 Microsemi Corporation
+ */
+
+#ifndef _MSCC_OCELOT_SYS_H_
+#define _MSCC_OCELOT_SYS_H_
+
+#define SYS_COUNT_RX_OCTETS_RSZ 0x4
+
+#define SYS_COUNT_TX_OCTETS_RSZ 0x4
+
+#define SYS_PORT_MODE_RSZ 0x4
+
+#define SYS_PORT_MODE_DATA_WO_TS(x) (((x) << 5) & GENMASK(6, 5))
+#define SYS_PORT_MODE_DATA_WO_TS_M GENMASK(6, 5)
+#define SYS_PORT_MODE_DATA_WO_TS_X(x) (((x) & GENMASK(6, 5)) >> 5)
+#define SYS_PORT_MODE_INCL_INJ_HDR(x) (((x) << 3) & GENMASK(4, 3))
+#define SYS_PORT_MODE_INCL_INJ_HDR_M GENMASK(4, 3)
+#define SYS_PORT_MODE_INCL_INJ_HDR_X(x) (((x) & GENMASK(4, 3)) >> 3)
+#define SYS_PORT_MODE_INCL_XTR_HDR(x) (((x) << 1) & GENMASK(2, 1))
+#define SYS_PORT_MODE_INCL_XTR_HDR_M GENMASK(2, 1)
+#define SYS_PORT_MODE_INCL_XTR_HDR_X(x) (((x) & GENMASK(2, 1)) >> 1)
+#define SYS_PORT_MODE_INJ_HDR_ERR BIT(0)
+
+#define SYS_FRONT_PORT_MODE_RSZ 0x4
+
+#define SYS_FRONT_PORT_MODE_HDX_MODE BIT(0)
+
+#define SYS_FRM_AGING_AGE_TX_ENA BIT(20)
+#define SYS_FRM_AGING_MAX_AGE(x) ((x) & GENMASK(19, 0))
+#define SYS_FRM_AGING_MAX_AGE_M GENMASK(19, 0)
+
+#define SYS_STAT_CFG_STAT_CLEAR_SHOT(x) (((x) << 10) & GENMASK(16, 10))
+#define SYS_STAT_CFG_STAT_CLEAR_SHOT_M GENMASK(16, 10)
+#define SYS_STAT_CFG_STAT_CLEAR_SHOT_X(x) (((x) & GENMASK(16, 10)) >> 10)
+#define SYS_STAT_CFG_STAT_VIEW(x) ((x) & GENMASK(9, 0))
+#define SYS_STAT_CFG_STAT_VIEW_M GENMASK(9, 0)
+
+#define SYS_SW_STATUS_RSZ 0x4
+
+#define SYS_SW_STATUS_PORT_RX_PAUSED BIT(0)
+
+#define SYS_MISC_CFG_PTP_RSRV_CLR BIT(1)
+#define SYS_MISC_CFG_PTP_DIS_NEG_RO BIT(0)
+
+#define SYS_REW_MAC_HIGH_CFG_RSZ 0x4
+
+#define SYS_REW_MAC_LOW_CFG_RSZ 0x4
+
+#define SYS_TIMESTAMP_OFFSET_ETH_TYPE_CFG(x) (((x) << 6) & GENMASK(21, 6))
+#define SYS_TIMESTAMP_OFFSET_ETH_TYPE_CFG_M GENMASK(21, 6)
+#define SYS_TIMESTAMP_OFFSET_ETH_TYPE_CFG_X(x) (((x) & GENMASK(21, 6)) >> 6)
+#define SYS_TIMESTAMP_OFFSET_TIMESTAMP_OFFSET(x) ((x) & GENMASK(5, 0))
+#define SYS_TIMESTAMP_OFFSET_TIMESTAMP_OFFSET_M GENMASK(5, 0)
+
+#define SYS_PAUSE_CFG_RSZ 0x4
+
+#define SYS_PAUSE_CFG_PAUSE_START(x) (((x) << 10) & GENMASK(18, 10))
+#define SYS_PAUSE_CFG_PAUSE_START_M GENMASK(18, 10)
+#define SYS_PAUSE_CFG_PAUSE_START_X(x) (((x) & GENMASK(18, 10)) >> 10)
+#define SYS_PAUSE_CFG_PAUSE_STOP(x) (((x) << 1) & GENMASK(9, 1))
+#define SYS_PAUSE_CFG_PAUSE_STOP_M GENMASK(9, 1)
+#define SYS_PAUSE_CFG_PAUSE_STOP_X(x) (((x) & GENMASK(9, 1)) >> 1)
+#define SYS_PAUSE_CFG_PAUSE_ENA BIT(0)
+
+#define SYS_PAUSE_TOT_CFG_PAUSE_TOT_START(x) (((x) << 9) & GENMASK(17, 9))
+#define SYS_PAUSE_TOT_CFG_PAUSE_TOT_START_M GENMASK(17, 9)
+#define SYS_PAUSE_TOT_CFG_PAUSE_TOT_START_X(x) (((x) & GENMASK(17, 9)) >> 9)
+#define SYS_PAUSE_TOT_CFG_PAUSE_TOT_STOP(x) ((x) & GENMASK(8, 0))
+#define SYS_PAUSE_TOT_CFG_PAUSE_TOT_STOP_M GENMASK(8, 0)
+
+#define SYS_ATOP_RSZ 0x4
+
+#define SYS_MAC_FC_CFG_RSZ 0x4
+
+#define SYS_MAC_FC_CFG_FC_LINK_SPEED(x) (((x) << 26) & GENMASK(27, 26))
+#define SYS_MAC_FC_CFG_FC_LINK_SPEED_M GENMASK(27, 26)
+#define SYS_MAC_FC_CFG_FC_LINK_SPEED_X(x) (((x) & GENMASK(27, 26)) >> 26)
+#define SYS_MAC_FC_CFG_FC_LATENCY_CFG(x) (((x) << 20) & GENMASK(25, 20))
+#define SYS_MAC_FC_CFG_FC_LATENCY_CFG_M GENMASK(25, 20)
+#define SYS_MAC_FC_CFG_FC_LATENCY_CFG_X(x) (((x) & GENMASK(25, 20)) >> 20)
+#define SYS_MAC_FC_CFG_ZERO_PAUSE_ENA BIT(18)
+#define SYS_MAC_FC_CFG_TX_FC_ENA BIT(17)
+#define SYS_MAC_FC_CFG_RX_FC_ENA BIT(16)
+#define SYS_MAC_FC_CFG_PAUSE_VAL_CFG(x) ((x) & GENMASK(15, 0))
+#define SYS_MAC_FC_CFG_PAUSE_VAL_CFG_M GENMASK(15, 0)
+
+#define SYS_MMGT_RELCNT(x) (((x) << 16) & GENMASK(31, 16))
+#define SYS_MMGT_RELCNT_M GENMASK(31, 16)
+#define SYS_MMGT_RELCNT_X(x) (((x) & GENMASK(31, 16)) >> 16)
+#define SYS_MMGT_FREECNT(x) ((x) & GENMASK(15, 0))
+#define SYS_MMGT_FREECNT_M GENMASK(15, 0)
+
+#define SYS_MMGT_FAST_FREEVLD(x) (((x) << 4) & GENMASK(7, 4))
+#define SYS_MMGT_FAST_FREEVLD_M GENMASK(7, 4)
+#define SYS_MMGT_FAST_FREEVLD_X(x) (((x) & GENMASK(7, 4)) >> 4)
+#define SYS_MMGT_FAST_RELVLD(x) ((x) & GENMASK(3, 0))
+#define SYS_MMGT_FAST_RELVLD_M GENMASK(3, 0)
+
+#define SYS_EVENTS_DIF_RSZ 0x4
+
+#define SYS_EVENTS_DIF_EV_DRX(x) (((x) << 6) & GENMASK(8, 6))
+#define SYS_EVENTS_DIF_EV_DRX_M GENMASK(8, 6)
+#define SYS_EVENTS_DIF_EV_DRX_X(x) (((x) & GENMASK(8, 6)) >> 6)
+#define SYS_EVENTS_DIF_EV_DTX(x) ((x) & GENMASK(5, 0))
+#define SYS_EVENTS_DIF_EV_DTX_M GENMASK(5, 0)
+
+#define SYS_EVENTS_CORE_EV_FWR BIT(2)
+#define SYS_EVENTS_CORE_EV_ANA(x) ((x) & GENMASK(1, 0))
+#define SYS_EVENTS_CORE_EV_ANA_M GENMASK(1, 0)
+
+#define SYS_CNT_GSZ 0x4
+
+#define SYS_PTP_STATUS_PTP_TXSTAMP_OAM BIT(29)
+#define SYS_PTP_STATUS_PTP_OVFL BIT(28)
+#define SYS_PTP_STATUS_PTP_MESS_VLD BIT(27)
+#define SYS_PTP_STATUS_PTP_MESS_ID(x) (((x) << 21) & GENMASK(26, 21))
+#define SYS_PTP_STATUS_PTP_MESS_ID_M GENMASK(26, 21)
+#define SYS_PTP_STATUS_PTP_MESS_ID_X(x) (((x) & GENMASK(26, 21)) >> 21)
+#define SYS_PTP_STATUS_PTP_MESS_TXPORT(x) (((x) << 16) & GENMASK(20, 16))
+#define SYS_PTP_STATUS_PTP_MESS_TXPORT_M GENMASK(20, 16)
+#define SYS_PTP_STATUS_PTP_MESS_TXPORT_X(x) (((x) & GENMASK(20, 16)) >> 16)
+#define SYS_PTP_STATUS_PTP_MESS_SEQ_ID(x) ((x) & GENMASK(15, 0))
+#define SYS_PTP_STATUS_PTP_MESS_SEQ_ID_M GENMASK(15, 0)
+
+#define SYS_PTP_TXSTAMP_PTP_TXSTAMP(x) ((x) & GENMASK(29, 0))
+#define SYS_PTP_TXSTAMP_PTP_TXSTAMP_M GENMASK(29, 0)
+#define SYS_PTP_TXSTAMP_PTP_TXSTAMP_SEC BIT(31)
+
+#define SYS_PTP_NXT_PTP_NXT BIT(0)
+
+#define SYS_PTP_CFG_PTP_STAMP_WID(x) (((x) << 2) & GENMASK(7, 2))
+#define SYS_PTP_CFG_PTP_STAMP_WID_M GENMASK(7, 2)
+#define SYS_PTP_CFG_PTP_STAMP_WID_X(x) (((x) & GENMASK(7, 2)) >> 2)
+#define SYS_PTP_CFG_PTP_CF_ROLL_MODE(x) ((x) & GENMASK(1, 0))
+#define SYS_PTP_CFG_PTP_CF_ROLL_MODE_M GENMASK(1, 0)
+
+#define SYS_RAM_INIT_RAM_INIT BIT(1)
+#define SYS_RAM_INIT_RAM_CFG_HOOK BIT(0)
+
+#endif
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
index 6223930a8155..c60da9e8bf14 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
@@ -693,7 +693,7 @@ __vxge_hw_device_is_privilaged(u32 host_type, u32 func_id)
VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)
return VXGE_HW_OK;
else
- return VXGE_HW_ERR_PRIVILAGED_OPEARATION;
+ return VXGE_HW_ERR_PRIVILEGED_OPERATION;
}
/*
@@ -1920,7 +1920,7 @@ enum vxge_hw_status vxge_hw_device_getpause_data(struct __vxge_hw_device *hldev,
}
if (!(hldev->access_rights & VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
- status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
+ status = VXGE_HW_ERR_PRIVILEGED_OPERATION;
goto exit;
}
@@ -3153,7 +3153,7 @@ vxge_hw_mgmt_reg_read(struct __vxge_hw_device *hldev,
case vxge_hw_mgmt_reg_type_mrpcim:
if (!(hldev->access_rights &
VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
- status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
+ status = VXGE_HW_ERR_PRIVILEGED_OPERATION;
break;
}
if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) {
@@ -3165,7 +3165,7 @@ vxge_hw_mgmt_reg_read(struct __vxge_hw_device *hldev,
case vxge_hw_mgmt_reg_type_srpcim:
if (!(hldev->access_rights &
VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) {
- status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
+ status = VXGE_HW_ERR_PRIVILEGED_OPERATION;
break;
}
if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) {
@@ -3279,7 +3279,7 @@ vxge_hw_mgmt_reg_write(struct __vxge_hw_device *hldev,
case vxge_hw_mgmt_reg_type_mrpcim:
if (!(hldev->access_rights &
VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
- status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
+ status = VXGE_HW_ERR_PRIVILEGED_OPERATION;
break;
}
if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) {
@@ -3291,7 +3291,7 @@ vxge_hw_mgmt_reg_write(struct __vxge_hw_device *hldev,
case vxge_hw_mgmt_reg_type_srpcim:
if (!(hldev->access_rights &
VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) {
- status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
+ status = VXGE_HW_ERR_PRIVILEGED_OPERATION;
break;
}
if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) {
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h
index cfa970417f81..d743a37a3cee 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.h
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h
@@ -127,7 +127,7 @@ enum vxge_hw_status {
VXGE_HW_ERR_INVALID_TCODE = VXGE_HW_BASE_ERR + 14,
VXGE_HW_ERR_INVALID_BLOCK_SIZE = VXGE_HW_BASE_ERR + 15,
VXGE_HW_ERR_INVALID_STATE = VXGE_HW_BASE_ERR + 16,
- VXGE_HW_ERR_PRIVILAGED_OPEARATION = VXGE_HW_BASE_ERR + 17,
+ VXGE_HW_ERR_PRIVILEGED_OPERATION = VXGE_HW_BASE_ERR + 17,
VXGE_HW_ERR_INVALID_PORT = VXGE_HW_BASE_ERR + 18,
VXGE_HW_ERR_FIFO = VXGE_HW_BASE_ERR + 19,
VXGE_HW_ERR_VPATH = VXGE_HW_BASE_ERR + 20,
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
index 0452848d1316..03c3d1230c17 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
@@ -276,7 +276,7 @@ static void vxge_get_ethtool_stats(struct net_device *dev,
*ptr++ = 0;
status = vxge_hw_device_xmac_stats_get(hldev, xmac_stats);
if (status != VXGE_HW_OK) {
- if (status != VXGE_HW_ERR_PRIVILAGED_OPEARATION) {
+ if (status != VXGE_HW_ERR_PRIVILEGED_OPERATION) {
vxge_debug_init(VXGE_ERR,
"%s : %d Failure in getting xmac stats",
__func__, __LINE__);
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index b2299f2b2155..a8918bb7c802 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -3484,11 +3484,11 @@ static int vxge_device_register(struct __vxge_hw_device *hldev,
0,
&stat);
- if (status == VXGE_HW_ERR_PRIVILAGED_OPEARATION)
+ if (status == VXGE_HW_ERR_PRIVILEGED_OPERATION)
vxge_debug_init(
vxge_hw_device_trace_level_get(hldev),
"%s: device stats clear returns"
- "VXGE_HW_ERR_PRIVILAGED_OPEARATION", ndev->name);
+ "VXGE_HW_ERR_PRIVILEGED_OPERATION", ndev->name);
vxge_debug_entryexit(vxge_hw_device_trace_level_get(hldev),
"%s: %s:%d Exiting...",
diff --git a/drivers/net/ethernet/netronome/Kconfig b/drivers/net/ethernet/netronome/Kconfig
index ae0c46ba7546..66f15b05b65e 100644
--- a/drivers/net/ethernet/netronome/Kconfig
+++ b/drivers/net/ethernet/netronome/Kconfig
@@ -36,6 +36,19 @@ config NFP_APP_FLOWER
either directly, with Open vSwitch, or any other way. Note that
TC Flower offload requires specific FW to work.
+config NFP_APP_ABM_NIC
+ bool "NFP4000/NFP6000 Advanced buffer management NIC support"
+ depends on NFP
+ depends on NET_SWITCHDEV
+ default y
+ help
+ Enable driver support for the advanced buffer management (ABM) NIC
+ on NFP. The ABM NIC allows advanced configuration of packet queuing
+ and scheduling, including ECN marking. Say Y if you plan to use one
+ of the NFP4000 or NFP6000 platforms which support this
+ functionality.
+ Code will be built into the nfp.ko driver.
+
config NFP_DEBUG
bool "Debug support for Netronome(R) NFP4000/NFP6000 NIC drivers"
depends on NFP
diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile
index d5866d708dfa..4afb10375397 100644
--- a/drivers/net/ethernet/netronome/nfp/Makefile
+++ b/drivers/net/ethernet/netronome/nfp/Makefile
@@ -30,12 +30,14 @@ nfp-objs := \
nfp_net_sriov.o \
nfp_netvf_main.o \
nfp_port.o \
+ nfp_shared_buf.o \
nic/main.o
ifeq ($(CONFIG_NFP_APP_FLOWER),y)
nfp-objs += \
flower/action.o \
flower/cmsg.o \
+ flower/lag_conf.o \
flower/main.o \
flower/match.o \
flower/metadata.o \
@@ -52,4 +54,10 @@ nfp-objs += \
bpf/jit.o
endif
+ifeq ($(CONFIG_NFP_APP_ABM_NIC),y)
+nfp-objs += \
+ abm/ctrl.o \
+ abm/main.o
+endif
+
nfp-$(CONFIG_NFP_DEBUG) += nfp_net_debugfs.o
diff --git a/drivers/net/ethernet/netronome/nfp/abm/ctrl.c b/drivers/net/ethernet/netronome/nfp/abm/ctrl.c
new file mode 100644
index 000000000000..b157ccd8c80f
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/abm/ctrl.c
@@ -0,0 +1,333 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+/*
+ * Copyright (C) 2018 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License
+ * Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+
+#include "../nfpcore/nfp_cpp.h"
+#include "../nfpcore/nfp_nffw.h"
+#include "../nfp_app.h"
+#include "../nfp_abi.h"
+#include "../nfp_main.h"
+#include "../nfp_net.h"
+#include "main.h"
+
+#define NFP_QLVL_SYM_NAME "_abi_nfd_out_q_lvls_%u"
+#define NFP_QLVL_STRIDE 16
+#define NFP_QLVL_BLOG_BYTES 0
+#define NFP_QLVL_BLOG_PKTS 4
+#define NFP_QLVL_THRS 8
+
+#define NFP_QMSTAT_SYM_NAME "_abi_nfdqm%u_stats"
+#define NFP_QMSTAT_STRIDE 32
+#define NFP_QMSTAT_NON_STO 0
+#define NFP_QMSTAT_STO 8
+#define NFP_QMSTAT_DROP 16
+#define NFP_QMSTAT_ECN 24
+
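+/* Byte address of the configurable threshold word for @queue within the
+ * per-PF queue-level symbol; entries are NFP_QLVL_STRIDE bytes apart.
+ */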
+static unsigned long long
+nfp_abm_q_lvl_thrs(struct nfp_abm_link *alink, unsigned int queue)
+{
+ return alink->abm->q_lvls->addr +
+ (alink->queue_base + queue) * NFP_QLVL_STRIDE + NFP_QLVL_THRS;
+}
+
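+/* Read one 32- or 64-bit statistic word over the CPP bus; the symbols are
+ * laid out as arrays indexed by absolute queue number (base + ring index).
+ */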
+static int
+nfp_abm_ctrl_stat(struct nfp_abm_link *alink, const struct nfp_rtsym *sym,
+ unsigned int stride, unsigned int offset, unsigned int i,
+ bool is_u64, u64 *res)
+{
+ struct nfp_cpp *cpp = alink->abm->app->cpp;
+ u32 val32, mur;
+ u64 val, addr;
+ int err;
+
+ mur = NFP_CPP_ATOMIC_RD(sym->target, sym->domain);
+
+ addr = sym->addr + (alink->queue_base + i) * stride + offset;
+ if (is_u64)
+ err = nfp_cpp_readq(cpp, mur, addr, &val);
+ else
+ err = nfp_cpp_readl(cpp, mur, addr, &val32);
+ if (err) {
+ nfp_err(cpp,
+ "RED offload reading stat failed on vNIC %d queue %d\n",
+ alink->id, i);
+ return err;
+ }
+
+ *res = is_u64 ? val : val32;
+ return 0;
+}
+
+static int
+nfp_abm_ctrl_stat_all(struct nfp_abm_link *alink, const struct nfp_rtsym *sym,
+ unsigned int stride, unsigned int offset, bool is_u64,
+ u64 *res)
+{
+ u64 val, sum = 0;
+ unsigned int i;
+ int err;
+
+ for (i = 0; i < alink->vnic->max_rx_rings; i++) {
+ err = nfp_abm_ctrl_stat(alink, sym, stride, offset, i,
+ is_u64, &val);
+ if (err)
+ return err;
+ sum += val;
+ }
+
+ *res = sum;
+ return 0;
+}
+
+int nfp_abm_ctrl_set_q_lvl(struct nfp_abm_link *alink, unsigned int i, u32 val)
+{
+ struct nfp_cpp *cpp = alink->abm->app->cpp;
+ u32 muw;
+ int err;
+
+ muw = NFP_CPP_ATOMIC_WR(alink->abm->q_lvls->target,
+ alink->abm->q_lvls->domain);
+
+ err = nfp_cpp_writel(cpp, muw, nfp_abm_q_lvl_thrs(alink, i), val);
+ if (err) {
+ nfp_err(cpp, "RED offload setting level failed on vNIC %d queue %d\n",
+ alink->id, i);
+ return err;
+ }
+
+ return 0;
+}
+
+int nfp_abm_ctrl_set_all_q_lvls(struct nfp_abm_link *alink, u32 val)
+{
+ int i, err;
+
+ for (i = 0; i < alink->vnic->max_rx_rings; i++) {
+ err = nfp_abm_ctrl_set_q_lvl(alink, i, val);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+u64 nfp_abm_ctrl_stat_non_sto(struct nfp_abm_link *alink, unsigned int i)
+{
+ u64 val;
+
+ if (nfp_abm_ctrl_stat(alink, alink->abm->qm_stats, NFP_QMSTAT_STRIDE,
+ NFP_QMSTAT_NON_STO, i, true, &val))
+ return 0;
+ return val;
+}
+
+u64 nfp_abm_ctrl_stat_sto(struct nfp_abm_link *alink, unsigned int i)
+{
+ u64 val;
+
+ if (nfp_abm_ctrl_stat(alink, alink->abm->qm_stats, NFP_QMSTAT_STRIDE,
+ NFP_QMSTAT_STO, i, true, &val))
+ return 0;
+ return val;
+}
+
+int nfp_abm_ctrl_read_q_stats(struct nfp_abm_link *alink, unsigned int i,
+ struct nfp_alink_stats *stats)
+{
+ int err;
+
+ stats->tx_pkts = nn_readq(alink->vnic, NFP_NET_CFG_RXR_STATS(i));
+ stats->tx_bytes = nn_readq(alink->vnic, NFP_NET_CFG_RXR_STATS(i) + 8);
+
+ err = nfp_abm_ctrl_stat(alink, alink->abm->q_lvls,
+ NFP_QLVL_STRIDE, NFP_QLVL_BLOG_BYTES,
+ i, false, &stats->backlog_bytes);
+ if (err)
+ return err;
+
+ err = nfp_abm_ctrl_stat(alink, alink->abm->q_lvls,
+ NFP_QLVL_STRIDE, NFP_QLVL_BLOG_PKTS,
+ i, false, &stats->backlog_pkts);
+ if (err)
+ return err;
+
+ err = nfp_abm_ctrl_stat(alink, alink->abm->qm_stats,
+ NFP_QMSTAT_STRIDE, NFP_QMSTAT_DROP,
+ i, true, &stats->drops);
+ if (err)
+ return err;
+
+ return nfp_abm_ctrl_stat(alink, alink->abm->qm_stats,
+ NFP_QMSTAT_STRIDE, NFP_QMSTAT_ECN,
+ i, true, &stats->overlimits);
+}
+
+int nfp_abm_ctrl_read_stats(struct nfp_abm_link *alink,
+ struct nfp_alink_stats *stats)
+{
+ u64 pkts = 0, bytes = 0;
+ int i, err;
+
+ for (i = 0; i < alink->vnic->max_rx_rings; i++) {
+ pkts += nn_readq(alink->vnic, NFP_NET_CFG_RXR_STATS(i));
+ bytes += nn_readq(alink->vnic, NFP_NET_CFG_RXR_STATS(i) + 8);
+ }
+ stats->tx_pkts = pkts;
+ stats->tx_bytes = bytes;
+
+ err = nfp_abm_ctrl_stat_all(alink, alink->abm->q_lvls,
+ NFP_QLVL_STRIDE, NFP_QLVL_BLOG_BYTES,
+ false, &stats->backlog_bytes);
+ if (err)
+ return err;
+
+ err = nfp_abm_ctrl_stat_all(alink, alink->abm->q_lvls,
+ NFP_QLVL_STRIDE, NFP_QLVL_BLOG_PKTS,
+ false, &stats->backlog_pkts);
+ if (err)
+ return err;
+
+ err = nfp_abm_ctrl_stat_all(alink, alink->abm->qm_stats,
+ NFP_QMSTAT_STRIDE, NFP_QMSTAT_DROP,
+ true, &stats->drops);
+ if (err)
+ return err;
+
+ return nfp_abm_ctrl_stat_all(alink, alink->abm->qm_stats,
+ NFP_QMSTAT_STRIDE, NFP_QMSTAT_ECN,
+ true, &stats->overlimits);
+}
+
+int nfp_abm_ctrl_read_q_xstats(struct nfp_abm_link *alink, unsigned int i,
+ struct nfp_alink_xstats *xstats)
+{
+ int err;
+
+ err = nfp_abm_ctrl_stat(alink, alink->abm->qm_stats,
+ NFP_QMSTAT_STRIDE, NFP_QMSTAT_DROP,
+ i, true, &xstats->pdrop);
+ if (err)
+ return err;
+
+ return nfp_abm_ctrl_stat(alink, alink->abm->qm_stats,
+ NFP_QMSTAT_STRIDE, NFP_QMSTAT_ECN,
+ i, true, &xstats->ecn_marked);
+}
+
+int nfp_abm_ctrl_read_xstats(struct nfp_abm_link *alink,
+ struct nfp_alink_xstats *xstats)
+{
+ int err;
+
+ err = nfp_abm_ctrl_stat_all(alink, alink->abm->qm_stats,
+ NFP_QMSTAT_STRIDE, NFP_QMSTAT_DROP,
+ true, &xstats->pdrop);
+ if (err)
+ return err;
+
+ return nfp_abm_ctrl_stat_all(alink, alink->abm->qm_stats,
+ NFP_QMSTAT_STRIDE, NFP_QMSTAT_ECN,
+ true, &xstats->ecn_marked);
+}
+
+int nfp_abm_ctrl_qm_enable(struct nfp_abm *abm)
+{
+ return nfp_mbox_cmd(abm->app->pf, NFP_MBOX_PCIE_ABM_ENABLE,
+ NULL, 0, NULL, 0);
+}
+
+int nfp_abm_ctrl_qm_disable(struct nfp_abm *abm)
+{
+ return nfp_mbox_cmd(abm->app->pf, NFP_MBOX_PCIE_ABM_DISABLE,
+ NULL, 0, NULL, 0);
+}
+
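+/* The per-queue symbols are indexed by absolute queue number; derive this
+ * link's base index from the vNIC's first RX queue register.
+ */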
+void nfp_abm_ctrl_read_params(struct nfp_abm_link *alink)
+{
+ alink->queue_base = nn_readl(alink->vnic, NFP_NET_CFG_START_RXQ);
+ alink->queue_base /= alink->vnic->stride_rx;
+}
+
+static const struct nfp_rtsym *
+nfp_abm_ctrl_find_rtsym(struct nfp_pf *pf, const char *name, unsigned int size)
+{
+ const struct nfp_rtsym *sym;
+
+ sym = nfp_rtsym_lookup(pf->rtbl, name);
+ if (!sym) {
+ nfp_err(pf->cpp, "Symbol '%s' not found\n", name);
+ return ERR_PTR(-ENOENT);
+ }
+ if (sym->size != size) {
+ nfp_err(pf->cpp,
+ "Symbol '%s' wrong size: expected %u got %llu\n",
+ name, size, sym->size);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return sym;
+}
+
+static const struct nfp_rtsym *
+nfp_abm_ctrl_find_q_rtsym(struct nfp_pf *pf, const char *name,
+ unsigned int size)
+{
+ return nfp_abm_ctrl_find_rtsym(pf, name, size * NFP_NET_MAX_RX_RINGS);
+}
+
+int nfp_abm_ctrl_find_addrs(struct nfp_abm *abm)
+{
+ struct nfp_pf *pf = abm->app->pf;
+ const struct nfp_rtsym *sym;
+ unsigned int pf_id;
+ char pf_symbol[64];
+
+ pf_id = nfp_cppcore_pcie_unit(pf->cpp);
+ abm->pf_id = pf_id;
+
+ snprintf(pf_symbol, sizeof(pf_symbol), NFP_QLVL_SYM_NAME, pf_id);
+ sym = nfp_abm_ctrl_find_q_rtsym(pf, pf_symbol, NFP_QLVL_STRIDE);
+ if (IS_ERR(sym))
+ return PTR_ERR(sym);
+ abm->q_lvls = sym;
+
+ snprintf(pf_symbol, sizeof(pf_symbol), NFP_QMSTAT_SYM_NAME, pf_id);
+ sym = nfp_abm_ctrl_find_q_rtsym(pf, pf_symbol, NFP_QMSTAT_STRIDE);
+ if (IS_ERR(sym))
+ return PTR_ERR(sym);
+ abm->qm_stats = sym;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.c b/drivers/net/ethernet/netronome/nfp/abm/main.c
new file mode 100644
index 000000000000..1561c2724c26
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/abm/main.c
@@ -0,0 +1,765 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+/*
+ * Copyright (C) 2018 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License
+ * Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/etherdevice.h>
+#include <linux/lockdep.h>
+#include <linux/netdevice.h>
+#include <linux/rcupdate.h>
+#include <linux/slab.h>
+#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
+#include <net/red.h>
+
+#include "../nfpcore/nfp.h"
+#include "../nfpcore/nfp_cpp.h"
+#include "../nfpcore/nfp_nsp.h"
+#include "../nfp_app.h"
+#include "../nfp_main.h"
+#include "../nfp_net.h"
+#include "../nfp_net_repr.h"
+#include "../nfp_port.h"
+#include "main.h"
+
+static u32 nfp_abm_portid(enum nfp_repr_type rtype, unsigned int id)
+{
+ return FIELD_PREP(NFP_ABM_PORTID_TYPE, rtype) |
+ FIELD_PREP(NFP_ABM_PORTID_ID, id);
+}
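+
+/* Illustrative (hypothetical values): for rtype == 2 and id == 5 the
+ * FIELD_PREP()s above produce 0x00020005 - the representor type lands in
+ * bits 23:16 and the port id in bits 7:0.
+ */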
+
+static int
+__nfp_abm_reset_root(struct net_device *netdev, struct nfp_abm_link *alink,
+ u32 handle, unsigned int qs, u32 init_val)
+{
+ struct nfp_port *port = nfp_port_from_netdev(netdev);
+ int ret;
+
+ ret = nfp_abm_ctrl_set_all_q_lvls(alink, init_val);
+ memset(alink->qdiscs, 0, sizeof(*alink->qdiscs) * alink->num_qdiscs);
+
+ alink->parent = handle;
+ alink->num_qdiscs = qs;
+ port->tc_offload_cnt = qs;
+
+ return ret;
+}
+
+static void
+nfp_abm_reset_root(struct net_device *netdev, struct nfp_abm_link *alink,
+ u32 handle, unsigned int qs)
+{
+ __nfp_abm_reset_root(netdev, alink, handle, qs, ~0);
+}
+
+static int
+nfp_abm_red_find(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt)
+{
+ unsigned int i = TC_H_MIN(opt->parent) - 1;
+
+ if (opt->parent == TC_H_ROOT)
+ i = 0;
+ else if (TC_H_MAJ(alink->parent) == TC_H_MAJ(opt->parent))
+ i = TC_H_MIN(opt->parent) - 1;
+ else
+ return -EOPNOTSUPP;
+
+ if (i >= alink->num_qdiscs || opt->handle != alink->qdiscs[i].handle)
+ return -EOPNOTSUPP;
+
+ return i;
+}
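+
+/* Illustrative: a RED Qdisc grafted under MQ queue 3 has parent minor 3
+ * and therefore maps to hardware queue index 2, while a root RED always
+ * maps to index 0.
+ */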
+
+static void
+nfp_abm_red_destroy(struct net_device *netdev, struct nfp_abm_link *alink,
+ u32 handle)
+{
+ unsigned int i;
+
+ for (i = 0; i < alink->num_qdiscs; i++)
+ if (handle == alink->qdiscs[i].handle)
+ break;
+ if (i == alink->num_qdiscs)
+ return;
+
+ if (alink->parent == TC_H_ROOT) {
+ nfp_abm_reset_root(netdev, alink, TC_H_ROOT, 0);
+ } else {
+ nfp_abm_ctrl_set_q_lvl(alink, i, ~0);
+ memset(&alink->qdiscs[i], 0, sizeof(*alink->qdiscs));
+ }
+}
+
+static int
+nfp_abm_red_replace(struct net_device *netdev, struct nfp_abm_link *alink,
+ struct tc_red_qopt_offload *opt)
+{
+ bool existing;
+ int i, err;
+
+ i = nfp_abm_red_find(alink, opt);
+ existing = i >= 0;
+
+ if (opt->set.min != opt->set.max || !opt->set.is_ecn) {
+ nfp_warn(alink->abm->app->cpp,
+ "RED offload failed - unsupported parameters\n");
+ err = -EINVAL;
+ goto err_destroy;
+ }
+
+ if (existing) {
+ if (alink->parent == TC_H_ROOT)
+ err = nfp_abm_ctrl_set_all_q_lvls(alink, opt->set.min);
+ else
+ err = nfp_abm_ctrl_set_q_lvl(alink, i, opt->set.min);
+ if (err)
+ goto err_destroy;
+ return 0;
+ }
+
+ if (opt->parent == TC_H_ROOT) {
+ i = 0;
+ err = __nfp_abm_reset_root(netdev, alink, TC_H_ROOT, 1,
+ opt->set.min);
+ } else if (TC_H_MAJ(alink->parent) == TC_H_MAJ(opt->parent)) {
+ i = TC_H_MIN(opt->parent) - 1;
+ err = nfp_abm_ctrl_set_q_lvl(alink, i, opt->set.min);
+ } else {
+ return -EINVAL;
+ }
+ /* Set the handle so a full cleanup is attempted in case the IO failed */
+ alink->qdiscs[i].handle = opt->handle;
+ if (err)
+ goto err_destroy;
+
+ if (opt->parent == TC_H_ROOT)
+ err = nfp_abm_ctrl_read_stats(alink, &alink->qdiscs[i].stats);
+ else
+ err = nfp_abm_ctrl_read_q_stats(alink, i,
+ &alink->qdiscs[i].stats);
+ if (err)
+ goto err_destroy;
+
+ if (opt->parent == TC_H_ROOT)
+ err = nfp_abm_ctrl_read_xstats(alink,
+ &alink->qdiscs[i].xstats);
+ else
+ err = nfp_abm_ctrl_read_q_xstats(alink, i,
+ &alink->qdiscs[i].xstats);
+ if (err)
+ goto err_destroy;
+
+ alink->qdiscs[i].stats.backlog_pkts = 0;
+ alink->qdiscs[i].stats.backlog_bytes = 0;
+
+ return 0;
+err_destroy:
+ /* If the qdisc keeps on living but we can't offload, undo the changes */
+ if (existing) {
+ opt->set.qstats->qlen -= alink->qdiscs[i].stats.backlog_pkts;
+ opt->set.qstats->backlog -=
+ alink->qdiscs[i].stats.backlog_bytes;
+ }
+ nfp_abm_red_destroy(netdev, alink, opt->handle);
+
+ return err;
+}
+
+static void
+nfp_abm_update_stats(struct nfp_alink_stats *new, struct nfp_alink_stats *old,
+ struct tc_qopt_offload_stats *stats)
+{
+ _bstats_update(stats->bstats, new->tx_bytes - old->tx_bytes,
+ new->tx_pkts - old->tx_pkts);
+ stats->qstats->qlen += new->backlog_pkts - old->backlog_pkts;
+ stats->qstats->backlog += new->backlog_bytes - old->backlog_bytes;
+ stats->qstats->overlimits += new->overlimits - old->overlimits;
+ stats->qstats->drops += new->drops - old->drops;
+}
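+
+/* Illustrative: the FW counters are free-running, so each refresh feeds
+ * only the delta against the previously cached snapshot into the TC
+ * stats, e.g. a drop counter moving from 100 to 110 adds 10 to
+ * stats->qstats->drops.
+ */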
+
+static int
+nfp_abm_red_stats(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt)
+{
+ struct nfp_alink_stats *prev_stats;
+ struct nfp_alink_stats stats;
+ int i, err;
+
+ i = nfp_abm_red_find(alink, opt);
+ if (i < 0)
+ return i;
+ prev_stats = &alink->qdiscs[i].stats;
+
+ if (alink->parent == TC_H_ROOT)
+ err = nfp_abm_ctrl_read_stats(alink, &stats);
+ else
+ err = nfp_abm_ctrl_read_q_stats(alink, i, &stats);
+ if (err)
+ return err;
+
+ nfp_abm_update_stats(&stats, prev_stats, &opt->stats);
+
+ *prev_stats = stats;
+
+ return 0;
+}
+
+static int
+nfp_abm_red_xstats(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt)
+{
+ struct nfp_alink_xstats *prev_xstats;
+ struct nfp_alink_xstats xstats;
+ int i, err;
+
+ i = nfp_abm_red_find(alink, opt);
+ if (i < 0)
+ return i;
+ prev_xstats = &alink->qdiscs[i].xstats;
+
+ if (alink->parent == TC_H_ROOT)
+ err = nfp_abm_ctrl_read_xstats(alink, &xstats);
+ else
+ err = nfp_abm_ctrl_read_q_xstats(alink, i, &xstats);
+ if (err)
+ return err;
+
+ opt->xstats->forced_mark += xstats.ecn_marked - prev_xstats->ecn_marked;
+ opt->xstats->pdrop += xstats.pdrop - prev_xstats->pdrop;
+
+ *prev_xstats = xstats;
+
+ return 0;
+}
+
+static int
+nfp_abm_setup_tc_red(struct net_device *netdev, struct nfp_abm_link *alink,
+ struct tc_red_qopt_offload *opt)
+{
+ switch (opt->command) {
+ case TC_RED_REPLACE:
+ return nfp_abm_red_replace(netdev, alink, opt);
+ case TC_RED_DESTROY:
+ nfp_abm_red_destroy(netdev, alink, opt->handle);
+ return 0;
+ case TC_RED_STATS:
+ return nfp_abm_red_stats(alink, opt);
+ case TC_RED_XSTATS:
+ return nfp_abm_red_xstats(alink, opt);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int
+nfp_abm_mq_stats(struct nfp_abm_link *alink, struct tc_mq_qopt_offload *opt)
+{
+ struct nfp_alink_stats stats;
+ unsigned int i;
+ int err;
+
+ for (i = 0; i < alink->num_qdiscs; i++) {
+ if (alink->qdiscs[i].handle == TC_H_UNSPEC)
+ continue;
+
+ err = nfp_abm_ctrl_read_q_stats(alink, i, &stats);
+ if (err)
+ return err;
+
+ nfp_abm_update_stats(&stats, &alink->qdiscs[i].stats,
+ &opt->stats);
+ }
+
+ return 0;
+}
+
+static int
+nfp_abm_setup_tc_mq(struct net_device *netdev, struct nfp_abm_link *alink,
+ struct tc_mq_qopt_offload *opt)
+{
+ switch (opt->command) {
+ case TC_MQ_CREATE:
+ nfp_abm_reset_root(netdev, alink, opt->handle,
+ alink->total_queues);
+ return 0;
+ case TC_MQ_DESTROY:
+ if (opt->handle == alink->parent)
+ nfp_abm_reset_root(netdev, alink, TC_H_ROOT, 0);
+ return 0;
+ case TC_MQ_STATS:
+ return nfp_abm_mq_stats(alink, opt);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int
+nfp_abm_setup_tc(struct nfp_app *app, struct net_device *netdev,
+ enum tc_setup_type type, void *type_data)
+{
+ struct nfp_repr *repr = netdev_priv(netdev);
+ struct nfp_port *port;
+
+ port = nfp_port_from_netdev(netdev);
+ if (!port || port->type != NFP_PORT_PF_PORT)
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_QDISC_MQ:
+ return nfp_abm_setup_tc_mq(netdev, repr->app_priv, type_data);
+ case TC_SETUP_QDISC_RED:
+ return nfp_abm_setup_tc_red(netdev, repr->app_priv, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static struct net_device *nfp_abm_repr_get(struct nfp_app *app, u32 port_id)
+{
+ enum nfp_repr_type rtype;
+ struct nfp_reprs *reprs;
+ u8 port;
+
+ rtype = FIELD_GET(NFP_ABM_PORTID_TYPE, port_id);
+ port = FIELD_GET(NFP_ABM_PORTID_ID, port_id);
+
+ reprs = rcu_dereference(app->reprs[rtype]);
+ if (!reprs)
+ return NULL;
+
+ if (port >= reprs->num_reprs)
+ return NULL;
+
+ return rcu_dereference(reprs->reprs[port]);
+}
+
+static int
+nfp_abm_spawn_repr(struct nfp_app *app, struct nfp_abm_link *alink,
+ enum nfp_port_type ptype)
+{
+ struct net_device *netdev;
+ enum nfp_repr_type rtype;
+ struct nfp_reprs *reprs;
+ struct nfp_repr *repr;
+ struct nfp_port *port;
+ unsigned int txqs;
+ int err;
+
+ if (ptype == NFP_PORT_PHYS_PORT) {
+ rtype = NFP_REPR_TYPE_PHYS_PORT;
+ txqs = 1;
+ } else {
+ rtype = NFP_REPR_TYPE_PF;
+ txqs = alink->vnic->max_rx_rings;
+ }
+
+ netdev = nfp_repr_alloc_mqs(app, txqs, 1);
+ if (!netdev)
+ return -ENOMEM;
+ repr = netdev_priv(netdev);
+ repr->app_priv = alink;
+
+ port = nfp_port_alloc(app, ptype, netdev);
+ if (IS_ERR(port)) {
+ err = PTR_ERR(port);
+ goto err_free_repr;
+ }
+
+ if (ptype == NFP_PORT_PHYS_PORT) {
+ port->eth_forced = true;
+ err = nfp_port_init_phy_port(app->pf, app, port, alink->id);
+ if (err)
+ goto err_free_port;
+ } else {
+ port->pf_id = alink->abm->pf_id;
+ port->pf_split = app->pf->max_data_vnics > 1;
+ port->pf_split_id = alink->id;
+ port->vnic = alink->vnic->dp.ctrl_bar;
+ }
+
+ SET_NETDEV_DEV(netdev, &alink->vnic->pdev->dev);
+ eth_hw_addr_random(netdev);
+
+ err = nfp_repr_init(app, netdev, nfp_abm_portid(rtype, alink->id),
+ port, alink->vnic->dp.netdev);
+ if (err)
+ goto err_free_port;
+
+ reprs = nfp_reprs_get_locked(app, rtype);
+ WARN(nfp_repr_get_locked(app, reprs, alink->id), "duplicate repr");
+ rcu_assign_pointer(reprs->reprs[alink->id], netdev);
+
+ nfp_info(app->cpp, "%s Port %d Representor(%s) created\n",
+ ptype == NFP_PORT_PF_PORT ? "PCIe" : "Phys",
+ alink->id, netdev->name);
+
+ return 0;
+
+err_free_port:
+ nfp_port_free(port);
+err_free_repr:
+ nfp_repr_free(netdev);
+ return err;
+}
+
+static void
+nfp_abm_kill_repr(struct nfp_app *app, struct nfp_abm_link *alink,
+ enum nfp_repr_type rtype)
+{
+ struct net_device *netdev;
+ struct nfp_reprs *reprs;
+
+ reprs = nfp_reprs_get_locked(app, rtype);
+ netdev = nfp_repr_get_locked(app, reprs, alink->id);
+ if (!netdev)
+ return;
+ rcu_assign_pointer(reprs->reprs[alink->id], NULL);
+ synchronize_rcu();
+ /* Cast to make sure nfp_repr_clean_and_free() takes a nfp_repr */
+ nfp_repr_clean_and_free((struct nfp_repr *)netdev_priv(netdev));
+}
+
+static void
+nfp_abm_kill_reprs(struct nfp_abm *abm, struct nfp_abm_link *alink)
+{
+ nfp_abm_kill_repr(abm->app, alink, NFP_REPR_TYPE_PF);
+ nfp_abm_kill_repr(abm->app, alink, NFP_REPR_TYPE_PHYS_PORT);
+}
+
+static void nfp_abm_kill_reprs_all(struct nfp_abm *abm)
+{
+ struct nfp_pf *pf = abm->app->pf;
+ struct nfp_net *nn;
+
+ list_for_each_entry(nn, &pf->vnics, vnic_list)
+ nfp_abm_kill_reprs(abm, (struct nfp_abm_link *)nn->app_priv);
+}
+
+static enum devlink_eswitch_mode nfp_abm_eswitch_mode_get(struct nfp_app *app)
+{
+ struct nfp_abm *abm = app->priv;
+
+ return abm->eswitch_mode;
+}
+
+static int nfp_abm_eswitch_set_legacy(struct nfp_abm *abm)
+{
+ nfp_abm_kill_reprs_all(abm);
+ nfp_abm_ctrl_qm_disable(abm);
+
+ abm->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY;
+ return 0;
+}
+
+static void nfp_abm_eswitch_clean_up(struct nfp_abm *abm)
+{
+ if (abm->eswitch_mode != DEVLINK_ESWITCH_MODE_LEGACY)
+ WARN_ON(nfp_abm_eswitch_set_legacy(abm));
+}
+
+static int nfp_abm_eswitch_set_switchdev(struct nfp_abm *abm)
+{
+ struct nfp_app *app = abm->app;
+ struct nfp_pf *pf = app->pf;
+ struct nfp_net *nn;
+ int err;
+
+ err = nfp_abm_ctrl_qm_enable(abm);
+ if (err)
+ return err;
+
+ list_for_each_entry(nn, &pf->vnics, vnic_list) {
+ struct nfp_abm_link *alink = nn->app_priv;
+
+ err = nfp_abm_spawn_repr(app, alink, NFP_PORT_PHYS_PORT);
+ if (err)
+ goto err_kill_all_reprs;
+
+ err = nfp_abm_spawn_repr(app, alink, NFP_PORT_PF_PORT);
+ if (err)
+ goto err_kill_all_reprs;
+ }
+
+ abm->eswitch_mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
+ return 0;
+
+err_kill_all_reprs:
+ nfp_abm_kill_reprs_all(abm);
+ nfp_abm_ctrl_qm_disable(abm);
+ return err;
+}
+
+static int nfp_abm_eswitch_mode_set(struct nfp_app *app, u16 mode)
+{
+ struct nfp_abm *abm = app->priv;
+
+ if (abm->eswitch_mode == mode)
+ return 0;
+
+ switch (mode) {
+ case DEVLINK_ESWITCH_MODE_LEGACY:
+ return nfp_abm_eswitch_set_legacy(abm);
+ case DEVLINK_ESWITCH_MODE_SWITCHDEV:
+ return nfp_abm_eswitch_set_switchdev(abm);
+ default:
+ return -EINVAL;
+ }
+}
+
+static void
+nfp_abm_vnic_set_mac(struct nfp_pf *pf, struct nfp_abm *abm, struct nfp_net *nn,
+ unsigned int id)
+{
+ struct nfp_eth_table_port *eth_port = &pf->eth_tbl->ports[id];
+ u8 mac_addr[ETH_ALEN];
+ const char *mac_str;
+ char name[32];
+
+ if (id >= pf->eth_tbl->count) {
+ nfp_warn(pf->cpp, "No entry for persistent MAC address\n");
+ eth_hw_addr_random(nn->dp.netdev);
+ return;
+ }
+
+ snprintf(name, sizeof(name), "eth%u.mac.pf%u",
+ eth_port->eth_index, abm->pf_id);
+
+ mac_str = nfp_hwinfo_lookup(pf->hwinfo, name);
+ if (!mac_str) {
+ nfp_warn(pf->cpp, "Can't lookup persistent MAC address (%s)\n",
+ name);
+ eth_hw_addr_random(nn->dp.netdev);
+ return;
+ }
+
+ if (sscanf(mac_str, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
+ &mac_addr[0], &mac_addr[1], &mac_addr[2],
+ &mac_addr[3], &mac_addr[4], &mac_addr[5]) != 6) {
+ nfp_warn(pf->cpp, "Can't parse persistent MAC address (%s)\n",
+ mac_str);
+ eth_hw_addr_random(nn->dp.netdev);
+ return;
+ }
+
+ ether_addr_copy(nn->dp.netdev->dev_addr, mac_addr);
+ ether_addr_copy(nn->dp.netdev->perm_addr, mac_addr);
+}
+
+static int
+nfp_abm_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id)
+{
+ struct nfp_eth_table_port *eth_port = &app->pf->eth_tbl->ports[id];
+ struct nfp_abm *abm = app->priv;
+ struct nfp_abm_link *alink;
+ int err;
+
+ alink = kzalloc(sizeof(*alink), GFP_KERNEL);
+ if (!alink)
+ return -ENOMEM;
+ nn->app_priv = alink;
+ alink->abm = abm;
+ alink->vnic = nn;
+ alink->id = id;
+ alink->parent = TC_H_ROOT;
+ alink->total_queues = alink->vnic->max_rx_rings;
+ alink->qdiscs = kvzalloc(sizeof(*alink->qdiscs) * alink->total_queues,
+ GFP_KERNEL);
+ if (!alink->qdiscs) {
+ err = -ENOMEM;
+ goto err_free_alink;
+ }
+
+ /* This is a multi-host app; make sure the MAC/PHY is up, but don't
+ * make the MAC/PHY state follow the state of any of the ports.
+ */
+ err = nfp_eth_set_configured(app->cpp, eth_port->index, true);
+ if (err < 0)
+ goto err_free_qdiscs;
+
+ netif_keep_dst(nn->dp.netdev);
+
+ nfp_abm_vnic_set_mac(app->pf, abm, nn, id);
+ nfp_abm_ctrl_read_params(alink);
+
+ return 0;
+
+err_free_qdiscs:
+ kvfree(alink->qdiscs);
+err_free_alink:
+ kfree(alink);
+ return err;
+}
+
+static void nfp_abm_vnic_free(struct nfp_app *app, struct nfp_net *nn)
+{
+ struct nfp_abm_link *alink = nn->app_priv;
+
+ nfp_abm_kill_reprs(alink->abm, alink);
+ kvfree(alink->qdiscs);
+ kfree(alink);
+}
+
+static u64 *
+nfp_abm_port_get_stats(struct nfp_app *app, struct nfp_port *port, u64 *data)
+{
+ struct nfp_repr *repr = netdev_priv(port->netdev);
+ struct nfp_abm_link *alink;
+ unsigned int i;
+
+ if (port->type != NFP_PORT_PF_PORT)
+ return data;
+ alink = repr->app_priv;
+ for (i = 0; i < alink->vnic->dp.num_r_vecs; i++) {
+ *data++ = nfp_abm_ctrl_stat_non_sto(alink, i);
+ *data++ = nfp_abm_ctrl_stat_sto(alink, i);
+ }
+ return data;
+}
+
+static int
+nfp_abm_port_get_stats_count(struct nfp_app *app, struct nfp_port *port)
+{
+ struct nfp_repr *repr = netdev_priv(port->netdev);
+ struct nfp_abm_link *alink;
+
+ if (port->type != NFP_PORT_PF_PORT)
+ return 0;
+ alink = repr->app_priv;
+ return alink->vnic->dp.num_r_vecs * 2;
+}
+
+static u8 *
+nfp_abm_port_get_stats_strings(struct nfp_app *app, struct nfp_port *port,
+ u8 *data)
+{
+ struct nfp_repr *repr = netdev_priv(port->netdev);
+ struct nfp_abm_link *alink;
+ unsigned int i;
+
+ if (port->type != NFP_PORT_PF_PORT)
+ return data;
+ alink = repr->app_priv;
+ for (i = 0; i < alink->vnic->dp.num_r_vecs; i++) {
+ data = nfp_pr_et(data, "q%u_no_wait", i);
+ data = nfp_pr_et(data, "q%u_delayed", i);
+ }
+ return data;
+}
+
+static int nfp_abm_init(struct nfp_app *app)
+{
+ struct nfp_pf *pf = app->pf;
+ struct nfp_reprs *reprs;
+ struct nfp_abm *abm;
+ int err;
+
+ if (!pf->eth_tbl) {
+ nfp_err(pf->cpp, "ABM NIC requires ETH table\n");
+ return -EINVAL;
+ }
+ if (pf->max_data_vnics != pf->eth_tbl->count) {
+ nfp_err(pf->cpp, "ETH entries don't match vNICs (%d vs %d)\n",
+ pf->max_data_vnics, pf->eth_tbl->count);
+ return -EINVAL;
+ }
+ if (!pf->mac_stats_bar) {
+ nfp_warn(app->cpp, "ABM NIC requires mac_stats symbol\n");
+ return -EINVAL;
+ }
+
+ abm = kzalloc(sizeof(*abm), GFP_KERNEL);
+ if (!abm)
+ return -ENOMEM;
+ app->priv = abm;
+ abm->app = app;
+
+ err = nfp_abm_ctrl_find_addrs(abm);
+ if (err)
+ goto err_free_abm;
+
+ /* We start in legacy mode; make sure advanced queuing is disabled */
+ err = nfp_abm_ctrl_qm_disable(abm);
+ if (err)
+ goto err_free_abm;
+
+ err = -ENOMEM;
+ reprs = nfp_reprs_alloc(pf->max_data_vnics);
+ if (!reprs)
+ goto err_free_abm;
+ RCU_INIT_POINTER(app->reprs[NFP_REPR_TYPE_PHYS_PORT], reprs);
+
+ reprs = nfp_reprs_alloc(pf->max_data_vnics);
+ if (!reprs)
+ goto err_free_phys;
+ RCU_INIT_POINTER(app->reprs[NFP_REPR_TYPE_PF], reprs);
+
+ return 0;
+
+err_free_phys:
+ nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PHYS_PORT);
+err_free_abm:
+ kfree(abm);
+ app->priv = NULL;
+ return err;
+}
+
+static void nfp_abm_clean(struct nfp_app *app)
+{
+ struct nfp_abm *abm = app->priv;
+
+ nfp_abm_eswitch_clean_up(abm);
+ nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PF);
+ nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PHYS_PORT);
+ kfree(abm);
+ app->priv = NULL;
+}
+
+const struct nfp_app_type app_abm = {
+ .id = NFP_APP_ACTIVE_BUFFER_MGMT_NIC,
+ .name = "abm",
+
+ .init = nfp_abm_init,
+ .clean = nfp_abm_clean,
+
+ .vnic_alloc = nfp_abm_vnic_alloc,
+ .vnic_free = nfp_abm_vnic_free,
+
+ .port_get_stats = nfp_abm_port_get_stats,
+ .port_get_stats_count = nfp_abm_port_get_stats_count,
+ .port_get_stats_strings = nfp_abm_port_get_stats_strings,
+
+ .setup_tc = nfp_abm_setup_tc,
+
+ .eswitch_mode_get = nfp_abm_eswitch_mode_get,
+ .eswitch_mode_set = nfp_abm_eswitch_mode_set,
+
+ .repr_get = nfp_abm_repr_get,
+};
diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.h b/drivers/net/ethernet/netronome/nfp/abm/main.h
new file mode 100644
index 000000000000..934a70835473
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/abm/main.h
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2018 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License
+ * Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __NFP_ABM_H__
+#define __NFP_ABM_H__ 1
+
+#include <net/devlink.h>
+
+struct nfp_app;
+struct nfp_net;
+
+#define NFP_ABM_PORTID_TYPE GENMASK(23, 16)
+#define NFP_ABM_PORTID_ID GENMASK(7, 0)
+
+/**
+ * struct nfp_abm - ABM NIC app structure
+ * @app: back pointer to nfp_app
+ * @pf_id: ID of our PF link
+ * @eswitch_mode: devlink eswitch mode, advanced functions only visible
+ * in switchdev mode
+ * @q_lvls: queue level control area
+ * @qm_stats: queue statistics symbol
+ */
+struct nfp_abm {
+ struct nfp_app *app;
+ unsigned int pf_id;
+ enum devlink_eswitch_mode eswitch_mode;
+ const struct nfp_rtsym *q_lvls;
+ const struct nfp_rtsym *qm_stats;
+};
+
+/**
+ * struct nfp_alink_stats - ABM NIC statistics
+ * @tx_pkts: number of TXed packets
+ * @tx_bytes: number of TXed bytes
+ * @backlog_pkts: momentary backlog length (packets)
+ * @backlog_bytes: momentary backlog length (bytes)
+ * @overlimits: number of ECN marked TXed packets (cumulative)
+ * @drops: number of tail-dropped packets (cumulative)
+ */
+struct nfp_alink_stats {
+ u64 tx_pkts;
+ u64 tx_bytes;
+ u64 backlog_pkts;
+ u64 backlog_bytes;
+ u64 overlimits;
+ u64 drops;
+};
+
+/**
+ * struct nfp_alink_xstats - extended ABM NIC statistics
+ * @ecn_marked: number of ECN marked TXed packets
+ * @pdrop: number of hard drops due to queue limit
+ */
+struct nfp_alink_xstats {
+ u64 ecn_marked;
+ u64 pdrop;
+};
+
+/**
+ * struct nfp_red_qdisc - representation of single RED Qdisc
+ * @handle: handle of currently offloaded RED Qdisc
+ * @stats: statistics from last refresh
+ * @xstats: base of extended statistics
+ */
+struct nfp_red_qdisc {
+ u32 handle;
+ struct nfp_alink_stats stats;
+ struct nfp_alink_xstats xstats;
+};
+
+/**
+ * struct nfp_abm_link - port tuple of an ABM NIC
+ * @abm: back pointer to nfp_abm
+ * @vnic: data vNIC
+ * @id: id of the data vNIC
+ * @queue_base: id of the base host queue within PCIe (not QC idx)
+ * @total_queues: number of PF queues
+ * @parent: handle of expected parent, i.e. handle of MQ, or TC_H_ROOT
+ * @num_qdiscs: number of currently used qdiscs
+ * @qdiscs: array of qdiscs
+ */
+struct nfp_abm_link {
+ struct nfp_abm *abm;
+ struct nfp_net *vnic;
+ unsigned int id;
+ unsigned int queue_base;
+ unsigned int total_queues;
+ u32 parent;
+ unsigned int num_qdiscs;
+ struct nfp_red_qdisc *qdiscs;
+};
+
+void nfp_abm_ctrl_read_params(struct nfp_abm_link *alink);
+int nfp_abm_ctrl_find_addrs(struct nfp_abm *abm);
+int nfp_abm_ctrl_set_all_q_lvls(struct nfp_abm_link *alink, u32 val);
+int nfp_abm_ctrl_set_q_lvl(struct nfp_abm_link *alink, unsigned int i,
+ u32 val);
+int nfp_abm_ctrl_read_stats(struct nfp_abm_link *alink,
+ struct nfp_alink_stats *stats);
+int nfp_abm_ctrl_read_q_stats(struct nfp_abm_link *alink, unsigned int i,
+ struct nfp_alink_stats *stats);
+int nfp_abm_ctrl_read_xstats(struct nfp_abm_link *alink,
+ struct nfp_alink_xstats *xstats);
+int nfp_abm_ctrl_read_q_xstats(struct nfp_abm_link *alink, unsigned int i,
+ struct nfp_alink_xstats *xstats);
+u64 nfp_abm_ctrl_stat_non_sto(struct nfp_abm_link *alink, unsigned int i);
+u64 nfp_abm_ctrl_stat_sto(struct nfp_abm_link *alink, unsigned int i);
+int nfp_abm_ctrl_qm_enable(struct nfp_abm *abm);
+int nfp_abm_ctrl_qm_disable(struct nfp_abm *abm);
+#endif
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
index 7e298148ca26..cb87fccb9f6a 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2017 Netronome Systems, Inc.
+ * Copyright (C) 2017-2018 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
@@ -102,6 +102,15 @@ nfp_bpf_cmsg_map_req_alloc(struct nfp_app_bpf *bpf, unsigned int n)
return nfp_bpf_cmsg_alloc(bpf, size);
}
+static u8 nfp_bpf_cmsg_get_type(struct sk_buff *skb)
+{
+ struct cmsg_hdr *hdr;
+
+ hdr = (struct cmsg_hdr *)skb->data;
+
+ return hdr->type;
+}
+
static unsigned int nfp_bpf_cmsg_get_tag(struct sk_buff *skb)
{
struct cmsg_hdr *hdr;
@@ -431,6 +440,11 @@ void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb)
goto err_free;
}
+ if (nfp_bpf_cmsg_get_type(skb) == CMSG_TYPE_BPF_EVENT) {
+ nfp_bpf_event_output(bpf, skb);
+ return;
+ }
+
nfp_ctrl_lock(bpf->app->ctrl);
tag = nfp_bpf_cmsg_get_tag(skb);
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/fw.h b/drivers/net/ethernet/netronome/nfp/bpf/fw.h
index 39639ac28b01..4c7972e3db63 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/fw.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/fw.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2017 Netronome Systems, Inc.
+ * Copyright (C) 2017-2018 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
@@ -37,11 +37,20 @@
#include <linux/bitops.h>
#include <linux/types.h>
+/* Kernel's enum bpf_reg_type is not uABI, so people may change it,
+ * breaking our FW ABI. In that case we will do translation in the driver.
+ */
+#define NFP_BPF_SCALAR_VALUE 1
+#define NFP_BPF_MAP_VALUE 4
+#define NFP_BPF_STACK 6
+#define NFP_BPF_PACKET_DATA 8
+
enum bpf_cap_tlv_type {
NFP_BPF_CAP_TYPE_FUNC = 1,
NFP_BPF_CAP_TYPE_ADJUST_HEAD = 2,
NFP_BPF_CAP_TYPE_MAPS = 3,
NFP_BPF_CAP_TYPE_RANDOM = 4,
+ NFP_BPF_CAP_TYPE_QUEUE_SELECT = 5,
};
struct nfp_bpf_cap_tlv_func {
@@ -81,6 +90,7 @@ enum nfp_bpf_cmsg_type {
CMSG_TYPE_MAP_DELETE = 5,
CMSG_TYPE_MAP_GETNEXT = 6,
CMSG_TYPE_MAP_GETFIRST = 7,
+ CMSG_TYPE_BPF_EVENT = 8,
__CMSG_TYPE_MAP_MAX,
};
@@ -155,4 +165,13 @@ struct cmsg_reply_map_op {
__be32 resv;
struct cmsg_key_value_pair elem[0];
};
+
+struct cmsg_bpf_event {
+ struct cmsg_hdr hdr;
+ __be32 cpu_id;
+ __be64 map_ptr;
+ __be32 data_size;
+ __be32 pkt_size;
+ u8 data[0];
+};
#endif
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
index 29b4e5f8c102..8a92088df0d7 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2016-2017 Netronome Systems, Inc.
+ * Copyright (C) 2016-2018 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
@@ -42,6 +42,7 @@
#include "main.h"
#include "../nfp_asm.h"
+#include "../nfp_net_ctrl.h"
/* --- NFP prog --- */
/* Foreach "multiple" entries macros provide pos and next<n> pointers.
@@ -211,6 +212,60 @@ emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer)
}
static void
+__emit_br_bit(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 addr, u8 defer,
+ bool set, bool src_lmextn)
+{
+ u16 addr_lo, addr_hi;
+ u64 insn;
+
+ addr_lo = addr & (OP_BR_BIT_ADDR_LO >> __bf_shf(OP_BR_BIT_ADDR_LO));
+ addr_hi = addr != addr_lo;
+
+ insn = OP_BR_BIT_BASE |
+ FIELD_PREP(OP_BR_BIT_A_SRC, areg) |
+ FIELD_PREP(OP_BR_BIT_B_SRC, breg) |
+ FIELD_PREP(OP_BR_BIT_BV, set) |
+ FIELD_PREP(OP_BR_BIT_DEFBR, defer) |
+ FIELD_PREP(OP_BR_BIT_ADDR_LO, addr_lo) |
+ FIELD_PREP(OP_BR_BIT_ADDR_HI, addr_hi) |
+ FIELD_PREP(OP_BR_BIT_SRC_LMEXTN, src_lmextn);
+
+ nfp_prog_push(nfp_prog, insn);
+}
+
+static void
+emit_br_bit_relo(struct nfp_prog *nfp_prog, swreg src, u8 bit, u16 addr,
+ u8 defer, bool set, enum nfp_relo_type relo)
+{
+ struct nfp_insn_re_regs reg;
+ int err;
+
+ /* NOTE: The bit to test is specified as a rotation amount, such that
+ * the bit to test will be placed on the MSB of the result when
+ * doing a rotate right. For bit X, we need right rotate X + 1.
+ */
+ bit += 1;
+
+ err = swreg_to_restricted(reg_none(), src, reg_imm(bit), &reg, false);
+ if (err) {
+ nfp_prog->error = err;
+ return;
+ }
+
+ __emit_br_bit(nfp_prog, reg.areg, reg.breg, addr, defer, set,
+ reg.src_lmextn);
+
+ nfp_prog->prog[nfp_prog->prog_len - 1] |=
+ FIELD_PREP(OP_RELO_TYPE, relo);
+}
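+
+/* Illustrative: to branch on bit 5 of the shift amount (bit 5 set means
+ * the amount is >= 32, see shl_reg64() below) the source is right-rotated
+ * by 6, parking bit 5 in the MSB where the branch condition samples it.
+ */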
+
+static void
+emit_br_bset(struct nfp_prog *nfp_prog, swreg src, u8 bit, u16 addr, u8 defer)
+{
+ emit_br_bit_relo(nfp_prog, src, bit, addr, defer, true, RELO_BR_REL);
+}
+
+static void
__emit_immed(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 imm_hi,
enum immed_width width, bool invert,
enum immed_shift shift, bool wr_both,
@@ -309,6 +364,19 @@ emit_shf(struct nfp_prog *nfp_prog, swreg dst,
}
static void
+emit_shf_indir(struct nfp_prog *nfp_prog, swreg dst,
+ swreg lreg, enum shf_op op, swreg rreg, enum shf_sc sc)
+{
+ if (sc == SHF_SC_R_ROT) {
+ pr_err("indirect shift is not allowed on rotation\n");
+ nfp_prog->error = -EFAULT;
+ return;
+ }
+
+ emit_shf(nfp_prog, dst, lreg, op, rreg, sc, 0);
+}
+
+static void
__emit_alu(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
u16 areg, enum alu_op op, u16 breg, bool swap, bool wr_both,
bool dst_lmextn, bool src_lmextn)
@@ -1214,45 +1282,83 @@ wrp_test_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
return 0;
}
-static int
-wrp_cmp_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
- enum br_mask br_mask, bool swap)
+static const struct jmp_code_map {
+ enum br_mask br_mask;
+ bool swap;
+} jmp_code_map[] = {
+ [BPF_JGT >> 4] = { BR_BLO, true },
+ [BPF_JGE >> 4] = { BR_BHS, false },
+ [BPF_JLT >> 4] = { BR_BLO, false },
+ [BPF_JLE >> 4] = { BR_BHS, true },
+ [BPF_JSGT >> 4] = { BR_BLT, true },
+ [BPF_JSGE >> 4] = { BR_BGE, false },
+ [BPF_JSLT >> 4] = { BR_BLT, false },
+ [BPF_JSLE >> 4] = { BR_BGE, true },
+};
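+
+/* Illustrative: the BPF jump opcode occupies the high nibble of
+ * insn.code, so e.g. BPF_JGT (0x20) selects entry 2 of the table after
+ * the ">> 4" in nfp_jmp_code_get() below.
+ */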
+
+static const struct jmp_code_map *nfp_jmp_code_get(struct nfp_insn_meta *meta)
+{
+ unsigned int op;
+
+ op = BPF_OP(meta->insn.code) >> 4;
+ /* br_mask of 0 is BR_BEQ, which we don't use in the jump code table */
+ if (WARN_ONCE(op >= ARRAY_SIZE(jmp_code_map) ||
+ !jmp_code_map[op].br_mask,
+ "no code found for jump instruction"))
+ return NULL;
+
+ return &jmp_code_map[op];
+}
+
+static int cmp_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
const struct bpf_insn *insn = &meta->insn;
u64 imm = insn->imm; /* sign extend */
+ const struct jmp_code_map *code;
+ enum alu_op alu_op, carry_op;
u8 reg = insn->dst_reg * 2;
swreg tmp_reg;
+ code = nfp_jmp_code_get(meta);
+ if (!code)
+ return -EINVAL;
+
+ alu_op = meta->jump_neg_op ? ALU_OP_ADD : ALU_OP_SUB;
+ carry_op = meta->jump_neg_op ? ALU_OP_ADD_C : ALU_OP_SUB_C;
+
tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
- if (!swap)
- emit_alu(nfp_prog, reg_none(), reg_a(reg), ALU_OP_SUB, tmp_reg);
+ if (!code->swap)
+ emit_alu(nfp_prog, reg_none(), reg_a(reg), alu_op, tmp_reg);
else
- emit_alu(nfp_prog, reg_none(), tmp_reg, ALU_OP_SUB, reg_a(reg));
+ emit_alu(nfp_prog, reg_none(), tmp_reg, alu_op, reg_a(reg));
tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
- if (!swap)
+ if (!code->swap)
emit_alu(nfp_prog, reg_none(),
- reg_a(reg + 1), ALU_OP_SUB_C, tmp_reg);
+ reg_a(reg + 1), carry_op, tmp_reg);
else
emit_alu(nfp_prog, reg_none(),
- tmp_reg, ALU_OP_SUB_C, reg_a(reg + 1));
+ tmp_reg, carry_op, reg_a(reg + 1));
- emit_br(nfp_prog, br_mask, insn->off, 0);
+ emit_br(nfp_prog, code->br_mask, insn->off, 0);
return 0;
}
-static int
-wrp_cmp_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
- enum br_mask br_mask, bool swap)
+static int cmp_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
const struct bpf_insn *insn = &meta->insn;
+ const struct jmp_code_map *code;
u8 areg, breg;
+ code = nfp_jmp_code_get(meta);
+ if (!code)
+ return -EINVAL;
+
areg = insn->dst_reg * 2;
breg = insn->src_reg * 2;
- if (swap) {
+ if (code->swap) {
areg ^= breg;
breg ^= areg;
areg ^= breg;
@@ -1261,7 +1367,7 @@ wrp_cmp_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
emit_alu(nfp_prog, reg_none(), reg_a(areg), ALU_OP_SUB, reg_b(breg));
emit_alu(nfp_prog, reg_none(),
reg_a(areg + 1), ALU_OP_SUB_C, reg_b(breg + 1));
- emit_br(nfp_prog, br_mask, insn->off, 0);
+ emit_br(nfp_prog, code->br_mask, insn->off, 0);
return 0;
}
@@ -1357,15 +1463,9 @@ static int adjust_head(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
static int
map_call_stack_common(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- struct bpf_offloaded_map *offmap;
- struct nfp_bpf_map *nfp_map;
bool load_lm_ptr;
u32 ret_tgt;
s64 lm_off;
- swreg tid;
-
- offmap = (struct bpf_offloaded_map *)meta->arg1.map_ptr;
- nfp_map = offmap->dev_priv;
/* We only have to reload LM0 if the key is not at start of stack */
lm_off = nfp_prog->stack_depth;
@@ -1378,17 +1478,12 @@ map_call_stack_common(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
if (meta->func_id == BPF_FUNC_map_update_elem)
emit_csr_wr(nfp_prog, reg_b(3 * 2), NFP_CSR_ACT_LM_ADDR2);
- /* Load map ID into a register, it should actually fit as an immediate
- * but in case it doesn't deal with it here, not in the delay slots.
- */
- tid = ur_load_imm_any(nfp_prog, nfp_map->tid, imm_a(nfp_prog));
-
emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO + meta->func_id,
2, RELO_BR_HELPER);
ret_tgt = nfp_prog_current_offset(nfp_prog) + 2;
/* Load map ID into A0 */
- wrp_mov(nfp_prog, reg_a(0), tid);
+ wrp_mov(nfp_prog, reg_a(0), reg_a(2));
/* Load the return address into B0 */
wrp_immed_relo(nfp_prog, reg_b(0), ret_tgt, RELO_IMMED_REL);
@@ -1400,7 +1495,7 @@ map_call_stack_common(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
if (!load_lm_ptr)
return 0;
- emit_csr_wr(nfp_prog, stack_reg(nfp_prog), NFP_CSR_ACT_LM_ADDR0);
+ emit_csr_wr(nfp_prog, stack_reg(nfp_prog), NFP_CSR_ACT_LM_ADDR0);
wrp_nops(nfp_prog, 3);
return 0;
@@ -1418,6 +1513,63 @@ nfp_get_prandom_u32(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
return 0;
}
+static int
+nfp_perf_event_output(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ swreg ptr_type;
+ u32 ret_tgt;
+
+ ptr_type = ur_load_imm_any(nfp_prog, meta->arg1.type, imm_a(nfp_prog));
+
+ ret_tgt = nfp_prog_current_offset(nfp_prog) + 3;
+
+ emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO + meta->func_id,
+ 2, RELO_BR_HELPER);
+
+ /* Load ptr type into A1 */
+ wrp_mov(nfp_prog, reg_a(1), ptr_type);
+
+ /* Load the return address into B0 */
+ wrp_immed_relo(nfp_prog, reg_b(0), ret_tgt, RELO_IMMED_REL);
+
+ if (!nfp_prog_confirm_current_offset(nfp_prog, ret_tgt))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+nfp_queue_select(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ u32 jmp_tgt;
+
+ jmp_tgt = nfp_prog_current_offset(nfp_prog) + 5;
+
+ /* Make sure the queue id fits into the FW field */
+ emit_alu(nfp_prog, reg_none(), reg_a(meta->insn.src_reg * 2),
+ ALU_OP_AND_NOT_B, reg_imm(0xff));
+ emit_br(nfp_prog, BR_BEQ, jmp_tgt, 2);
+
+ /* Set the 'queue selected' bit and the queue value */
+ emit_shf(nfp_prog, pv_qsel_set(nfp_prog),
+ pv_qsel_set(nfp_prog), SHF_OP_OR, reg_imm(1),
+ SHF_SC_L_SHF, PKT_VEL_QSEL_SET_BIT);
+ emit_ld_field(nfp_prog,
+ pv_qsel_val(nfp_prog), 0x1, reg_b(meta->insn.src_reg * 2),
+ SHF_SC_NONE, 0);
+ /* Delay slots end here; we will jump over the next instruction if the
+ * queue value fits into the field.
+ */
+ emit_ld_field(nfp_prog,
+ pv_qsel_val(nfp_prog), 0x1, reg_imm(NFP_NET_RXR_MAX),
+ SHF_SC_NONE, 0);
+
+ if (!nfp_prog_confirm_current_offset(nfp_prog, jmp_tgt))
+ return -EINVAL;
+
+ return 0;
+}
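+
+/* Illustrative: a queue id that fits into 8 bits zeroes the ALU result,
+ * so the BEQ above (with the select bit and value set in its two delay
+ * slots) skips the final store; a wider id falls through and the value
+ * is replaced with NFP_NET_RXR_MAX instead.
+ */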
+
/* --- Callbacks --- */
static int mov_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
@@ -1544,26 +1696,142 @@ static int neg_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
return 0;
}
+/* Pseudo code:
+ * if shift_amt >= 32
+ * dst_high = dst_low << shift_amt[4:0]
+ * dst_low = 0;
+ * else
+ * dst_high = (dst_high, dst_low) >> (32 - shift_amt)
+ * dst_low = dst_low << shift_amt
+ *
+ * The indirect shift will use the same logic at runtime.
+ */
+static int __shl_imm64(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt)
+{
+ if (shift_amt < 32) {
+ emit_shf(nfp_prog, reg_both(dst + 1), reg_a(dst + 1),
+ SHF_OP_NONE, reg_b(dst), SHF_SC_R_DSHF,
+ 32 - shift_amt);
+ emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE,
+ reg_b(dst), SHF_SC_L_SHF, shift_amt);
+ } else if (shift_amt == 32) {
+ wrp_reg_mov(nfp_prog, dst + 1, dst);
+ wrp_immed(nfp_prog, reg_both(dst), 0);
+ } else if (shift_amt > 32) {
+ emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_NONE,
+ reg_b(dst), SHF_SC_L_SHF, shift_amt - 32);
+ wrp_immed(nfp_prog, reg_both(dst), 0);
+ }
+
+ return 0;
+}
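+
+/* Worked example (illustrative): for a dst pair (hi, lo) and
+ * shift_amt == 8, the double shift right by 32 - 8 == 24 yields
+ * hi' = (hi << 8) | (lo >> 24) and the plain left shift yields
+ * lo' = lo << 8, i.e. exactly a 64-bit "<< 8".
+ */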
+
static int shl_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
const struct bpf_insn *insn = &meta->insn;
u8 dst = insn->dst_reg * 2;
- if (insn->imm < 32) {
- emit_shf(nfp_prog, reg_both(dst + 1),
- reg_a(dst + 1), SHF_OP_NONE, reg_b(dst),
- SHF_SC_R_DSHF, 32 - insn->imm);
- emit_shf(nfp_prog, reg_both(dst),
- reg_none(), SHF_OP_NONE, reg_b(dst),
- SHF_SC_L_SHF, insn->imm);
- } else if (insn->imm == 32) {
- wrp_reg_mov(nfp_prog, dst + 1, dst);
- wrp_immed(nfp_prog, reg_both(dst), 0);
- } else if (insn->imm > 32) {
- emit_shf(nfp_prog, reg_both(dst + 1),
- reg_none(), SHF_OP_NONE, reg_b(dst),
- SHF_SC_L_SHF, insn->imm - 32);
- wrp_immed(nfp_prog, reg_both(dst), 0);
+ return __shl_imm64(nfp_prog, dst, insn->imm);
+}
+
+static void shl_reg64_lt32_high(struct nfp_prog *nfp_prog, u8 dst, u8 src)
+{
+ emit_alu(nfp_prog, imm_both(nfp_prog), reg_imm(32), ALU_OP_SUB,
+ reg_b(src));
+ emit_alu(nfp_prog, reg_none(), imm_a(nfp_prog), ALU_OP_OR, reg_imm(0));
+ emit_shf_indir(nfp_prog, reg_both(dst + 1), reg_a(dst + 1), SHF_OP_NONE,
+ reg_b(dst), SHF_SC_R_DSHF);
+}
+
+/* NOTE: for indirect left shift, HIGH part should be calculated first. */
+static void shl_reg64_lt32_low(struct nfp_prog *nfp_prog, u8 dst, u8 src)
+{
+ emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0));
+ emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE,
+ reg_b(dst), SHF_SC_L_SHF);
+}
+
+static void shl_reg64_lt32(struct nfp_prog *nfp_prog, u8 dst, u8 src)
+{
+ shl_reg64_lt32_high(nfp_prog, dst, src);
+ shl_reg64_lt32_low(nfp_prog, dst, src);
+}
+
+static void shl_reg64_ge32(struct nfp_prog *nfp_prog, u8 dst, u8 src)
+{
+ emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0));
+ emit_shf_indir(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_NONE,
+ reg_b(dst), SHF_SC_L_SHF);
+ wrp_immed(nfp_prog, reg_both(dst), 0);
+}
+
+static int shl_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+ u64 umin, umax;
+ u8 dst, src;
+
+ dst = insn->dst_reg * 2;
+ umin = meta->umin;
+ umax = meta->umax;
+ if (umin == umax)
+ return __shl_imm64(nfp_prog, dst, umin);
+
+ src = insn->src_reg * 2;
+ if (umax < 32) {
+ shl_reg64_lt32(nfp_prog, dst, src);
+ } else if (umin >= 32) {
+ shl_reg64_ge32(nfp_prog, dst, src);
+ } else {
+ /* Generate different instruction sequences depending on runtime
+ * value of shift amount.
+ */
+ u16 label_ge32, label_end;
+
+ label_ge32 = nfp_prog_current_offset(nfp_prog) + 7;
+ emit_br_bset(nfp_prog, reg_a(src), 5, label_ge32, 0);
+
+ shl_reg64_lt32_high(nfp_prog, dst, src);
+ label_end = nfp_prog_current_offset(nfp_prog) + 6;
+ emit_br(nfp_prog, BR_UNC, label_end, 2);
+ /* shl_reg64_lt32_low packed in delay slot. */
+ shl_reg64_lt32_low(nfp_prog, dst, src);
+
+ if (!nfp_prog_confirm_current_offset(nfp_prog, label_ge32))
+ return -EINVAL;
+ shl_reg64_ge32(nfp_prog, dst, src);
+
+ if (!nfp_prog_confirm_current_offset(nfp_prog, label_end))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Pseudo code:
+ * if shift_amt >= 32
+ * dst_low = dst_high >> shift_amt[4:0]
+ * dst_high = 0;
+ * else
+ * dst_high = dst_high >> shift_amt
+ * dst_low = (dst_high, dst_low) >> shift_amt
+ *
+ * Note the >= 32 case reads dst_high before zeroing it. The indirect
+ * shift will use the same logic at runtime.
+ */
+static int __shr_imm64(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt)
+{
+ if (shift_amt < 32) {
+ emit_shf(nfp_prog, reg_both(dst), reg_a(dst + 1), SHF_OP_NONE,
+ reg_b(dst), SHF_SC_R_DSHF, shift_amt);
+ emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_NONE,
+ reg_b(dst + 1), SHF_SC_R_SHF, shift_amt);
+ } else if (shift_amt == 32) {
+ wrp_reg_mov(nfp_prog, dst, dst + 1);
+ wrp_immed(nfp_prog, reg_both(dst + 1), 0);
+ } else if (shift_amt > 32) {
+ emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE,
+ reg_b(dst + 1), SHF_SC_R_SHF, shift_amt - 32);
+ wrp_immed(nfp_prog, reg_both(dst + 1), 0);
}
return 0;
@@ -1574,21 +1842,186 @@ static int shr_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
const struct bpf_insn *insn = &meta->insn;
u8 dst = insn->dst_reg * 2;
- if (insn->imm < 32) {
- emit_shf(nfp_prog, reg_both(dst),
- reg_a(dst + 1), SHF_OP_NONE, reg_b(dst),
- SHF_SC_R_DSHF, insn->imm);
- emit_shf(nfp_prog, reg_both(dst + 1),
- reg_none(), SHF_OP_NONE, reg_b(dst + 1),
- SHF_SC_R_SHF, insn->imm);
- } else if (insn->imm == 32) {
+ return __shr_imm64(nfp_prog, dst, insn->imm);
+}
+
+/* NOTE: for indirect right shift, LOW part should be calculated first. */
+static void shr_reg64_lt32_high(struct nfp_prog *nfp_prog, u8 dst, u8 src)
+{
+ emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0));
+ emit_shf_indir(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_NONE,
+ reg_b(dst + 1), SHF_SC_R_SHF);
+}
+
+static void shr_reg64_lt32_low(struct nfp_prog *nfp_prog, u8 dst, u8 src)
+{
+ emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0));
+ emit_shf_indir(nfp_prog, reg_both(dst), reg_a(dst + 1), SHF_OP_NONE,
+ reg_b(dst), SHF_SC_R_DSHF);
+}
+
+static void shr_reg64_lt32(struct nfp_prog *nfp_prog, u8 dst, u8 src)
+{
+ shr_reg64_lt32_low(nfp_prog, dst, src);
+ shr_reg64_lt32_high(nfp_prog, dst, src);
+}
+
+static void shr_reg64_ge32(struct nfp_prog *nfp_prog, u8 dst, u8 src)
+{
+ emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0));
+ emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE,
+ reg_b(dst + 1), SHF_SC_R_SHF);
+ wrp_immed(nfp_prog, reg_both(dst + 1), 0);
+}
+
+static int shr_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+ u64 umin, umax;
+ u8 dst, src;
+
+ dst = insn->dst_reg * 2;
+ umin = meta->umin;
+ umax = meta->umax;
+ if (umin == umax)
+ return __shr_imm64(nfp_prog, dst, umin);
+
+ src = insn->src_reg * 2;
+ if (umax < 32) {
+ shr_reg64_lt32(nfp_prog, dst, src);
+ } else if (umin >= 32) {
+ shr_reg64_ge32(nfp_prog, dst, src);
+ } else {
+ /* Generate different instruction sequences depending on runtime
+ * value of shift amount.
+ */
+ u16 label_ge32, label_end;
+
+ label_ge32 = nfp_prog_current_offset(nfp_prog) + 6;
+ emit_br_bset(nfp_prog, reg_a(src), 5, label_ge32, 0);
+ shr_reg64_lt32_low(nfp_prog, dst, src);
+ label_end = nfp_prog_current_offset(nfp_prog) + 6;
+ emit_br(nfp_prog, BR_UNC, label_end, 2);
+ /* shr_reg64_lt32_high packed in delay slot. */
+ shr_reg64_lt32_high(nfp_prog, dst, src);
+
+ if (!nfp_prog_confirm_current_offset(nfp_prog, label_ge32))
+ return -EINVAL;
+ shr_reg64_ge32(nfp_prog, dst, src);
+
+ if (!nfp_prog_confirm_current_offset(nfp_prog, label_end))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Code logic is the same as __shr_imm64, except that ashr requires the
+ * signedness bit to be supplied through the PREV_ALU result.
+ */
+static int __ashr_imm64(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt)
+{
+ if (shift_amt < 32) {
+ emit_shf(nfp_prog, reg_both(dst), reg_a(dst + 1), SHF_OP_NONE,
+ reg_b(dst), SHF_SC_R_DSHF, shift_amt);
+ /* Set signedness bit. */
+ emit_alu(nfp_prog, reg_none(), reg_a(dst + 1), ALU_OP_OR,
+ reg_imm(0));
+ emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR,
+ reg_b(dst + 1), SHF_SC_R_SHF, shift_amt);
+ } else if (shift_amt == 32) {
+ /* NOTE: this also helps set the signedness bit. */
wrp_reg_mov(nfp_prog, dst, dst + 1);
- wrp_immed(nfp_prog, reg_both(dst + 1), 0);
- } else if (insn->imm > 32) {
- emit_shf(nfp_prog, reg_both(dst),
- reg_none(), SHF_OP_NONE, reg_b(dst + 1),
- SHF_SC_R_SHF, insn->imm - 32);
- wrp_immed(nfp_prog, reg_both(dst + 1), 0);
+ emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR,
+ reg_b(dst + 1), SHF_SC_R_SHF, 31);
+ } else if (shift_amt > 32) {
+ emit_alu(nfp_prog, reg_none(), reg_a(dst + 1), ALU_OP_OR,
+ reg_imm(0));
+ emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_ASHR,
+ reg_b(dst + 1), SHF_SC_R_SHF, shift_amt - 32);
+ emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR,
+ reg_b(dst + 1), SHF_SC_R_SHF, 31);
+ }
+
+ return 0;
+}
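+
+/* Worked example (illustrative): for shift_amt == 40 the pair (hi, lo)
+ * becomes lo' = hi >> 8 (arithmetic) and hi' = hi >> 31 (arithmetic),
+ * i.e. both halves are sign-filled from the original bit 63.
+ */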
+
+static int ashr_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+ u8 dst = insn->dst_reg * 2;
+
+ return __ashr_imm64(nfp_prog, dst, insn->imm);
+}
+
+static void ashr_reg64_lt32_high(struct nfp_prog *nfp_prog, u8 dst, u8 src)
+{
+ /* NOTE: the first insn will set both the indirect shift amount
+ * (source A) and the signedness bit (MSB of the result).
+ */
+ emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_b(dst + 1));
+ emit_shf_indir(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR,
+ reg_b(dst + 1), SHF_SC_R_SHF);
+}
+
+static void ashr_reg64_lt32_low(struct nfp_prog *nfp_prog, u8 dst, u8 src)
+{
+ /* NOTE: it is the same as a logical shift because we don't need to
+ * shift in the signedness bit when the shift amount is less than 32.
+ */
+ shr_reg64_lt32_low(nfp_prog, dst, src);
+}
+
+static void ashr_reg64_lt32(struct nfp_prog *nfp_prog, u8 dst, u8 src)
+{
+ ashr_reg64_lt32_low(nfp_prog, dst, src);
+ ashr_reg64_lt32_high(nfp_prog, dst, src);
+}
+
+static void ashr_reg64_ge32(struct nfp_prog *nfp_prog, u8 dst, u8 src)
+{
+ emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_b(dst + 1));
+ emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_ASHR,
+ reg_b(dst + 1), SHF_SC_R_SHF);
+ emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR,
+ reg_b(dst + 1), SHF_SC_R_SHF, 31);
+}
+
+/* Like ashr_imm64, but need to use indirect shift. */
+static int ashr_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+ u64 umin, umax;
+ u8 dst, src;
+
+ dst = insn->dst_reg * 2;
+ umin = meta->umin;
+ umax = meta->umax;
+ if (umin == umax)
+ return __ashr_imm64(nfp_prog, dst, umin);
+
+ src = insn->src_reg * 2;
+ if (umax < 32) {
+ ashr_reg64_lt32(nfp_prog, dst, src);
+ } else if (umin >= 32) {
+ ashr_reg64_ge32(nfp_prog, dst, src);
+ } else {
+ u16 label_ge32, label_end;
+
+ label_ge32 = nfp_prog_current_offset(nfp_prog) + 6;
+ emit_br_bset(nfp_prog, reg_a(src), 5, label_ge32, 0);
+ ashr_reg64_lt32_low(nfp_prog, dst, src);
+ label_end = nfp_prog_current_offset(nfp_prog) + 6;
+ emit_br(nfp_prog, BR_UNC, label_end, 2);
+ /* ashr_reg64_lt32_high packed in delay slot. */
+ ashr_reg64_lt32_high(nfp_prog, dst, src);
+
+ if (!nfp_prog_confirm_current_offset(nfp_prog, label_ge32))
+ return -EINVAL;
+ ashr_reg64_ge32(nfp_prog, dst, src);
+
+ if (!nfp_prog_confirm_current_offset(nfp_prog, label_end))
+ return -EINVAL;
}
return 0;
@@ -2108,6 +2541,17 @@ mem_stx_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
false, wrp_lmem_store);
}
+static int mem_stx_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ switch (meta->insn.off) {
+ case offsetof(struct xdp_md, rx_queue_index):
+ return nfp_queue_select(nfp_prog, meta);
+ }
+
+ WARN_ON_ONCE(1); /* verifier should have rejected bad accesses */
+ return -EOPNOTSUPP;
+}
+
static int
mem_stx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
unsigned int size)
@@ -2134,6 +2578,9 @@ static int mem_stx2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
static int mem_stx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
+ if (meta->ptr.type == PTR_TO_CTX &&
+     nfp_prog->type == BPF_PROG_TYPE_XDP)
+ return mem_stx_xdp(nfp_prog, meta);
return mem_stx(nfp_prog, meta, 4);
}
@@ -2283,46 +2730,6 @@ static int jeq_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
return 0;
}
-static int jgt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
-{
- return wrp_cmp_imm(nfp_prog, meta, BR_BLO, true);
-}
-
-static int jge_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
-{
- return wrp_cmp_imm(nfp_prog, meta, BR_BHS, false);
-}
-
-static int jlt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
-{
- return wrp_cmp_imm(nfp_prog, meta, BR_BLO, false);
-}
-
-static int jle_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
-{
- return wrp_cmp_imm(nfp_prog, meta, BR_BHS, true);
-}
-
-static int jsgt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
-{
- return wrp_cmp_imm(nfp_prog, meta, BR_BLT, true);
-}
-
-static int jsge_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
-{
- return wrp_cmp_imm(nfp_prog, meta, BR_BGE, false);
-}
-
-static int jslt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
-{
- return wrp_cmp_imm(nfp_prog, meta, BR_BLT, false);
-}
-
-static int jsle_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
-{
- return wrp_cmp_imm(nfp_prog, meta, BR_BGE, true);
-}
-
static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
const struct bpf_insn *insn = &meta->insn;
@@ -2392,46 +2799,6 @@ static int jeq_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
return 0;
}
-static int jgt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
-{
- return wrp_cmp_reg(nfp_prog, meta, BR_BLO, true);
-}
-
-static int jge_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
-{
- return wrp_cmp_reg(nfp_prog, meta, BR_BHS, false);
-}
-
-static int jlt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
-{
- return wrp_cmp_reg(nfp_prog, meta, BR_BLO, false);
-}
-
-static int jle_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
-{
- return wrp_cmp_reg(nfp_prog, meta, BR_BHS, true);
-}
-
-static int jsgt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
-{
- return wrp_cmp_reg(nfp_prog, meta, BR_BLT, true);
-}
-
-static int jsge_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
-{
- return wrp_cmp_reg(nfp_prog, meta, BR_BGE, false);
-}
-
-static int jslt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
-{
- return wrp_cmp_reg(nfp_prog, meta, BR_BLT, false);
-}
-
-static int jsle_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
-{
- return wrp_cmp_reg(nfp_prog, meta, BR_BGE, true);
-}
-
static int jset_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
return wrp_test_reg(nfp_prog, meta, ALU_OP_AND, BR_BNE);
@@ -2453,6 +2820,8 @@ static int call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
return map_call_stack_common(nfp_prog, meta);
case BPF_FUNC_get_prandom_u32:
return nfp_get_prandom_u32(nfp_prog, meta);
+ case BPF_FUNC_perf_event_output:
+ return nfp_perf_event_output(nfp_prog, meta);
default:
WARN_ONCE(1, "verifier allowed unsupported function\n");
return -EOPNOTSUPP;
@@ -2480,8 +2849,12 @@ static const instr_cb_t instr_cb[256] = {
[BPF_ALU64 | BPF_SUB | BPF_X] = sub_reg64,
[BPF_ALU64 | BPF_SUB | BPF_K] = sub_imm64,
[BPF_ALU64 | BPF_NEG] = neg_reg64,
+ [BPF_ALU64 | BPF_LSH | BPF_X] = shl_reg64,
[BPF_ALU64 | BPF_LSH | BPF_K] = shl_imm64,
+ [BPF_ALU64 | BPF_RSH | BPF_X] = shr_reg64,
[BPF_ALU64 | BPF_RSH | BPF_K] = shr_imm64,
+ [BPF_ALU64 | BPF_ARSH | BPF_X] = ashr_reg64,
+ [BPF_ALU64 | BPF_ARSH | BPF_K] = ashr_imm64,
[BPF_ALU | BPF_MOV | BPF_X] = mov_reg,
[BPF_ALU | BPF_MOV | BPF_K] = mov_imm,
[BPF_ALU | BPF_XOR | BPF_X] = xor_reg,
@@ -2520,25 +2893,25 @@ static const instr_cb_t instr_cb[256] = {
[BPF_ST | BPF_MEM | BPF_DW] = mem_st8,
[BPF_JMP | BPF_JA | BPF_K] = jump,
[BPF_JMP | BPF_JEQ | BPF_K] = jeq_imm,
- [BPF_JMP | BPF_JGT | BPF_K] = jgt_imm,
- [BPF_JMP | BPF_JGE | BPF_K] = jge_imm,
- [BPF_JMP | BPF_JLT | BPF_K] = jlt_imm,
- [BPF_JMP | BPF_JLE | BPF_K] = jle_imm,
- [BPF_JMP | BPF_JSGT | BPF_K] = jsgt_imm,
- [BPF_JMP | BPF_JSGE | BPF_K] = jsge_imm,
- [BPF_JMP | BPF_JSLT | BPF_K] = jslt_imm,
- [BPF_JMP | BPF_JSLE | BPF_K] = jsle_imm,
+ [BPF_JMP | BPF_JGT | BPF_K] = cmp_imm,
+ [BPF_JMP | BPF_JGE | BPF_K] = cmp_imm,
+ [BPF_JMP | BPF_JLT | BPF_K] = cmp_imm,
+ [BPF_JMP | BPF_JLE | BPF_K] = cmp_imm,
+ [BPF_JMP | BPF_JSGT | BPF_K] = cmp_imm,
+ [BPF_JMP | BPF_JSGE | BPF_K] = cmp_imm,
+ [BPF_JMP | BPF_JSLT | BPF_K] = cmp_imm,
+ [BPF_JMP | BPF_JSLE | BPF_K] = cmp_imm,
[BPF_JMP | BPF_JSET | BPF_K] = jset_imm,
[BPF_JMP | BPF_JNE | BPF_K] = jne_imm,
[BPF_JMP | BPF_JEQ | BPF_X] = jeq_reg,
- [BPF_JMP | BPF_JGT | BPF_X] = jgt_reg,
- [BPF_JMP | BPF_JGE | BPF_X] = jge_reg,
- [BPF_JMP | BPF_JLT | BPF_X] = jlt_reg,
- [BPF_JMP | BPF_JLE | BPF_X] = jle_reg,
- [BPF_JMP | BPF_JSGT | BPF_X] = jsgt_reg,
- [BPF_JMP | BPF_JSGE | BPF_X] = jsge_reg,
- [BPF_JMP | BPF_JSLT | BPF_X] = jslt_reg,
- [BPF_JMP | BPF_JSLE | BPF_X] = jsle_reg,
+ [BPF_JMP | BPF_JGT | BPF_X] = cmp_reg,
+ [BPF_JMP | BPF_JGE | BPF_X] = cmp_reg,
+ [BPF_JMP | BPF_JLT | BPF_X] = cmp_reg,
+ [BPF_JMP | BPF_JLE | BPF_X] = cmp_reg,
+ [BPF_JMP | BPF_JSGT | BPF_X] = cmp_reg,
+ [BPF_JMP | BPF_JSGE | BPF_X] = cmp_reg,
+ [BPF_JMP | BPF_JSLT | BPF_X] = cmp_reg,
+ [BPF_JMP | BPF_JSLE | BPF_X] = cmp_reg,
[BPF_JMP | BPF_JSET | BPF_X] = jset_reg,
[BPF_JMP | BPF_JNE | BPF_X] = jne_reg,
[BPF_JMP | BPF_CALL] = call,
@@ -2777,6 +3150,54 @@ static void nfp_bpf_opt_reg_init(struct nfp_prog *nfp_prog)
}
}
+/* abs(insn.imm) will fit better into unrestricted reg immediate -
+ * convert add/sub of a negative number into a sub/add of a positive one.
+ */
+static void nfp_bpf_opt_neg_add_sub(struct nfp_prog *nfp_prog)
+{
+ struct nfp_insn_meta *meta;
+
+ list_for_each_entry(meta, &nfp_prog->insns, l) {
+ struct bpf_insn insn = meta->insn;
+
+ if (meta->skip)
+ continue;
+
+ if (BPF_CLASS(insn.code) != BPF_ALU &&
+ BPF_CLASS(insn.code) != BPF_ALU64 &&
+ BPF_CLASS(insn.code) != BPF_JMP)
+ continue;
+ if (BPF_SRC(insn.code) != BPF_K)
+ continue;
+ if (insn.imm >= 0)
+ continue;
+
+ if (BPF_CLASS(insn.code) == BPF_JMP) {
+ switch (BPF_OP(insn.code)) {
+ case BPF_JGE:
+ case BPF_JSGE:
+ case BPF_JLT:
+ case BPF_JSLT:
+ meta->jump_neg_op = true;
+ break;
+ default:
+ continue;
+ }
+ } else {
+ if (BPF_OP(insn.code) == BPF_ADD)
+ insn.code = BPF_CLASS(insn.code) | BPF_SUB;
+ else if (BPF_OP(insn.code) == BPF_SUB)
+ insn.code = BPF_CLASS(insn.code) | BPF_ADD;
+ else
+ continue;
+
+ meta->insn.code = insn.code | BPF_K;
+ }
+
+ meta->insn.imm = -insn.imm;
+ }
+}
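
As a standalone illustration of the ALU half of this pass (a minimal userspace sketch, not driver code - it reuses the uapi opcode macros and a hypothetical helper name):

#include <linux/bpf.h>
#include <stdio.h>

/* Sketch of the rewrite above: flip ADD<->SUB and negate the
 * immediate so the constant ends up positive.
 */
static void neg_add_sub(struct bpf_insn *insn)
{
	if (BPF_SRC(insn->code) != BPF_K || insn->imm >= 0)
		return;

	if (BPF_OP(insn->code) == BPF_ADD)
		insn->code = BPF_CLASS(insn->code) | BPF_SUB | BPF_K;
	else if (BPF_OP(insn->code) == BPF_SUB)
		insn->code = BPF_CLASS(insn->code) | BPF_ADD | BPF_K;
	else
		return;

	insn->imm = -insn->imm;
}

int main(void)
{
	struct bpf_insn insn = {
		.code = BPF_ALU64 | BPF_ADD | BPF_K,
		.dst_reg = 1,
		.imm = -4,			/* r1 += -4 */
	};

	neg_add_sub(&insn);
	/* Now BPF_ALU64 | BPF_SUB | BPF_K with imm == 4: r1 -= 4 */
	printf("code=0x%02x imm=%d\n", insn.code, insn.imm);
	return 0;
}
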
+
/* Remove masking after load since our load guarantees this is not needed */
static void nfp_bpf_opt_ld_mask(struct nfp_prog *nfp_prog)
{
@@ -3212,6 +3633,7 @@ static int nfp_bpf_optimize(struct nfp_prog *nfp_prog)
{
nfp_bpf_opt_reg_init(nfp_prog);
+ nfp_bpf_opt_neg_add_sub(nfp_prog);
nfp_bpf_opt_ld_mask(nfp_prog);
nfp_bpf_opt_ld_shift(nfp_prog);
nfp_bpf_opt_ldst_gather(nfp_prog);
@@ -3220,6 +3642,33 @@ static int nfp_bpf_optimize(struct nfp_prog *nfp_prog)
return 0;
}
+static int nfp_bpf_replace_map_ptrs(struct nfp_prog *nfp_prog)
+{
+ struct nfp_insn_meta *meta1, *meta2;
+ struct nfp_bpf_map *nfp_map;
+ struct bpf_map *map;
+
+ nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
+ if (meta1->skip || meta2->skip)
+ continue;
+
+ if (meta1->insn.code != (BPF_LD | BPF_IMM | BPF_DW) ||
+ meta1->insn.src_reg != BPF_PSEUDO_MAP_FD)
+ continue;
+
+ map = (void *)(unsigned long)((u32)meta1->insn.imm |
+ (u64)meta2->insn.imm << 32);
+ if (bpf_map_offload_neutral(map))
+ continue;
+ nfp_map = map_to_offmap(map)->dev_priv;
+
+ meta1->insn.imm = nfp_map->tid;
+ meta2->insn.imm = 0;
+ }
+
+ return 0;
+}
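
For context, a BPF_LD | BPF_IMM | BPF_DW pseudo-instruction spans two struct bpf_insn slots, carrying the low and high 32 bits of the value in the two imm fields - which is why the pass reads meta1 and meta2 together. A minimal sketch of the reassembly (userspace, hypothetical pointer value):

#include <linux/bpf.h>
#include <stdint.h>
#include <stdio.h>

/* Rebuild the 64-bit constant from an LD_IMM64 pair, as the
 * replacement pass does before looking the map up.
 */
static uint64_t ld_imm64_value(const struct bpf_insn insn[2])
{
	return (uint32_t)insn[0].imm | ((uint64_t)insn[1].imm << 32);
}

int main(void)
{
	uint64_t ptr = 0x0000dead0000beefULL;	/* stand-in map pointer */
	struct bpf_insn pair[2] = {
		{ .code = BPF_LD | BPF_IMM | BPF_DW,
		  .src_reg = BPF_PSEUDO_MAP_FD, .imm = (int32_t)ptr },
		{ .imm = (int32_t)(ptr >> 32) },
	};

	printf("reassembled: 0x%llx\n",
	       (unsigned long long)ld_imm64_value(pair));
	return 0;
}
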
+
static int nfp_bpf_ustore_calc(u64 *prog, unsigned int len)
{
__le64 *ustore = (__force __le64 *)prog;
@@ -3256,6 +3705,10 @@ int nfp_bpf_jit(struct nfp_prog *nfp_prog)
{
int ret;
+ ret = nfp_bpf_replace_map_ptrs(nfp_prog);
+ if (ret)
+ return ret;
+
ret = nfp_bpf_optimize(nfp_prog);
if (ret)
return ret;
@@ -3346,6 +3799,9 @@ void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv)
case BPF_FUNC_map_delete_elem:
val = nfp_prog->bpf->helpers.map_delete;
break;
+ case BPF_FUNC_perf_event_output:
+ val = nfp_prog->bpf->helpers.perf_event_output;
+ break;
default:
pr_err("relocation of unknown helper %d\n",
val);
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c
index 35fb31f682af..fcdfb8e7fdea 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2017 Netronome Systems, Inc.
+ * Copyright (C) 2017-2018 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General Public License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
@@ -43,6 +43,14 @@
#include "fw.h"
#include "main.h"
+const struct rhashtable_params nfp_bpf_maps_neutral_params = {
+ .nelem_hint = 4,
+ .key_len = FIELD_SIZEOF(struct nfp_bpf_neutral_map, ptr),
+ .key_offset = offsetof(struct nfp_bpf_neutral_map, ptr),
+ .head_offset = offsetof(struct nfp_bpf_neutral_map, l),
+ .automatic_shrinking = true,
+};
+
static bool nfp_net_ebpf_capable(struct nfp_net *nn)
{
#ifdef __LITTLE_ENDIAN
@@ -290,6 +298,9 @@ nfp_bpf_parse_cap_func(struct nfp_app_bpf *bpf, void __iomem *value, u32 length)
case BPF_FUNC_map_delete_elem:
bpf->helpers.map_delete = readl(&cap->func_addr);
break;
+ case BPF_FUNC_perf_event_output:
+ bpf->helpers.perf_event_output = readl(&cap->func_addr);
+ break;
}
return 0;
@@ -323,6 +334,13 @@ nfp_bpf_parse_cap_random(struct nfp_app_bpf *bpf, void __iomem *value,
return 0;
}
+static int
+nfp_bpf_parse_cap_qsel(struct nfp_app_bpf *bpf, void __iomem *value, u32 length)
+{
+ bpf->queue_select = true;
+ return 0;
+}
+
static int nfp_bpf_parse_capabilities(struct nfp_app *app)
{
struct nfp_cpp *cpp = app->pf->cpp;
@@ -365,6 +383,10 @@ static int nfp_bpf_parse_capabilities(struct nfp_app *app)
if (nfp_bpf_parse_cap_random(app->priv, value, length))
goto err_release_free;
break;
+ case NFP_BPF_CAP_TYPE_QUEUE_SELECT:
+ if (nfp_bpf_parse_cap_qsel(app->priv, value, length))
+ goto err_release_free;
+ break;
default:
nfp_dbg(cpp, "unknown BPF capability: %d\n", type);
break;
@@ -401,17 +423,28 @@ static int nfp_bpf_init(struct nfp_app *app)
init_waitqueue_head(&bpf->cmsg_wq);
INIT_LIST_HEAD(&bpf->map_list);
- err = nfp_bpf_parse_capabilities(app);
+ err = rhashtable_init(&bpf->maps_neutral, &nfp_bpf_maps_neutral_params);
if (err)
goto err_free_bpf;
+ err = nfp_bpf_parse_capabilities(app);
+ if (err)
+ goto err_free_neutral_maps;
+
return 0;
+err_free_neutral_maps:
+ rhashtable_destroy(&bpf->maps_neutral);
err_free_bpf:
kfree(bpf);
return err;
}
+static void nfp_check_rhashtable_empty(void *ptr, void *arg)
+{
+ WARN_ON_ONCE(1);
+}
+
static void nfp_bpf_clean(struct nfp_app *app)
{
struct nfp_app_bpf *bpf = app->priv;
@@ -419,6 +452,8 @@ static void nfp_bpf_clean(struct nfp_app *app)
WARN_ON(!skb_queue_empty(&bpf->cmsg_replies));
WARN_ON(!list_empty(&bpf->map_list));
WARN_ON(bpf->maps_in_use || bpf->map_elems_in_use);
+ rhashtable_free_and_destroy(&bpf->maps_neutral,
+ nfp_check_rhashtable_empty, NULL);
kfree(bpf);
}
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h
index 4981c8944ca3..654fe7823e5e 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2016-2017 Netronome Systems, Inc.
+ * Copyright (C) 2016-2018 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General Public License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
@@ -39,6 +39,7 @@
#include <linux/bpf_verifier.h>
#include <linux/kernel.h>
#include <linux/list.h>
+#include <linux/rhashtable.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/wait.h>
@@ -81,10 +82,16 @@ enum static_regs {
enum pkt_vec {
PKT_VEC_PKT_LEN = 0,
PKT_VEC_PKT_PTR = 2,
+ PKT_VEC_QSEL_SET = 4,
+ PKT_VEC_QSEL_VAL = 6,
};
+#define PKT_VEL_QSEL_SET_BIT 4
+
#define pv_len(np) reg_lm(1, PKT_VEC_PKT_LEN)
#define pv_ctm_ptr(np) reg_lm(1, PKT_VEC_PKT_PTR)
+#define pv_qsel_set(np) reg_lm(1, PKT_VEC_QSEL_SET)
+#define pv_qsel_val(np) reg_lm(1, PKT_VEC_QSEL_VAL)
#define stack_reg(np) reg_a(STATIC_REG_STACK)
#define stack_imm(np) imm_b(np)
@@ -114,6 +121,8 @@ enum pkt_vec {
* @maps_in_use: number of currently offloaded maps
* @map_elems_in_use: number of elements allocated to offloaded maps
*
+ * @maps_neutral: hash table of offload-neutral maps (keyed on the map pointer)
+ *
* @adjust_head: adjust head capability
* @adjust_head.flags: extra flags for adjust head
* @adjust_head.off_min: minimal packet offset within buffer required
@@ -133,8 +142,10 @@ enum pkt_vec {
* @helpers.map_lookup: map lookup helper address
* @helpers.map_update: map update helper address
* @helpers.map_delete: map delete helper address
+ * @helpers.perf_event_output: output perf event to a ring buffer
*
* @pseudo_random: FW initialized the pseudo-random machinery (CSRs)
+ * @queue_select: BPF can set the RX queue ID in packet vector
*/
struct nfp_app_bpf {
struct nfp_app *app;
@@ -150,6 +161,8 @@ struct nfp_app_bpf {
unsigned int maps_in_use;
unsigned int map_elems_in_use;
+ struct rhashtable maps_neutral;
+
struct nfp_bpf_cap_adjust_head {
u32 flags;
int off_min;
@@ -171,9 +184,11 @@ struct nfp_app_bpf {
u32 map_lookup;
u32 map_update;
u32 map_delete;
+ u32 perf_event_output;
} helpers;
bool pseudo_random;
+ bool queue_select;
};
enum nfp_bpf_map_use {
@@ -199,6 +214,14 @@ struct nfp_bpf_map {
enum nfp_bpf_map_use use_map[];
};
+struct nfp_bpf_neutral_map {
+ struct rhash_head l;
+ struct bpf_map *ptr;
+ u32 count;
+};
+
+extern const struct rhashtable_params nfp_bpf_maps_neutral_params;
+
struct nfp_prog;
struct nfp_insn_meta;
typedef int (*instr_cb_t)(struct nfp_prog *, struct nfp_insn_meta *);
@@ -236,9 +259,12 @@ struct nfp_bpf_reg_state {
* @xadd_over_16bit: 16bit immediate is not guaranteed
* @xadd_maybe_16bit: 16bit immediate is possible
* @jmp_dst: destination info for jump instructions
+ * @jump_neg_op: jump instruction has inverted immediate, use ADD instead of SUB
* @func_id: function id for call instructions
* @arg1: arg1 for call instructions
* @arg2: arg2 for call instructions
+ * @umin: copy of core verifier umin_value
+ * @umax: copy of core verifier umax_value
* @off: index of first generated machine instruction (in nfp_prog.prog)
* @n: eBPF instruction number
* @flags: eBPF instruction extra optimization flags
@@ -264,13 +290,23 @@ struct nfp_insn_meta {
bool xadd_maybe_16bit;
};
/* jump */
- struct nfp_insn_meta *jmp_dst;
+ struct {
+ struct nfp_insn_meta *jmp_dst;
+ bool jump_neg_op;
+ };
/* function calls */
struct {
u32 func_id;
struct bpf_reg_state arg1;
struct nfp_bpf_reg_state arg2;
};
+ /* We are interested in range info for some operands,
+ * for example, the shift amount.
+ */
+ struct {
+ u64 umin;
+ u64 umax;
+ };
};
unsigned int off;
unsigned short n;
@@ -348,6 +384,25 @@ static inline bool is_mbpf_xadd(const struct nfp_insn_meta *meta)
return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_XADD);
}
+static inline bool is_mbpf_indir_shift(const struct nfp_insn_meta *meta)
+{
+ u8 code = meta->insn.code;
+ bool is_alu, is_shift;
+ u8 opclass, opcode;
+
+ opclass = BPF_CLASS(code);
+ is_alu = opclass == BPF_ALU64 || opclass == BPF_ALU;
+ if (!is_alu)
+ return false;
+
+ opcode = BPF_OP(code);
+ is_shift = opcode == BPF_LSH || opcode == BPF_RSH || opcode == BPF_ARSH;
+ if (!is_shift)
+ return false;
+
+ return BPF_SRC(code) == BPF_X;
+}
+
/**
* struct nfp_prog - nfp BPF program
* @bpf: backpointer to the bpf app priv structure
@@ -363,6 +418,8 @@ static inline bool is_mbpf_xadd(const struct nfp_insn_meta *meta)
* @error: error code if something went wrong
* @stack_depth: max stack depth from the verifier
* @adjust_head_location: if program has single adjust head call - the insn no.
+ * @map_records_cnt: the number of map pointers recorded for this prog
+ * @map_records: the map record pointers from bpf->maps_neutral
* @insns: list of BPF instruction wrappers (struct nfp_insn_meta)
*/
struct nfp_prog {
@@ -386,6 +443,9 @@ struct nfp_prog {
unsigned int stack_depth;
unsigned int adjust_head_location;
+ unsigned int map_records_cnt;
+ struct nfp_bpf_neutral_map **map_records;
+
struct list_head insns;
};
@@ -436,5 +496,7 @@ int nfp_bpf_ctrl_lookup_entry(struct bpf_offloaded_map *offmap,
int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap,
void *key, void *next_key);
+int nfp_bpf_event_output(struct nfp_app_bpf *bpf, struct sk_buff *skb);
+
void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb);
#endif
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
index 42d98792bd25..7eae4c0266f8 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2016-2017 Netronome Systems, Inc.
+ * Copyright (C) 2016-2018 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General Public License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
@@ -57,6 +57,126 @@
#include "../nfp_net.h"
static int
+nfp_map_ptr_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
+ struct bpf_map *map)
+{
+ struct nfp_bpf_neutral_map *record;
+ int err;
+
+ /* Map record paths are entered via ndo, update side is protected. */
+ ASSERT_RTNL();
+
+ /* Reuse path - another offloaded program is already tracking this map. */
+ record = rhashtable_lookup_fast(&bpf->maps_neutral, &map,
+ nfp_bpf_maps_neutral_params);
+ if (record) {
+ nfp_prog->map_records[nfp_prog->map_records_cnt++] = record;
+ record->count++;
+ return 0;
+ }
+
+ /* Grab a single ref to the map for our record. The prog destroy ndo
+ * happens after free_used_maps().
+ */
+ map = bpf_map_inc(map, false);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
+
+ record = kmalloc(sizeof(*record), GFP_KERNEL);
+ if (!record) {
+ err = -ENOMEM;
+ goto err_map_put;
+ }
+
+ record->ptr = map;
+ record->count = 1;
+
+ err = rhashtable_insert_fast(&bpf->maps_neutral, &record->l,
+ nfp_bpf_maps_neutral_params);
+ if (err)
+ goto err_free_rec;
+
+ nfp_prog->map_records[nfp_prog->map_records_cnt++] = record;
+
+ return 0;
+
+err_free_rec:
+ kfree(record);
+err_map_put:
+ bpf_map_put(map);
+ return err;
+}
+
+static void
+nfp_map_ptrs_forget(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog)
+{
+ bool freed = false;
+ int i;
+
+ ASSERT_RTNL();
+
+ for (i = 0; i < nfp_prog->map_records_cnt; i++) {
+ if (--nfp_prog->map_records[i]->count) {
+ nfp_prog->map_records[i] = NULL;
+ continue;
+ }
+
+ WARN_ON(rhashtable_remove_fast(&bpf->maps_neutral,
+ &nfp_prog->map_records[i]->l,
+ nfp_bpf_maps_neutral_params));
+ freed = true;
+ }
+
+ if (freed) {
+ synchronize_rcu();
+
+ for (i = 0; i < nfp_prog->map_records_cnt; i++)
+ if (nfp_prog->map_records[i]) {
+ bpf_map_put(nfp_prog->map_records[i]->ptr);
+ kfree(nfp_prog->map_records[i]);
+ }
+ }
+
+ kfree(nfp_prog->map_records);
+ nfp_prog->map_records = NULL;
+ nfp_prog->map_records_cnt = 0;
+}
+
+static int
+nfp_map_ptrs_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
+ struct bpf_prog *prog)
+{
+ int i, cnt, err;
+
+ /* Quickly count the maps we will have to remember */
+ cnt = 0;
+ for (i = 0; i < prog->aux->used_map_cnt; i++)
+ if (bpf_map_offload_neutral(prog->aux->used_maps[i]))
+ cnt++;
+ if (!cnt)
+ return 0;
+
+ nfp_prog->map_records = kmalloc_array(cnt,
+ sizeof(nfp_prog->map_records[0]),
+ GFP_KERNEL);
+ if (!nfp_prog->map_records)
+ return -ENOMEM;
+
+ for (i = 0; i < prog->aux->used_map_cnt; i++)
+ if (bpf_map_offload_neutral(prog->aux->used_maps[i])) {
+ err = nfp_map_ptr_record(bpf, nfp_prog,
+ prog->aux->used_maps[i]);
+ if (err) {
+ nfp_map_ptrs_forget(bpf, nfp_prog);
+ return err;
+ }
+ }
+ WARN_ON(cnt != nfp_prog->map_records_cnt);
+
+ return 0;
+}
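
The record/forget pair implements a simple shared-record scheme: the first offloaded program using a map allocates the record, later programs bump its count, and the last forget removes it from the hash table and waits an RCU grace period before freeing, since nfp_bpf_event_output() looks records up under rcu_read_lock(). A minimal userspace sketch of just the counting discipline (names hypothetical, hashing and RCU elided):

#include <stdlib.h>

struct record {
	void *ptr;
	unsigned int count;
};

/* First user allocates the shared record, later users reuse it. */
static struct record *record_get(struct record **slot, void *ptr)
{
	if (*slot) {
		(*slot)->count++;
		return *slot;
	}

	*slot = calloc(1, sizeof(**slot));
	if (*slot) {
		(*slot)->ptr = ptr;
		(*slot)->count = 1;
	}
	return *slot;
}

/* Last user frees; the driver additionally unhashes the record and
 * calls synchronize_rcu() before the free.
 */
static void record_put(struct record **slot)
{
	if (*slot && --(*slot)->count == 0) {
		free(*slot);
		*slot = NULL;
	}
}
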
+
+static int
nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog,
unsigned int cnt)
{
@@ -70,6 +190,8 @@ nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog,
meta->insn = prog[i];
meta->n = i;
+ if (is_mbpf_indir_shift(meta))
+ meta->umin = U64_MAX;
list_add_tail(&meta->l, &nfp_prog->insns);
}
@@ -151,7 +273,7 @@ static int nfp_bpf_translate(struct nfp_net *nn, struct bpf_prog *prog)
prog->aux->offload->jited_len = nfp_prog->prog_len * sizeof(u64);
prog->aux->offload->jited_image = nfp_prog->prog;
- return 0;
+ return nfp_map_ptrs_record(nfp_prog->bpf, nfp_prog, prog);
}
static int nfp_bpf_destroy(struct nfp_net *nn, struct bpf_prog *prog)
@@ -159,6 +281,7 @@ static int nfp_bpf_destroy(struct nfp_net *nn, struct bpf_prog *prog)
struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
kvfree(nfp_prog->prog);
+ nfp_map_ptrs_forget(nfp_prog->bpf, nfp_prog);
nfp_prog_free(nfp_prog);
return 0;
@@ -320,6 +443,53 @@ int nfp_ndo_bpf(struct nfp_app *app, struct nfp_net *nn, struct netdev_bpf *bpf)
}
}
+static unsigned long
+nfp_bpf_perf_event_copy(void *dst, const void *src,
+ unsigned long off, unsigned long len)
+{
+ memcpy(dst, src + off, len);
+ return 0;
+}
+
+int nfp_bpf_event_output(struct nfp_app_bpf *bpf, struct sk_buff *skb)
+{
+ struct cmsg_bpf_event *cbe = (void *)skb->data;
+ u32 pkt_size, data_size;
+ struct bpf_map *map;
+
+ if (skb->len < sizeof(struct cmsg_bpf_event))
+ goto err_drop;
+
+ pkt_size = be32_to_cpu(cbe->pkt_size);
+ data_size = be32_to_cpu(cbe->data_size);
+ map = (void *)(unsigned long)be64_to_cpu(cbe->map_ptr);
+
+ if (skb->len < sizeof(struct cmsg_bpf_event) + pkt_size + data_size)
+ goto err_drop;
+ if (cbe->hdr.ver != CMSG_MAP_ABI_VERSION)
+ goto err_drop;
+
+ rcu_read_lock();
+ if (!rhashtable_lookup_fast(&bpf->maps_neutral, &map,
+ nfp_bpf_maps_neutral_params)) {
+ rcu_read_unlock();
+ pr_warn("perf event: dest map pointer %px not recognized, dropping event\n",
+ map);
+ goto err_drop;
+ }
+
+ bpf_event_output(map, be32_to_cpu(cbe->cpu_id),
+ &cbe->data[round_up(pkt_size, 4)], data_size,
+ cbe->data, pkt_size, nfp_bpf_perf_event_copy);
+ rcu_read_unlock();
+
+ dev_consume_skb_any(skb);
+ return 0;
+err_drop:
+ dev_kfree_skb_any(skb);
+ return -EINVAL;
+}
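
The control message parsed here carries the sampled packet bytes first, then the user metadata at the next 4-byte boundary - hence the &cbe->data[round_up(pkt_size, 4)] indexing. A hedged sketch of the offset math (simplified stand-in struct with an assumed field set; the real layout lives in the driver's fw.h):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

#define ROUND_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

/* Simplified stand-in for struct cmsg_bpf_event. */
struct bpf_event {
	uint64_t map_ptr;
	uint32_t data_size;
	uint32_t pkt_size;	/* big endian on the wire */
	uint8_t  data[];	/* pkt bytes, pad to 4B, then metadata */
};

static size_t event_metadata_off(const struct bpf_event *ev)
{
	return ROUND_UP(ntohl(ev->pkt_size), 4);
}

int main(void)
{
	struct bpf_event ev = { .pkt_size = htonl(14) };

	/* 14 packet bytes pad to 16, so metadata starts at data[16] */
	printf("metadata offset: %zu\n", event_metadata_off(&ev));
	return 0;
}
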
+
static int
nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog,
struct netlink_ext_ack *extack)
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
index 06ad53ce4ad9..4bfeba7b21b2 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2016-2017 Netronome Systems, Inc.
+ * Copyright (C) 2016-2018 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General Public License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
@@ -36,6 +36,8 @@
#include <linux/kernel.h>
#include <linux/pkt_cls.h>
+#include "../nfp_app.h"
+#include "../nfp_main.h"
#include "fw.h"
#include "main.h"
@@ -149,15 +151,6 @@ nfp_bpf_map_call_ok(const char *fname, struct bpf_verifier_env *env,
return false;
}
- /* Rest of the checks is only if we re-parse the same insn */
- if (!meta->func_id)
- return true;
-
- if (meta->arg1.map_ptr != reg1->map_ptr) {
- pr_vlog(env, "%s: called for different map\n", fname);
- return false;
- }
-
return true;
}
@@ -216,6 +209,71 @@ nfp_bpf_check_call(struct nfp_prog *nfp_prog, struct bpf_verifier_env *env,
pr_vlog(env, "bpf_get_prandom_u32(): FW doesn't support random number generation\n");
return -EOPNOTSUPP;
+ case BPF_FUNC_perf_event_output:
+ BUILD_BUG_ON(NFP_BPF_SCALAR_VALUE != SCALAR_VALUE ||
+ NFP_BPF_MAP_VALUE != PTR_TO_MAP_VALUE ||
+ NFP_BPF_STACK != PTR_TO_STACK ||
+ NFP_BPF_PACKET_DATA != PTR_TO_PACKET);
+
+ if (!bpf->helpers.perf_event_output) {
+ pr_vlog(env, "event_output: not supported by FW\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* Force current CPU to make sure we can report the event
+ * wherever we get the control message from FW.
+ */
+ if (reg3->var_off.mask & BPF_F_INDEX_MASK ||
+ (reg3->var_off.value & BPF_F_INDEX_MASK) !=
+ BPF_F_CURRENT_CPU) {
+ char tn_buf[48];
+
+ tnum_strn(tn_buf, sizeof(tn_buf), reg3->var_off);
+ pr_vlog(env, "event_output: must use BPF_F_CURRENT_CPU, var_off: %s\n",
+ tn_buf);
+ return -EOPNOTSUPP;
+ }
+
+ /* Save space in meta: we only care about the 4th argument,
+ * so shove it into arg1.
+ */
+ reg1 = cur_regs(env) + BPF_REG_4;
+
+ if (reg1->type != SCALAR_VALUE /* NULL ptr */ &&
+ reg1->type != PTR_TO_STACK &&
+ reg1->type != PTR_TO_MAP_VALUE &&
+ reg1->type != PTR_TO_PACKET) {
+ pr_vlog(env, "event_output: unsupported ptr type: %d\n",
+ reg1->type);
+ return -EOPNOTSUPP;
+ }
+
+ if (reg1->type == PTR_TO_STACK &&
+ !nfp_bpf_stack_arg_ok("event_output", env, reg1, NULL))
+ return -EOPNOTSUPP;
+
+ /* Warn the user that on offload NFP may return success even if
+ * the map is not going to accept the event, since event output
+ * is fully async and the device won't know the state of the map.
+ * There is also a FW limitation on the event length.
+ *
+ * Lost events will not show up on the perf ring; the driver
+ * won't see them at all. Events may also get reordered.
+ */
+ dev_warn_once(&nfp_prog->bpf->app->pf->pdev->dev,
+ "bpf: note: return codes and behavior of bpf_event_output() helper differs for offloaded programs!\n");
+ pr_vlog(env, "warning: return codes and behavior of event_output helper differ for offload!\n");
+
+ if (!meta->func_id)
+ break;
+
+ if (reg1->type != meta->arg1.type) {
+ pr_vlog(env, "event_output: ptr type changed: %d %d\n",
+ meta->arg1.type, reg1->type);
+ return -EINVAL;
+ }
+ break;
+
default:
pr_vlog(env, "unsupported function id: %d\n", func_id);
return -EOPNOTSUPP;
@@ -410,6 +468,30 @@ nfp_bpf_check_ptr(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
}
static int
+nfp_bpf_check_store(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ struct bpf_verifier_env *env)
+{
+ const struct bpf_reg_state *reg = cur_regs(env) + meta->insn.dst_reg;
+
+ if (reg->type == PTR_TO_CTX) {
+ if (nfp_prog->type == BPF_PROG_TYPE_XDP) {
+ /* XDP ctx accesses must be 4B in size */
+ switch (meta->insn.off) {
+ case offsetof(struct xdp_md, rx_queue_index):
+ if (nfp_prog->bpf->queue_select)
+ goto exit_check_ptr;
+ pr_vlog(env, "queue selection not supported by FW\n");
+ return -EOPNOTSUPP;
+ }
+ }
+ pr_vlog(env, "unsupported store to context field\n");
+ return -EOPNOTSUPP;
+ }
+exit_check_ptr:
+ return nfp_bpf_check_ptr(nfp_prog, meta, env, meta->insn.dst_reg);
+}
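
From the program author's side, the store this check now admits is the standard XDP queue-selection idiom: a 4-byte write to rx_queue_index, which the JIT lowers to a packet-vector QSEL update. A minimal sketch of such a program (restricted C, built with clang -target bpf; the queue number is arbitrary):

#include <linux/bpf.h>

/* Steer every packet to RX queue 1. Offload requires the FW to
 * advertise NFP_BPF_CAP_TYPE_QUEUE_SELECT.
 */
__attribute__((section("xdp"), used))
int xdp_pick_queue(struct xdp_md *ctx)
{
	ctx->rx_queue_index = 1;
	return XDP_PASS;
}
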
+
+static int
nfp_bpf_check_xadd(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
struct bpf_verifier_env *env)
{
@@ -464,11 +546,19 @@ nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
return nfp_bpf_check_ptr(nfp_prog, meta, env,
meta->insn.src_reg);
if (is_mbpf_store(meta))
- return nfp_bpf_check_ptr(nfp_prog, meta, env,
- meta->insn.dst_reg);
+ return nfp_bpf_check_store(nfp_prog, meta, env);
+
if (is_mbpf_xadd(meta))
return nfp_bpf_check_xadd(nfp_prog, meta, env);
+ if (is_mbpf_indir_shift(meta)) {
+ const struct bpf_reg_state *sreg =
+ cur_regs(env) + meta->insn.src_reg;
+
+ meta->umin = min(meta->umin, sreg->umin_value);
+ meta->umax = max(meta->umax, sreg->umax_value);
+ }
+
return 0;
}
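
The umin/umax copied here are the core verifier's bounds for the shift-amount register; on the program side, masking the amount before use is what makes those bounds tight. A hypothetical restricted-C fragment:

#include <linux/types.h>

/* Masking gives the verifier umin = 0, umax = 31 for the source
 * register, which the JIT reads back via meta->umin/umax.
 */
static inline __u64 spread_hash(__u64 base, __u32 pkt_hash)
{
	__u32 shift = pkt_hash & 31;	/* tight bounds for verifier */

	return base << shift;		/* BPF_ALU64 | BPF_LSH | BPF_X */
}
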
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index 80df9a5d4217..4a6d2db75071 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -72,6 +72,42 @@ nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan,
push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci);
}
+static int
+nfp_fl_pre_lag(struct nfp_app *app, const struct tc_action *action,
+ struct nfp_fl_payload *nfp_flow, int act_len)
+{
+ size_t act_size = sizeof(struct nfp_fl_pre_lag);
+ struct nfp_fl_pre_lag *pre_lag;
+ struct net_device *out_dev;
+ int err;
+
+ out_dev = tcf_mirred_dev(action);
+ if (!out_dev || !netif_is_lag_master(out_dev))
+ return 0;
+
+ if (act_len + act_size > NFP_FL_MAX_A_SIZ)
+ return -EOPNOTSUPP;
+
+ /* Pre_lag action must be first on action list.
+ * If other actions already exist they need to be pushed forward.
+ */
+ if (act_len)
+ memmove(nfp_flow->action_data + act_size,
+ nfp_flow->action_data, act_len);
+
+ pre_lag = (struct nfp_fl_pre_lag *)nfp_flow->action_data;
+ err = nfp_flower_lag_populate_pre_action(app, out_dev, pre_lag);
+ if (err)
+ return err;
+
+ pre_lag->head.jump_id = NFP_FL_ACTION_OPCODE_PRE_LAG;
+ pre_lag->head.len_lw = act_size >> NFP_FL_LW_SIZ;
+
+ nfp_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
+
+ return act_size;
+}
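
The prepend follows the same pattern as the pre-tunnel action: shift the existing action bytes up by the new action's size, then write the new action at offset 0. A standalone sketch (hypothetical helper; capacity is assumed checked by the caller, as it is above):

#include <string.h>

/* Prepend hdr_len bytes to a buffer already holding act_len bytes
 * of actions.
 */
static void prepend_action(char *action_data, int act_len,
			   const void *hdr, size_t hdr_len)
{
	if (act_len)
		memmove(action_data + hdr_len, action_data, act_len);
	memcpy(action_data, hdr, hdr_len);
}
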
+
static bool nfp_fl_netdev_is_tunnel_type(struct net_device *out_dev,
enum nfp_flower_tun_type tun_type)
{
@@ -88,12 +124,13 @@ static bool nfp_fl_netdev_is_tunnel_type(struct net_device *out_dev,
}
static int
-nfp_fl_output(struct nfp_fl_output *output, const struct tc_action *action,
- struct nfp_fl_payload *nfp_flow, bool last,
- struct net_device *in_dev, enum nfp_flower_tun_type tun_type,
- int *tun_out_cnt)
+nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
+ const struct tc_action *action, struct nfp_fl_payload *nfp_flow,
+ bool last, struct net_device *in_dev,
+ enum nfp_flower_tun_type tun_type, int *tun_out_cnt)
{
size_t act_size = sizeof(struct nfp_fl_output);
+ struct nfp_flower_priv *priv = app->priv;
struct net_device *out_dev;
u16 tmp_flags;
@@ -118,6 +155,15 @@ nfp_fl_output(struct nfp_fl_output *output, const struct tc_action *action,
output->flags = cpu_to_be16(tmp_flags |
NFP_FL_OUT_FLAGS_USE_TUN);
output->port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type);
+ } else if (netif_is_lag_master(out_dev) &&
+ priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
+ int gid;
+
+ output->flags = cpu_to_be16(tmp_flags);
+ gid = nfp_flower_lag_get_output_id(app, out_dev);
+ if (gid < 0)
+ return gid;
+ output->port = cpu_to_be32(NFP_FL_LAG_OUT | gid);
} else {
/* Set action output parameters. */
output->flags = cpu_to_be16(tmp_flags);
@@ -164,7 +210,7 @@ static struct nfp_fl_pre_tunnel *nfp_fl_pre_tunnel(char *act_data, int act_len)
struct nfp_fl_pre_tunnel *pre_tun_act;
/* Pre_tunnel action must be first on action list.
- * If other actions already exist they need pushed forward.
+ * If other actions already exist they need to be pushed forward.
*/
if (act_len)
memmove(act_data + act_size, act_data, act_len);
@@ -443,42 +489,73 @@ nfp_fl_pedit(const struct tc_action *action, char *nfp_action, int *a_len)
}
static int
-nfp_flower_loop_action(const struct tc_action *a,
+nfp_flower_output_action(struct nfp_app *app, const struct tc_action *a,
+ struct nfp_fl_payload *nfp_fl, int *a_len,
+ struct net_device *netdev, bool last,
+ enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
+ int *out_cnt)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_fl_output *output;
+ int err, prelag_size;
+
+ if (*a_len + sizeof(struct nfp_fl_output) > NFP_FL_MAX_A_SIZ)
+ return -EOPNOTSUPP;
+
+ output = (struct nfp_fl_output *)&nfp_fl->action_data[*a_len];
+ err = nfp_fl_output(app, output, a, nfp_fl, last, netdev, *tun_type,
+ tun_out_cnt);
+ if (err)
+ return err;
+
+ *a_len += sizeof(struct nfp_fl_output);
+
+ if (priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
+ /* nfp_fl_pre_lag returns -err or size of prelag action added.
+ * This will be 0 if it is not egressing to a lag dev.
+ */
+ prelag_size = nfp_fl_pre_lag(app, a, nfp_fl, *a_len);
+ if (prelag_size < 0)
+ return prelag_size;
+ else if (prelag_size > 0 && (!last || *out_cnt))
+ return -EOPNOTSUPP;
+
+ *a_len += prelag_size;
+ }
+ (*out_cnt)++;
+
+ return 0;
+}
+
+static int
+nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a,
struct nfp_fl_payload *nfp_fl, int *a_len,
struct net_device *netdev,
- enum nfp_flower_tun_type *tun_type, int *tun_out_cnt)
+ enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
+ int *out_cnt)
{
struct nfp_fl_set_ipv4_udp_tun *set_tun;
struct nfp_fl_pre_tunnel *pre_tun;
struct nfp_fl_push_vlan *psh_v;
struct nfp_fl_pop_vlan *pop_v;
- struct nfp_fl_output *output;
int err;
if (is_tcf_gact_shot(a)) {
nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_DROP);
} else if (is_tcf_mirred_egress_redirect(a)) {
- if (*a_len + sizeof(struct nfp_fl_output) > NFP_FL_MAX_A_SIZ)
- return -EOPNOTSUPP;
-
- output = (struct nfp_fl_output *)&nfp_fl->action_data[*a_len];
- err = nfp_fl_output(output, a, nfp_fl, true, netdev, *tun_type,
- tun_out_cnt);
+ err = nfp_flower_output_action(app, a, nfp_fl, a_len, netdev,
+ true, tun_type, tun_out_cnt,
+ out_cnt);
if (err)
return err;
- *a_len += sizeof(struct nfp_fl_output);
} else if (is_tcf_mirred_egress_mirror(a)) {
- if (*a_len + sizeof(struct nfp_fl_output) > NFP_FL_MAX_A_SIZ)
- return -EOPNOTSUPP;
-
- output = (struct nfp_fl_output *)&nfp_fl->action_data[*a_len];
- err = nfp_fl_output(output, a, nfp_fl, false, netdev, *tun_type,
- tun_out_cnt);
+ err = nfp_flower_output_action(app, a, nfp_fl, a_len, netdev,
+ false, tun_type, tun_out_cnt,
+ out_cnt);
if (err)
return err;
- *a_len += sizeof(struct nfp_fl_output);
} else if (is_tcf_vlan(a) && tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
if (*a_len + sizeof(struct nfp_fl_pop_vlan) > NFP_FL_MAX_A_SIZ)
return -EOPNOTSUPP;
@@ -535,11 +612,12 @@ nfp_flower_loop_action(const struct tc_action *a,
return 0;
}
-int nfp_flower_compile_action(struct tc_cls_flower_offload *flow,
+int nfp_flower_compile_action(struct nfp_app *app,
+ struct tc_cls_flower_offload *flow,
struct net_device *netdev,
struct nfp_fl_payload *nfp_flow)
{
- int act_len, act_cnt, err, tun_out_cnt;
+ int act_len, act_cnt, err, tun_out_cnt, out_cnt;
enum nfp_flower_tun_type tun_type;
const struct tc_action *a;
LIST_HEAD(actions);
@@ -550,11 +628,12 @@ int nfp_flower_compile_action(struct tc_cls_flower_offload *flow,
act_len = 0;
act_cnt = 0;
tun_out_cnt = 0;
+ out_cnt = 0;
tcf_exts_to_list(flow->exts, &actions);
list_for_each_entry(a, &actions, list) {
- err = nfp_flower_loop_action(a, nfp_flow, &act_len, netdev,
- &tun_type, &tun_out_cnt);
+ err = nfp_flower_loop_action(app, a, nfp_flow, &act_len, netdev,
+ &tun_type, &tun_out_cnt, &out_cnt);
if (err)
return err;
act_cnt++;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
index 577659f332e4..cb8565222621 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
@@ -239,8 +239,10 @@ nfp_flower_cmsg_portreify_rx(struct nfp_app *app, struct sk_buff *skb)
static void
nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
{
+ struct nfp_flower_priv *app_priv = app->priv;
struct nfp_flower_cmsg_hdr *cmsg_hdr;
enum nfp_flower_cmsg_type_port type;
+ bool skb_stored = false;
cmsg_hdr = nfp_flower_cmsg_get_hdr(skb);
@@ -258,13 +260,20 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
case NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS:
nfp_tunnel_keep_alive(app, skb);
break;
+ case NFP_FLOWER_CMSG_TYPE_LAG_CONFIG:
+ if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
+ skb_stored = nfp_flower_lag_unprocessed_msg(app, skb);
+ break;
+ }
+ /* fall through */
default:
nfp_flower_cmsg_warn(app, "Cannot handle invalid repr control type %u\n",
type);
goto out;
}
- dev_consume_skb_any(skb);
+ if (!skb_stored)
+ dev_consume_skb_any(skb);
return;
out:
dev_kfree_skb_any(skb);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
index bee4367a2c38..4a7f3510a296 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -92,6 +92,7 @@
#define NFP_FL_ACTION_OPCODE_SET_IPV6_DST 12
#define NFP_FL_ACTION_OPCODE_SET_UDP 14
#define NFP_FL_ACTION_OPCODE_SET_TCP 15
+#define NFP_FL_ACTION_OPCODE_PRE_LAG 16
#define NFP_FL_ACTION_OPCODE_PRE_TUNNEL 17
#define NFP_FL_ACTION_OPCODE_NUM 32
@@ -103,6 +104,9 @@
#define NFP_FL_PUSH_VLAN_CFI BIT(12)
#define NFP_FL_PUSH_VLAN_VID GENMASK(11, 0)
+/* LAG ports */
+#define NFP_FL_LAG_OUT 0xC0DE0000
+
/* Tunnel ports */
#define NFP_FL_PORT_TYPE_TUN 0x50000000
#define NFP_FL_IPV4_TUNNEL_TYPE GENMASK(7, 4)
@@ -177,6 +181,15 @@ struct nfp_fl_pop_vlan {
__be16 reserved;
};
+struct nfp_fl_pre_lag {
+ struct nfp_fl_act_head head;
+ __be16 group_id;
+ u8 lag_version[3];
+ u8 instance;
+};
+
+#define NFP_FL_PRE_LAG_VER_OFF 8
+
struct nfp_fl_pre_tunnel {
struct nfp_fl_act_head head;
__be16 reserved;
@@ -366,6 +379,7 @@ struct nfp_flower_cmsg_hdr {
enum nfp_flower_cmsg_type_port {
NFP_FLOWER_CMSG_TYPE_FLOW_ADD = 0,
NFP_FLOWER_CMSG_TYPE_FLOW_DEL = 2,
+ NFP_FLOWER_CMSG_TYPE_LAG_CONFIG = 4,
NFP_FLOWER_CMSG_TYPE_PORT_REIFY = 6,
NFP_FLOWER_CMSG_TYPE_MAC_REPR = 7,
NFP_FLOWER_CMSG_TYPE_PORT_MOD = 8,
diff --git a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
new file mode 100644
index 000000000000..0c4c957717ea
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
@@ -0,0 +1,726 @@
+/*
+ * Copyright (C) 2018 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "main.h"
+
+/* LAG group config flags. */
+#define NFP_FL_LAG_LAST BIT(1)
+#define NFP_FL_LAG_FIRST BIT(2)
+#define NFP_FL_LAG_DATA BIT(3)
+#define NFP_FL_LAG_XON BIT(4)
+#define NFP_FL_LAG_SYNC BIT(5)
+#define NFP_FL_LAG_SWITCH BIT(6)
+#define NFP_FL_LAG_RESET BIT(7)
+
+/* LAG port state flags. */
+#define NFP_PORT_LAG_LINK_UP BIT(0)
+#define NFP_PORT_LAG_TX_ENABLED BIT(1)
+#define NFP_PORT_LAG_CHANGED BIT(2)
+
+enum nfp_fl_lag_batch {
+ NFP_FL_LAG_BATCH_FIRST,
+ NFP_FL_LAG_BATCH_MEMBER,
+ NFP_FL_LAG_BATCH_FINISHED
+};
+
+/**
+ * struct nfp_flower_cmsg_lag_config - control message payload for LAG config
+ * @ctrl_flags: Configuration flags
+ * @reserved: Reserved for future use
+ * @ttl: Time to live of packet - host always sets to 0xff
+ * @pkt_number: Config message packet number - increment for each message
+ * @batch_ver: Batch version of messages - increment for each batch of messages
+ * @group_id: Group ID applicable
+ * @group_inst: Group instance number - increment when group is reused
+ * @members: Array of 32-bit words listing all active group members
+ */
+struct nfp_flower_cmsg_lag_config {
+ u8 ctrl_flags;
+ u8 reserved[2];
+ u8 ttl;
+ __be32 pkt_number;
+ __be32 batch_ver;
+ __be32 group_id;
+ __be32 group_inst;
+ __be32 members[];
+};
+
+/**
+ * struct nfp_fl_lag_group - list entry for each LAG group
+ * @group_id: Assigned group ID for host/kernel sync
+ * @group_inst: Group instance in case of ID reuse
+ * @list: List entry
+ * @master_ndev: Group master Netdev
+ * @dirty: Marked if the group needs to be synced to HW
+ * @offloaded: Marked if the group is currently offloaded to NIC
+ * @to_remove: Marked if the group should be removed from NIC
+ * @to_destroy: Marked if the group should be removed from driver
+ * @slave_cnt: Number of slaves in group
+ */
+struct nfp_fl_lag_group {
+ unsigned int group_id;
+ u8 group_inst;
+ struct list_head list;
+ struct net_device *master_ndev;
+ bool dirty;
+ bool offloaded;
+ bool to_remove;
+ bool to_destroy;
+ unsigned int slave_cnt;
+};
+
+#define NFP_FL_LAG_PKT_NUMBER_MASK GENMASK(30, 0)
+#define NFP_FL_LAG_VERSION_MASK GENMASK(22, 0)
+#define NFP_FL_LAG_HOST_TTL 0xff
+
+/* Use this ID with zero members to ack a batch config */
+#define NFP_FL_LAG_SYNC_ID 0
+#define NFP_FL_LAG_GROUP_MIN 1 /* ID 0 reserved */
+#define NFP_FL_LAG_GROUP_MAX 32 /* IDs 1 to 31 are valid */
+
+/* wait for more config */
+#define NFP_FL_LAG_DELAY (msecs_to_jiffies(2))
+
+#define NFP_FL_LAG_RETRANS_LIMIT 100 /* max retrans cmsgs to store */
+
+static unsigned int nfp_fl_get_next_pkt_number(struct nfp_fl_lag *lag)
+{
+ lag->pkt_num++;
+ lag->pkt_num &= NFP_FL_LAG_PKT_NUMBER_MASK;
+
+ return lag->pkt_num;
+}
+
+static void nfp_fl_increment_version(struct nfp_fl_lag *lag)
+{
+ /* LSB is not considered by firmware so add 2 for each increment. */
+ lag->batch_ver += 2;
+ lag->batch_ver &= NFP_FL_LAG_VERSION_MASK;
+
+ /* Zero is reserved by firmware. */
+ if (!lag->batch_ver)
+ lag->batch_ver += 2;
+}
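
A quick check of the versioning arithmetic (minimal sketch): the firmware ignores the LSB, so valid versions are the even values 2, 4, ..., and the counter wraps within GENMASK(22, 0) without ever landing on the reserved 0.

#include <stdio.h>

#define VERSION_MASK	0x7fffff	/* GENMASK(22, 0) */

static unsigned int next_version(unsigned int ver)
{
	ver = (ver + 2) & VERSION_MASK;
	return ver ? ver : 2;		/* zero is reserved by FW */
}

int main(void)
{
	/* Wrap: 0x7ffffe + 2 == 0x800000, masks to 0, bumped to 2 */
	printf("%#x\n", next_version(0x7ffffe));
	return 0;
}
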
+
+static struct nfp_fl_lag_group *
+nfp_fl_lag_group_create(struct nfp_fl_lag *lag, struct net_device *master)
+{
+ struct nfp_fl_lag_group *group;
+ struct nfp_flower_priv *priv;
+ int id;
+
+ priv = container_of(lag, struct nfp_flower_priv, nfp_lag);
+
+ id = ida_simple_get(&lag->ida_handle, NFP_FL_LAG_GROUP_MIN,
+ NFP_FL_LAG_GROUP_MAX, GFP_KERNEL);
+ if (id < 0) {
+ nfp_flower_cmsg_warn(priv->app,
+ "No more bonding groups available\n");
+ return ERR_PTR(id);
+ }
+
+ group = kmalloc(sizeof(*group), GFP_KERNEL);
+ if (!group) {
+ ida_simple_remove(&lag->ida_handle, id);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ group->group_id = id;
+ group->master_ndev = master;
+ group->dirty = true;
+ group->offloaded = false;
+ group->to_remove = false;
+ group->to_destroy = false;
+ group->slave_cnt = 0;
+ group->group_inst = ++lag->global_inst;
+ list_add_tail(&group->list, &lag->group_list);
+
+ return group;
+}
+
+static struct nfp_fl_lag_group *
+nfp_fl_lag_find_group_for_master_with_lag(struct nfp_fl_lag *lag,
+ struct net_device *master)
+{
+ struct nfp_fl_lag_group *entry;
+
+ if (!master)
+ return NULL;
+
+ list_for_each_entry(entry, &lag->group_list, list)
+ if (entry->master_ndev == master)
+ return entry;
+
+ return NULL;
+}
+
+int nfp_flower_lag_populate_pre_action(struct nfp_app *app,
+ struct net_device *master,
+ struct nfp_fl_pre_lag *pre_act)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_fl_lag_group *group = NULL;
+ __be32 temp_vers;
+
+ mutex_lock(&priv->nfp_lag.lock);
+ group = nfp_fl_lag_find_group_for_master_with_lag(&priv->nfp_lag,
+ master);
+ if (!group) {
+ mutex_unlock(&priv->nfp_lag.lock);
+ return -ENOENT;
+ }
+
+ pre_act->group_id = cpu_to_be16(group->group_id);
+ temp_vers = cpu_to_be32(priv->nfp_lag.batch_ver <<
+ NFP_FL_PRE_LAG_VER_OFF);
+ memcpy(pre_act->lag_version, &temp_vers, 3);
+ pre_act->instance = group->group_inst;
+ mutex_unlock(&priv->nfp_lag.lock);
+
+ return 0;
+}
+
+int nfp_flower_lag_get_output_id(struct nfp_app *app, struct net_device *master)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_fl_lag_group *group = NULL;
+ int group_id = -ENOENT;
+
+ mutex_lock(&priv->nfp_lag.lock);
+ group = nfp_fl_lag_find_group_for_master_with_lag(&priv->nfp_lag,
+ master);
+ if (group)
+ group_id = group->group_id;
+ mutex_unlock(&priv->nfp_lag.lock);
+
+ return group_id;
+}
+
+static int
+nfp_fl_lag_config_group(struct nfp_fl_lag *lag, struct nfp_fl_lag_group *group,
+ struct net_device **active_members,
+ unsigned int member_cnt, enum nfp_fl_lag_batch *batch)
+{
+ struct nfp_flower_cmsg_lag_config *cmsg_payload;
+ struct nfp_flower_priv *priv;
+ unsigned long int flags;
+ unsigned int size, i;
+ struct sk_buff *skb;
+
+ priv = container_of(lag, struct nfp_flower_priv, nfp_lag);
+ size = sizeof(*cmsg_payload) + sizeof(__be32) * member_cnt;
+ skb = nfp_flower_cmsg_alloc(priv->app, size,
+ NFP_FLOWER_CMSG_TYPE_LAG_CONFIG,
+ GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ cmsg_payload = nfp_flower_cmsg_get_data(skb);
+ flags = 0;
+
+ /* Increment batch version for each new batch of config messages. */
+ if (*batch == NFP_FL_LAG_BATCH_FIRST) {
+ flags |= NFP_FL_LAG_FIRST;
+ nfp_fl_increment_version(lag);
+ *batch = NFP_FL_LAG_BATCH_MEMBER;
+ }
+
+ /* If it is a reset msg then it is also the end of the batch. */
+ if (lag->rst_cfg) {
+ flags |= NFP_FL_LAG_RESET;
+ *batch = NFP_FL_LAG_BATCH_FINISHED;
+ }
+
+ /* To signal the end of a batch, both the switch and last flags are set
+ * and the reserved SYNC group ID is used.
+ */
+ if (*batch == NFP_FL_LAG_BATCH_FINISHED) {
+ flags |= NFP_FL_LAG_SWITCH | NFP_FL_LAG_LAST;
+ lag->rst_cfg = false;
+ cmsg_payload->group_id = cpu_to_be32(NFP_FL_LAG_SYNC_ID);
+ cmsg_payload->group_inst = 0;
+ } else {
+ cmsg_payload->group_id = cpu_to_be32(group->group_id);
+ cmsg_payload->group_inst = cpu_to_be32(group->group_inst);
+ }
+
+ cmsg_payload->reserved[0] = 0;
+ cmsg_payload->reserved[1] = 0;
+ cmsg_payload->ttl = NFP_FL_LAG_HOST_TTL;
+ cmsg_payload->ctrl_flags = flags;
+ cmsg_payload->batch_ver = cpu_to_be32(lag->batch_ver);
+ cmsg_payload->pkt_number = cpu_to_be32(nfp_fl_get_next_pkt_number(lag));
+
+ for (i = 0; i < member_cnt; i++)
+ cmsg_payload->members[i] =
+ cpu_to_be32(nfp_repr_get_port_id(active_members[i]));
+
+ nfp_ctrl_tx(priv->app->ctrl, skb);
+ return 0;
+}
+
+static void nfp_fl_lag_do_work(struct work_struct *work)
+{
+ enum nfp_fl_lag_batch batch = NFP_FL_LAG_BATCH_FIRST;
+ struct nfp_fl_lag_group *entry, *storage;
+ struct delayed_work *delayed_work;
+ struct nfp_flower_priv *priv;
+ struct nfp_fl_lag *lag;
+ int err;
+
+ delayed_work = to_delayed_work(work);
+ lag = container_of(delayed_work, struct nfp_fl_lag, work);
+ priv = container_of(lag, struct nfp_flower_priv, nfp_lag);
+
+ mutex_lock(&lag->lock);
+ list_for_each_entry_safe(entry, storage, &lag->group_list, list) {
+ struct net_device *iter_netdev, **acti_netdevs;
+ struct nfp_flower_repr_priv *repr_priv;
+ int active_count = 0, slaves = 0;
+ struct nfp_repr *repr;
+ unsigned long *flags;
+
+ if (entry->to_remove) {
+ /* Active count of 0 deletes group on hw. */
+ err = nfp_fl_lag_config_group(lag, entry, NULL, 0,
+ &batch);
+ if (!err) {
+ entry->to_remove = false;
+ entry->offloaded = false;
+ } else {
+ nfp_flower_cmsg_warn(priv->app,
+ "group delete failed\n");
+ schedule_delayed_work(&lag->work,
+ NFP_FL_LAG_DELAY);
+ continue;
+ }
+
+ if (entry->to_destroy) {
+ ida_simple_remove(&lag->ida_handle,
+ entry->group_id);
+ list_del(&entry->list);
+ kfree(entry);
+ }
+ continue;
+ }
+
+ acti_netdevs = kmalloc_array(entry->slave_cnt,
+ sizeof(*acti_netdevs), GFP_KERNEL);
+
+ /* Include sanity check in the loop. It may be that a bond has
+ * changed between processing the last notification and the
+ * work queue triggering. If the number of slaves has changed
+ * or it now contains netdevs that cannot be offloaded, ignore
+ * the group until pending notifications are processed.
+ */
+ rcu_read_lock();
+ for_each_netdev_in_bond_rcu(entry->master_ndev, iter_netdev) {
+ if (!nfp_netdev_is_nfp_repr(iter_netdev)) {
+ slaves = 0;
+ break;
+ }
+
+ repr = netdev_priv(iter_netdev);
+
+ if (repr->app != priv->app) {
+ slaves = 0;
+ break;
+ }
+
+ slaves++;
+ if (slaves > entry->slave_cnt)
+ break;
+
+ /* Check the ports for state changes. */
+ repr_priv = repr->app_priv;
+ flags = &repr_priv->lag_port_flags;
+
+ if (*flags & NFP_PORT_LAG_CHANGED) {
+ *flags &= ~NFP_PORT_LAG_CHANGED;
+ entry->dirty = true;
+ }
+
+ if ((*flags & NFP_PORT_LAG_TX_ENABLED) &&
+ (*flags & NFP_PORT_LAG_LINK_UP))
+ acti_netdevs[active_count++] = iter_netdev;
+ }
+ rcu_read_unlock();
+
+ if (slaves != entry->slave_cnt || !entry->dirty) {
+ kfree(acti_netdevs);
+ continue;
+ }
+
+ err = nfp_fl_lag_config_group(lag, entry, acti_netdevs,
+ active_count, &batch);
+ if (!err) {
+ entry->offloaded = true;
+ entry->dirty = false;
+ } else {
+ nfp_flower_cmsg_warn(priv->app,
+ "group offload failed\n");
+ schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
+ }
+
+ kfree(acti_netdevs);
+ }
+
+ /* End the config batch if at least one packet has been batched. */
+ if (batch == NFP_FL_LAG_BATCH_MEMBER) {
+ batch = NFP_FL_LAG_BATCH_FINISHED;
+ err = nfp_fl_lag_config_group(lag, NULL, NULL, 0, &batch);
+ if (err)
+ nfp_flower_cmsg_warn(priv->app,
+ "group batch end cmsg failed\n");
+ }
+
+ mutex_unlock(&lag->lock);
+}
+
+static int
+nfp_fl_lag_put_unprocessed(struct nfp_fl_lag *lag, struct sk_buff *skb)
+{
+ struct nfp_flower_cmsg_lag_config *cmsg_payload;
+
+ cmsg_payload = nfp_flower_cmsg_get_data(skb);
+ if (be32_to_cpu(cmsg_payload->group_id) >= NFP_FL_LAG_GROUP_MAX)
+ return -EINVAL;
+
+ /* Drop cmsg retrans if storage limit is exceeded to prevent
+ * overloading. If the fw notices that expected messages have not been
+ * received in a given time block, it will request a full resync.
+ */
+ if (skb_queue_len(&lag->retrans_skbs) >= NFP_FL_LAG_RETRANS_LIMIT)
+ return -ENOSPC;
+
+ __skb_queue_tail(&lag->retrans_skbs, skb);
+
+ return 0;
+}
+
+static void nfp_fl_send_unprocessed(struct nfp_fl_lag *lag)
+{
+ struct nfp_flower_priv *priv;
+ struct sk_buff *skb;
+
+ priv = container_of(lag, struct nfp_flower_priv, nfp_lag);
+
+ while ((skb = __skb_dequeue(&lag->retrans_skbs)))
+ nfp_ctrl_tx(priv->app->ctrl, skb);
+}
+
+bool nfp_flower_lag_unprocessed_msg(struct nfp_app *app, struct sk_buff *skb)
+{
+ struct nfp_flower_cmsg_lag_config *cmsg_payload;
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_fl_lag_group *group_entry;
+ unsigned long int flags;
+ bool store_skb = false;
+ int err;
+
+ cmsg_payload = nfp_flower_cmsg_get_data(skb);
+ flags = cmsg_payload->ctrl_flags;
+
+ /* Note the intentional fall through below. If DATA and XON are both
+ * set, the message will be stored and sent again with the rest
+ * of the unprocessed messages.
+ */
+
+ /* Store */
+ if (flags & NFP_FL_LAG_DATA)
+ if (!nfp_fl_lag_put_unprocessed(&priv->nfp_lag, skb))
+ store_skb = true;
+
+ /* Send stored */
+ if (flags & NFP_FL_LAG_XON)
+ nfp_fl_send_unprocessed(&priv->nfp_lag);
+
+ /* Resend all */
+ if (flags & NFP_FL_LAG_SYNC) {
+ /* To resend all config:
+ * 1) Clear all unprocessed messages
+ * 2) Mark all groups dirty
+ * 3) Reset NFP group config
+ * 4) Schedule a LAG config update
+ */
+
+ __skb_queue_purge(&priv->nfp_lag.retrans_skbs);
+
+ mutex_lock(&priv->nfp_lag.lock);
+ list_for_each_entry(group_entry, &priv->nfp_lag.group_list,
+ list)
+ group_entry->dirty = true;
+
+ err = nfp_flower_lag_reset(&priv->nfp_lag);
+ if (err)
+ nfp_flower_cmsg_warn(priv->app,
+ "mem err in group reset msg\n");
+ mutex_unlock(&priv->nfp_lag.lock);
+
+ schedule_delayed_work(&priv->nfp_lag.work, 0);
+ }
+
+ return store_skb;
+}
+
+static void
+nfp_fl_lag_schedule_group_remove(struct nfp_fl_lag *lag,
+ struct nfp_fl_lag_group *group)
+{
+ group->to_remove = true;
+
+ schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
+}
+
+static int
+nfp_fl_lag_schedule_group_delete(struct nfp_fl_lag *lag,
+ struct net_device *master)
+{
+ struct nfp_fl_lag_group *group;
+
+ mutex_lock(&lag->lock);
+ group = nfp_fl_lag_find_group_for_master_with_lag(lag, master);
+ if (!group) {
+ mutex_unlock(&lag->lock);
+ return -ENOENT;
+ }
+
+ group->to_remove = true;
+ group->to_destroy = true;
+ mutex_unlock(&lag->lock);
+
+ schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
+ return 0;
+}
+
+static int
+nfp_fl_lag_changeupper_event(struct nfp_fl_lag *lag,
+ struct netdev_notifier_changeupper_info *info)
+{
+ struct net_device *upper = info->upper_dev, *iter_netdev;
+ struct netdev_lag_upper_info *lag_upper_info;
+ struct nfp_fl_lag_group *group;
+ struct nfp_flower_priv *priv;
+ unsigned int slave_count = 0;
+ bool can_offload = true;
+ struct nfp_repr *repr;
+
+ if (!netif_is_lag_master(upper))
+ return 0;
+
+ priv = container_of(lag, struct nfp_flower_priv, nfp_lag);
+
+ rcu_read_lock();
+ for_each_netdev_in_bond_rcu(upper, iter_netdev) {
+ if (!nfp_netdev_is_nfp_repr(iter_netdev)) {
+ can_offload = false;
+ break;
+ }
+ repr = netdev_priv(iter_netdev);
+
+ /* Ensure all ports are created by the same app/on same card. */
+ if (repr->app != priv->app) {
+ can_offload = false;
+ break;
+ }
+
+ slave_count++;
+ }
+ rcu_read_unlock();
+
+ lag_upper_info = info->upper_info;
+
+ /* Firmware supports active/backup and L3/L4 hash bonds. */
+ if (lag_upper_info &&
+ lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_ACTIVEBACKUP &&
+ (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH ||
+ (lag_upper_info->hash_type != NETDEV_LAG_HASH_L34 &&
+ lag_upper_info->hash_type != NETDEV_LAG_HASH_E34))) {
+ can_offload = false;
+ nfp_flower_cmsg_warn(priv->app,
+ "Unable to offload tx_type %u hash %u\n",
+ lag_upper_info->tx_type,
+ lag_upper_info->hash_type);
+ }
+
+ mutex_lock(&lag->lock);
+ group = nfp_fl_lag_find_group_for_master_with_lag(lag, upper);
+
+ if (slave_count == 0 || !can_offload) {
+ /* Cannot offload the group - remove if previously offloaded. */
+ if (group && group->offloaded)
+ nfp_fl_lag_schedule_group_remove(lag, group);
+
+ mutex_unlock(&lag->lock);
+ return 0;
+ }
+
+ if (!group) {
+ group = nfp_fl_lag_group_create(lag, upper);
+ if (IS_ERR(group)) {
+ mutex_unlock(&lag->lock);
+ return PTR_ERR(group);
+ }
+ }
+
+ group->dirty = true;
+ group->slave_cnt = slave_count;
+
+ /* Group may have been queued for removal but is now offloadable. */
+ group->to_remove = false;
+ mutex_unlock(&lag->lock);
+
+ schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
+ return 0;
+}
+
+static int
+nfp_fl_lag_changels_event(struct nfp_fl_lag *lag, struct net_device *netdev,
+ struct netdev_notifier_changelowerstate_info *info)
+{
+ struct netdev_lag_lower_state_info *lag_lower_info;
+ struct nfp_flower_repr_priv *repr_priv;
+ struct nfp_flower_priv *priv;
+ struct nfp_repr *repr;
+ unsigned long *flags;
+
+ if (!netif_is_lag_port(netdev) || !nfp_netdev_is_nfp_repr(netdev))
+ return 0;
+
+ lag_lower_info = info->lower_state_info;
+ if (!lag_lower_info)
+ return 0;
+
+ priv = container_of(lag, struct nfp_flower_priv, nfp_lag);
+ repr = netdev_priv(netdev);
+
+ /* Verify that the repr is associated with this app. */
+ if (repr->app != priv->app)
+ return 0;
+
+ repr_priv = repr->app_priv;
+ flags = &repr_priv->lag_port_flags;
+
+ mutex_lock(&lag->lock);
+ if (lag_lower_info->link_up)
+ *flags |= NFP_PORT_LAG_LINK_UP;
+ else
+ *flags &= ~NFP_PORT_LAG_LINK_UP;
+
+ if (lag_lower_info->tx_enabled)
+ *flags |= NFP_PORT_LAG_TX_ENABLED;
+ else
+ *flags &= ~NFP_PORT_LAG_TX_ENABLED;
+
+ *flags |= NFP_PORT_LAG_CHANGED;
+ mutex_unlock(&lag->lock);
+
+ schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
+ return 0;
+}
+
+static int
+nfp_fl_lag_netdev_event(struct notifier_block *nb, unsigned long event,
+ void *ptr)
+{
+ struct net_device *netdev;
+ struct nfp_fl_lag *lag;
+ int err;
+
+ netdev = netdev_notifier_info_to_dev(ptr);
+ lag = container_of(nb, struct nfp_fl_lag, lag_nb);
+
+ switch (event) {
+ case NETDEV_CHANGEUPPER:
+ err = nfp_fl_lag_changeupper_event(lag, ptr);
+ if (err)
+ return NOTIFY_BAD;
+ return NOTIFY_OK;
+ case NETDEV_CHANGELOWERSTATE:
+ err = nfp_fl_lag_changels_event(lag, netdev, ptr);
+ if (err)
+ return NOTIFY_BAD;
+ return NOTIFY_OK;
+ case NETDEV_UNREGISTER:
+ if (netif_is_bond_master(netdev)) {
+ err = nfp_fl_lag_schedule_group_delete(lag, netdev);
+ if (err)
+ return NOTIFY_BAD;
+ return NOTIFY_OK;
+ }
+ }
+
+ return NOTIFY_DONE;
+}
+
+int nfp_flower_lag_reset(struct nfp_fl_lag *lag)
+{
+ enum nfp_fl_lag_batch batch = NFP_FL_LAG_BATCH_FIRST;
+
+ lag->rst_cfg = true;
+ return nfp_fl_lag_config_group(lag, NULL, NULL, 0, &batch);
+}
+
+void nfp_flower_lag_init(struct nfp_fl_lag *lag)
+{
+ INIT_DELAYED_WORK(&lag->work, nfp_fl_lag_do_work);
+ INIT_LIST_HEAD(&lag->group_list);
+ mutex_init(&lag->lock);
+ ida_init(&lag->ida_handle);
+
+ __skb_queue_head_init(&lag->retrans_skbs);
+
+ /* 0 is a reserved batch version so increment to first valid value. */
+ nfp_fl_increment_version(lag);
+
+ lag->lag_nb.notifier_call = nfp_fl_lag_netdev_event;
+}
+
+void nfp_flower_lag_cleanup(struct nfp_fl_lag *lag)
+{
+ struct nfp_fl_lag_group *entry, *storage;
+
+ cancel_delayed_work_sync(&lag->work);
+
+ __skb_queue_purge(&lag->retrans_skbs);
+
+ /* Remove all groups. */
+ mutex_lock(&lag->lock);
+ list_for_each_entry_safe(entry, storage, &lag->group_list, list) {
+ list_del(&entry->list);
+ kfree(entry);
+ }
+ mutex_unlock(&lag->lock);
+ mutex_destroy(&lag->lock);
+ ida_destroy(&lag->ida_handle);
+}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c
index 84e3b9f5abb1..19cfa162ac65 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
@@ -185,6 +185,10 @@ nfp_flower_repr_netdev_init(struct nfp_app *app, struct net_device *netdev)
static void
nfp_flower_repr_netdev_clean(struct nfp_app *app, struct net_device *netdev)
{
+ struct nfp_repr *repr = netdev_priv(netdev);
+
+ kfree(repr->app_priv);
+
tc_setup_cb_egdev_unregister(netdev, nfp_flower_setup_tc_egress_cb,
netdev_priv(netdev));
}
@@ -225,7 +229,9 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
u8 nfp_pcie = nfp_cppcore_pcie_unit(app->pf->cpp);
struct nfp_flower_priv *priv = app->priv;
atomic_t *replies = &priv->reify_replies;
+ struct nfp_flower_repr_priv *repr_priv;
enum nfp_port_type port_type;
+ struct nfp_repr *nfp_repr;
struct nfp_reprs *reprs;
int i, err, reify_cnt;
const u8 queue = 0;
@@ -247,12 +253,25 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
err = -ENOMEM;
goto err_reprs_clean;
}
- RCU_INIT_POINTER(reprs->reprs[i], repr);
+
+ repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL);
+ if (!repr_priv) {
+ err = -ENOMEM;
+ goto err_reprs_clean;
+ }
+
+ nfp_repr = netdev_priv(repr);
+ nfp_repr->app_priv = repr_priv;
/* For now we only support 1 PF */
WARN_ON(repr_type == NFP_REPR_TYPE_PF && i);
port = nfp_port_alloc(app, port_type, repr);
+ if (IS_ERR(port)) {
+ err = PTR_ERR(port);
+ nfp_repr_free(repr);
+ goto err_reprs_clean;
+ }
if (repr_type == NFP_REPR_TYPE_PF) {
port->pf_id = i;
port->vnic = priv->nn->dp.ctrl_bar;
@@ -271,9 +290,11 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
port_id, port, priv->nn->dp.netdev);
if (err) {
nfp_port_free(port);
+ nfp_repr_free(repr);
goto err_reprs_clean;
}
+ RCU_INIT_POINTER(reprs->reprs[i], repr);
nfp_info(app->cpp, "%s%d Representor(%s) created\n",
repr_type == NFP_REPR_TYPE_PF ? "PF" : "VF", i,
repr->name);
@@ -318,6 +339,8 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
{
struct nfp_eth_table *eth_tbl = app->pf->eth_tbl;
atomic_t *replies = &priv->reify_replies;
+ struct nfp_flower_repr_priv *repr_priv;
+ struct nfp_repr *nfp_repr;
struct sk_buff *ctrl_skb;
struct nfp_reprs *reprs;
int err, reify_cnt;
@@ -344,16 +367,26 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
err = -ENOMEM;
goto err_reprs_clean;
}
- RCU_INIT_POINTER(reprs->reprs[phys_port], repr);
+
+ repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL);
+ if (!repr_priv) {
+ err = -ENOMEM;
+ goto err_reprs_clean;
+ }
+
+ nfp_repr = netdev_priv(repr);
+ nfp_repr->app_priv = repr_priv;
port = nfp_port_alloc(app, NFP_PORT_PHYS_PORT, repr);
if (IS_ERR(port)) {
err = PTR_ERR(port);
+ nfp_repr_free(repr);
goto err_reprs_clean;
}
err = nfp_port_init_phy_port(app->pf, app, port, i);
if (err) {
nfp_port_free(port);
+ nfp_repr_free(repr);
goto err_reprs_clean;
}
@@ -365,6 +398,7 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
cmsg_port_id, port, priv->nn->dp.netdev);
if (err) {
nfp_port_free(port);
+ nfp_repr_free(repr);
goto err_reprs_clean;
}
@@ -373,6 +407,7 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
eth_tbl->ports[i].base,
phys_port);
+ RCU_INIT_POINTER(reprs->reprs[phys_port], repr);
nfp_info(app->cpp, "Phys Port %d Representor(%s) created\n",
phys_port, repr->name);
}
@@ -537,8 +572,22 @@ static int nfp_flower_init(struct nfp_app *app)
else
app_priv->flower_ext_feats = features;
+ /* Tell the firmware that the driver supports lag. */
+ err = nfp_rtsym_write_le(app->pf->rtbl,
+ "_abi_flower_balance_sync_enable", 1);
+ if (!err) {
+ app_priv->flower_ext_feats |= NFP_FL_FEATS_LAG;
+ nfp_flower_lag_init(&app_priv->nfp_lag);
+ } else if (err == -ENOENT) {
+ nfp_warn(app->cpp, "LAG not supported by FW.\n");
+ } else {
+ goto err_cleanup_metadata;
+ }
+
return 0;
+err_cleanup_metadata:
+ nfp_flower_metadata_cleanup(app);
err_free_app_priv:
vfree(app->priv);
return err;
@@ -552,6 +601,9 @@ static void nfp_flower_clean(struct nfp_app *app)
skb_queue_purge(&app_priv->cmsg_skbs_low);
flush_work(&app_priv->cmsg_work);
+ if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG)
+ nfp_flower_lag_cleanup(&app_priv->nfp_lag);
+
nfp_flower_metadata_cleanup(app);
vfree(app->priv);
app->priv = NULL;
@@ -618,11 +670,29 @@ nfp_flower_repr_change_mtu(struct nfp_app *app, struct net_device *netdev,
static int nfp_flower_start(struct nfp_app *app)
{
+ struct nfp_flower_priv *app_priv = app->priv;
+ int err;
+
+ if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
+ err = nfp_flower_lag_reset(&app_priv->nfp_lag);
+ if (err)
+ return err;
+
+ err = register_netdevice_notifier(&app_priv->nfp_lag.lag_nb);
+ if (err)
+ return err;
+ }
+
return nfp_tunnel_config_start(app);
}
static void nfp_flower_stop(struct nfp_app *app)
{
+ struct nfp_flower_priv *app_priv = app->priv;
+
+ if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG)
+ unregister_netdevice_notifier(&app_priv->nfp_lag.lag_nb);
+
nfp_tunnel_config_stop(app);
}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index c67e1b54c614..bbe5764d26cb 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -43,10 +43,13 @@
#include <net/pkt_cls.h>
#include <net/tcp.h>
#include <linux/workqueue.h>
+#include <linux/idr.h>
+struct nfp_fl_pre_lag;
struct net_device;
struct nfp_app;
+#define NFP_FL_STATS_CTX_DONT_CARE cpu_to_be32(0xffffffff)
#define NFP_FL_STATS_ENTRY_RS BIT(20)
#define NFP_FL_STATS_ELEM_RS 4
#define NFP_FL_REPEATED_HASH_MAX BIT(17)
@@ -66,6 +69,7 @@ struct nfp_app;
/* Extra features bitmap. */
#define NFP_FL_FEATS_GENEVE BIT(0)
#define NFP_FL_NBI_MTU_SETTING BIT(1)
+#define NFP_FL_FEATS_LAG BIT(31)
struct nfp_fl_mask_id {
struct circ_buf mask_id_free_list;
@@ -96,6 +100,33 @@ struct nfp_mtu_conf {
};
/**
+ * struct nfp_fl_lag - Flower APP priv data for link aggregation
+ * @lag_nb: Notifier to track master/slave events
+ * @work: Work queue for writing configs to the HW
+ * @lock: Lock to protect lag_group_list
+ * @group_list: List of all master/slave groups offloaded
+ * @ida_handle: IDA to handle group ids
+ * @pkt_num: Incremented for each config packet sent
+ * @batch_ver: Incremented for each batch of config packets
+ * @global_inst: Instance allocator for groups
+ * @rst_cfg: Marker to reset HW LAG config
+ * @retrans_skbs: Cmsgs that could not be processed by HW and require
+ * retransmission
+ */
+struct nfp_fl_lag {
+ struct notifier_block lag_nb;
+ struct delayed_work work;
+ struct mutex lock;
+ struct list_head group_list;
+ struct ida ida_handle;
+ unsigned int pkt_num;
+ unsigned int batch_ver;
+ u8 global_inst;
+ bool rst_cfg;
+ struct sk_buff_head retrans_skbs;
+};
+
+/**
* struct nfp_flower_priv - Flower APP per-vNIC priv data
* @app: Back pointer to app
* @nn: Pointer to vNIC
@@ -127,6 +158,7 @@ struct nfp_mtu_conf {
* from firmware for repr reify
* @reify_wait_queue: wait queue for repr reify response counting
* @mtu_conf: Configuration of repr MTU value
+ * @nfp_lag: Link aggregation data block
*/
struct nfp_flower_priv {
struct nfp_app *app;
@@ -156,6 +188,15 @@ struct nfp_flower_priv {
atomic_t reify_replies;
wait_queue_head_t reify_wait_queue;
struct nfp_mtu_conf mtu_conf;
+ struct nfp_fl_lag nfp_lag;
+};
+
+/**
+ * struct nfp_flower_repr_priv - Flower APP per-repr priv data
+ * @lag_port_flags: Extended port flags to record lag state of repr
+ */
+struct nfp_flower_repr_priv {
+ unsigned long lag_port_flags;
};
struct nfp_fl_key_ls {
@@ -189,9 +230,11 @@ struct nfp_fl_payload {
spinlock_t lock; /* lock stats */
struct nfp_fl_stats stats;
__be32 nfp_tun_ipv4_addr;
+ struct net_device *ingress_dev;
char *unmasked_data;
char *mask_data;
char *action_data;
+ bool ingress_offload;
};
struct nfp_fl_stats_frame {
@@ -211,17 +254,20 @@ int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
struct net_device *netdev,
struct nfp_fl_payload *nfp_flow,
enum nfp_flower_tun_type tun_type);
-int nfp_flower_compile_action(struct tc_cls_flower_offload *flow,
+int nfp_flower_compile_action(struct nfp_app *app,
+ struct tc_cls_flower_offload *flow,
struct net_device *netdev,
struct nfp_fl_payload *nfp_flow);
int nfp_compile_flow_metadata(struct nfp_app *app,
struct tc_cls_flower_offload *flow,
- struct nfp_fl_payload *nfp_flow);
+ struct nfp_fl_payload *nfp_flow,
+ struct net_device *netdev);
int nfp_modify_flow_metadata(struct nfp_app *app,
struct nfp_fl_payload *nfp_flow);
struct nfp_fl_payload *
-nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie);
+nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie,
+ struct net_device *netdev, __be32 host_ctx);
struct nfp_fl_payload *
nfp_flower_remove_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie);
@@ -236,5 +282,14 @@ void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb);
void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb);
int nfp_flower_setup_tc_egress_cb(enum tc_setup_type type, void *type_data,
void *cb_priv);
+void nfp_flower_lag_init(struct nfp_fl_lag *lag);
+void nfp_flower_lag_cleanup(struct nfp_fl_lag *lag);
+int nfp_flower_lag_reset(struct nfp_fl_lag *lag);
+bool nfp_flower_lag_unprocessed_msg(struct nfp_app *app, struct sk_buff *skb);
+int nfp_flower_lag_populate_pre_action(struct nfp_app *app,
+ struct net_device *master,
+ struct nfp_fl_pre_lag *pre_act);
+int nfp_flower_lag_get_output_id(struct nfp_app *app,
+ struct net_device *master);
#endif
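/* Usage sketch (illustrative): the widened nfp_flower_search_fl_table() key
 * above serves two kinds of caller.  The stats path matches on the host
 * context id and wildcards the netdev; the add/del paths match on the
 * ingress netdev and wildcard the context:
 *
 *	nfp_flower_search_fl_table(app, cookie, NULL, stats->stats_con_id);
 *	nfp_flower_search_fl_table(app, cookie, ingr_dev,
 *				   NFP_FL_STATS_CTX_DONT_CARE);
 */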
diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
index db977cf8e933..21668aa435e8 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
@@ -99,14 +99,18 @@ static int nfp_get_stats_entry(struct nfp_app *app, u32 *stats_context_id)
/* Must be called with either RTNL or rcu_read_lock */
struct nfp_fl_payload *
-nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie)
+nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie,
+ struct net_device *netdev, __be32 host_ctx)
{
struct nfp_flower_priv *priv = app->priv;
struct nfp_fl_payload *flower_entry;
hash_for_each_possible_rcu(priv->flow_table, flower_entry, link,
tc_flower_cookie)
- if (flower_entry->tc_flower_cookie == tc_flower_cookie)
+ if (flower_entry->tc_flower_cookie == tc_flower_cookie &&
+ (!netdev || flower_entry->ingress_dev == netdev) &&
+ (host_ctx == NFP_FL_STATS_CTX_DONT_CARE ||
+ flower_entry->meta.host_ctx_id == host_ctx))
return flower_entry;
return NULL;
@@ -121,13 +125,11 @@ nfp_flower_update_stats(struct nfp_app *app, struct nfp_fl_stats_frame *stats)
flower_cookie = be64_to_cpu(stats->stats_cookie);
rcu_read_lock();
- nfp_flow = nfp_flower_search_fl_table(app, flower_cookie);
+ nfp_flow = nfp_flower_search_fl_table(app, flower_cookie, NULL,
+ stats->stats_con_id);
if (!nfp_flow)
goto exit_rcu_unlock;
- if (nfp_flow->meta.host_ctx_id != stats->stats_con_id)
- goto exit_rcu_unlock;
-
spin_lock(&nfp_flow->lock);
nfp_flow->stats.pkts += be32_to_cpu(stats->pkt_count);
nfp_flow->stats.bytes += be64_to_cpu(stats->byte_count);
@@ -317,7 +319,8 @@ nfp_check_mask_remove(struct nfp_app *app, char *mask_data, u32 mask_len,
int nfp_compile_flow_metadata(struct nfp_app *app,
struct tc_cls_flower_offload *flow,
- struct nfp_fl_payload *nfp_flow)
+ struct nfp_fl_payload *nfp_flow,
+ struct net_device *netdev)
{
struct nfp_flower_priv *priv = app->priv;
struct nfp_fl_payload *check_entry;
@@ -348,7 +351,8 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
nfp_flow->stats.bytes = 0;
nfp_flow->stats.used = jiffies;
- check_entry = nfp_flower_search_fl_table(app, flow->cookie);
+ check_entry = nfp_flower_search_fl_table(app, flow->cookie, netdev,
+ NFP_FL_STATS_CTX_DONT_CARE);
if (check_entry) {
if (nfp_release_stats_entry(app, stats_cxt))
return -EINVAL;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 114d2ab02a38..c42e64f32333 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -345,7 +345,7 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
}
static struct nfp_fl_payload *
-nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
+nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer, bool egress)
{
struct nfp_fl_payload *flow_pay;
@@ -371,6 +371,8 @@ nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
flow_pay->meta.flags = 0;
spin_lock_init(&flow_pay->lock);
+ flow_pay->ingress_offload = !egress;
+
return flow_pay;
err_free_mask:
@@ -402,8 +404,20 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
struct nfp_flower_priv *priv = app->priv;
struct nfp_fl_payload *flow_pay;
struct nfp_fl_key_ls *key_layer;
+ struct net_device *ingr_dev;
int err;
+ ingr_dev = egress ? NULL : netdev;
+ flow_pay = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev,
+ NFP_FL_STATS_CTX_DONT_CARE);
+ if (flow_pay) {
+		/* Ignore as duplicate if it has been added by a different cb. */
+ if (flow_pay->ingress_offload && egress)
+ return 0;
+ else
+ return -EOPNOTSUPP;
+ }
+
key_layer = kmalloc(sizeof(*key_layer), GFP_KERNEL);
if (!key_layer)
return -ENOMEM;
@@ -413,22 +427,25 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
if (err)
goto err_free_key_ls;
- flow_pay = nfp_flower_allocate_new(key_layer);
+ flow_pay = nfp_flower_allocate_new(key_layer, egress);
if (!flow_pay) {
err = -ENOMEM;
goto err_free_key_ls;
}
+ flow_pay->ingress_dev = egress ? NULL : netdev;
+
err = nfp_flower_compile_flow_match(flow, key_layer, netdev, flow_pay,
tun_type);
if (err)
goto err_destroy_flow;
- err = nfp_flower_compile_action(flow, netdev, flow_pay);
+ err = nfp_flower_compile_action(app, flow, netdev, flow_pay);
if (err)
goto err_destroy_flow;
- err = nfp_compile_flow_metadata(app, flow, flow_pay);
+ err = nfp_compile_flow_metadata(app, flow, flow_pay,
+ flow_pay->ingress_dev);
if (err)
goto err_destroy_flow;
@@ -462,6 +479,7 @@ err_free_key_ls:
* @app: Pointer to the APP handle
* @netdev: netdev structure.
* @flow: TC flower classifier offload structure
+ * @egress: Netdev is the egress dev.
*
* Removes a flow from the repeated hash structure and clears the
* action payload.
@@ -470,15 +488,18 @@ err_free_key_ls:
*/
static int
nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
- struct tc_cls_flower_offload *flow)
+ struct tc_cls_flower_offload *flow, bool egress)
{
struct nfp_port *port = nfp_port_from_netdev(netdev);
struct nfp_fl_payload *nfp_flow;
+ struct net_device *ingr_dev;
int err;
- nfp_flow = nfp_flower_search_fl_table(app, flow->cookie);
+ ingr_dev = egress ? NULL : netdev;
+ nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev,
+ NFP_FL_STATS_CTX_DONT_CARE);
if (!nfp_flow)
- return -ENOENT;
+ return egress ? 0 : -ENOENT;
err = nfp_modify_flow_metadata(app, nfp_flow);
if (err)
@@ -505,7 +526,9 @@ err_free_flow:
/**
* nfp_flower_get_stats() - Populates flow stats obtained from hardware.
* @app: Pointer to the APP handle
+ * @netdev: Netdev structure.
* @flow: TC flower classifier offload structure
+ * @egress: Netdev is the egress dev.
*
 * Populates a flow statistics structure which corresponds to a
* specific flow.
@@ -513,14 +536,21 @@ err_free_flow:
* Return: negative value on error, 0 if stats populated successfully.
*/
static int
-nfp_flower_get_stats(struct nfp_app *app, struct tc_cls_flower_offload *flow)
+nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
+ struct tc_cls_flower_offload *flow, bool egress)
{
struct nfp_fl_payload *nfp_flow;
+ struct net_device *ingr_dev;
- nfp_flow = nfp_flower_search_fl_table(app, flow->cookie);
+ ingr_dev = egress ? NULL : netdev;
+ nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev,
+ NFP_FL_STATS_CTX_DONT_CARE);
if (!nfp_flow)
return -EINVAL;
+ if (nfp_flow->ingress_offload && egress)
+ return 0;
+
spin_lock_bh(&nfp_flow->lock);
tcf_exts_stats_update(flow->exts, nfp_flow->stats.bytes,
nfp_flow->stats.pkts, nfp_flow->stats.used);
@@ -543,9 +573,9 @@ nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
case TC_CLSFLOWER_REPLACE:
return nfp_flower_add_offload(app, netdev, flower, egress);
case TC_CLSFLOWER_DESTROY:
- return nfp_flower_del_offload(app, netdev, flower);
+ return nfp_flower_del_offload(app, netdev, flower, egress);
case TC_CLSFLOWER_STATS:
- return nfp_flower_get_stats(app, flower);
+ return nfp_flower_get_stats(app, netdev, flower, egress);
}
return -EOPNOTSUPP;
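/* Behaviour sketch (illustrative): with the egress flag plumbed through
 * above, a rule replayed through both the ingress and egress callbacks is
 * offloaded once and the duplicate is tolerated rather than failed:
 *
 *	add_offload(..., egress=false)  -> 0, offloaded, ingress_offload set
 *	add_offload(..., egress=true)   -> 0, recognised as duplicate
 *	get_stats(..., egress=true)     -> 0, stats reported on ingress only
 */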
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_abi.h b/drivers/net/ethernet/netronome/nfp/nfp_abi.h
new file mode 100644
index 000000000000..8b56c27931bf
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_abi.h
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2018 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __NFP_ABI__
+#define __NFP_ABI__ 1
+
+#include <linux/types.h>
+
+#define NFP_MBOX_SYM_NAME "_abi_nfd_pf%u_mbox"
+#define NFP_MBOX_SYM_MIN_SIZE 16 /* When no data needed */
+
+#define NFP_MBOX_CMD 0x00
+#define NFP_MBOX_RET 0x04
+#define NFP_MBOX_DATA_LEN 0x08
+#define NFP_MBOX_RESERVED 0x0c
+#define NFP_MBOX_DATA 0x10
+
+/**
+ * enum nfp_mbox_cmd - PF mailbox commands
+ *
+ * @NFP_MBOX_NO_CMD: null command
+ * Used to indicate previous command has finished.
+ *
+ * @NFP_MBOX_POOL_GET: get shared buffer pool info/config
+ * Input - struct nfp_shared_buf_pool_id
+ * Output - struct nfp_shared_buf_pool_info_get
+ *
+ * @NFP_MBOX_POOL_SET: set shared buffer pool info/config
+ * Input - struct nfp_shared_buf_pool_info_set
+ * Output - None
+ *
+ * @NFP_MBOX_PCIE_ABM_ENABLE: enable PCIe-side advanced buffer management
+ * Enable advanced buffer management of the PCIe block. If ABM is disabled,
+ * the PCIe block maintains a very short queue of buffers and does tail drop.
+ * ABM allows more advanced buffering and priority control.
+ * Input - None
+ * Output - None
+ *
+ * @NFP_MBOX_PCIE_ABM_DISABLE: disable PCIe-side advanced buffer management
+ * Input - None
+ * Output - None
+ */
+enum nfp_mbox_cmd {
+ NFP_MBOX_NO_CMD = 0x00,
+
+ NFP_MBOX_POOL_GET = 0x01,
+ NFP_MBOX_POOL_SET = 0x02,
+
+ NFP_MBOX_PCIE_ABM_ENABLE = 0x03,
+ NFP_MBOX_PCIE_ABM_DISABLE = 0x04,
+};
+
+#define NFP_SHARED_BUF_COUNT_SYM_NAME "_abi_nfd_pf%u_sb_cnt"
+#define NFP_SHARED_BUF_TABLE_SYM_NAME "_abi_nfd_pf%u_sb_tbl"
+
+/**
+ * struct nfp_shared_buf - NFP shared buffer description
+ * @id: numerical user-visible id of the shared buffer
+ * @size: size in bytes of the buffer
+ * @ingress_pools_count: number of ingress pools
+ * @egress_pools_count: number of egress pools
+ * @ingress_tc_count: number of ingress traffic classes
+ * @egress_tc_count: number of egress traffic classes
+ * @pool_size_unit: pool size may be in credits, each credit is
+ * @pool_size_unit bytes
+ */
+struct nfp_shared_buf {
+ __le32 id;
+ __le32 size;
+ __le16 ingress_pools_count;
+ __le16 egress_pools_count;
+ __le16 ingress_tc_count;
+ __le16 egress_tc_count;
+
+ __le32 pool_size_unit;
+};
+
+/**
+ * struct nfp_shared_buf_pool_id - shared buffer pool identification
+ * @shared_buf: shared buffer id
+ * @pool: pool index
+ */
+struct nfp_shared_buf_pool_id {
+ __le32 shared_buf;
+ __le32 pool;
+};
+
+/**
+ * struct nfp_shared_buf_pool_info_get - struct devlink_sb_pool_info mirror
+ * @pool_type: one of enum devlink_sb_pool_type
+ * @size: pool size in units of SB's @pool_size_unit
+ * @threshold_type: one of enum devlink_sb_threshold_type
+ */
+struct nfp_shared_buf_pool_info_get {
+ __le32 pool_type;
+ __le32 size;
+ __le32 threshold_type;
+};
+
+/**
+ * struct nfp_shared_buf_pool_info_set - packed args of sb_pool_set
+ * @id: pool identification info
+ * @size: pool size in units of SB's @pool_size_unit
+ * @threshold_type: one of enum devlink_sb_threshold_type
+ */
+struct nfp_shared_buf_pool_info_set {
+ struct nfp_shared_buf_pool_id id;
+ __le32 size;
+ __le32 threshold_type;
+};
+
+#endif
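/* Layout sketch (illustrative): the offsets above define a 16-byte header
 * followed by the payload, which is why a mailbox symbol of
 * NFP_MBOX_SYM_MIN_SIZE carries no data and the usable payload is the rtsym
 * size minus that header:
 *
 *	0x00  NFP_MBOX_CMD       command; 0 (NFP_MBOX_NO_CMD) when idle
 *	0x04  NFP_MBOX_RET       return value of the last command
 *	0x08  NFP_MBOX_DATA_LEN  length of valid data
 *	0x0c  NFP_MBOX_RESERVED
 *	0x10  NFP_MBOX_DATA      in/out payload
 */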
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.c b/drivers/net/ethernet/netronome/nfp/nfp_app.c
index 6aedef0ad433..f28b244f4ee7 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2017 Netronome Systems, Inc.
+ * Copyright (C) 2017-2018 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
@@ -43,6 +43,7 @@
#include "nfp_main.h"
#include "nfp_net.h"
#include "nfp_net_repr.h"
+#include "nfp_port.h"
static const struct nfp_app_type *apps[] = {
[NFP_APP_CORE_NIC] = &app_nic,
@@ -54,6 +55,9 @@ static const struct nfp_app_type *apps[] = {
#ifdef CONFIG_NFP_APP_FLOWER
[NFP_APP_FLOWER_NIC] = &app_flower,
#endif
+#ifdef CONFIG_NFP_APP_ABM_NIC
+ [NFP_APP_ACTIVE_BUFFER_MGMT_NIC] = &app_abm,
+#endif
};
struct nfp_app *nfp_app_from_netdev(struct net_device *netdev)
@@ -82,6 +86,27 @@ const char *nfp_app_mip_name(struct nfp_app *app)
return nfp_mip_name(app->pf->mip);
}
+u64 *nfp_app_port_get_stats(struct nfp_port *port, u64 *data)
+{
+ if (!port || !port->app || !port->app->type->port_get_stats)
+ return data;
+ return port->app->type->port_get_stats(port->app, port, data);
+}
+
+int nfp_app_port_get_stats_count(struct nfp_port *port)
+{
+ if (!port || !port->app || !port->app->type->port_get_stats_count)
+ return 0;
+ return port->app->type->port_get_stats_count(port->app, port);
+}
+
+u8 *nfp_app_port_get_stats_strings(struct nfp_port *port, u8 *data)
+{
+ if (!port || !port->app || !port->app->type->port_get_stats_strings)
+ return data;
+ return port->app->type->port_get_stats_strings(port->app, port, data);
+}
+
struct sk_buff *
nfp_app_ctrl_msg_alloc(struct nfp_app *app, unsigned int size, gfp_t priority)
{
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h
index 2d9cb2528fc7..ee74caacb015 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h
@@ -57,11 +57,13 @@ enum nfp_app_id {
NFP_APP_CORE_NIC = 0x1,
NFP_APP_BPF_NIC = 0x2,
NFP_APP_FLOWER_NIC = 0x3,
+ NFP_APP_ACTIVE_BUFFER_MGMT_NIC = 0x4,
};
extern const struct nfp_app_type app_nic;
extern const struct nfp_app_type app_bpf;
extern const struct nfp_app_type app_flower;
+extern const struct nfp_app_type app_abm;
/**
* struct nfp_app_type - application definition
@@ -88,6 +90,9 @@ extern const struct nfp_app_type app_flower;
* @repr_stop: representor netdev stop callback
* @check_mtu: MTU change request on a netdev (verify it is valid)
* @repr_change_mtu: MTU change request on repr (make and verify change)
+ * @port_get_stats: get extra ethtool statistics for a port
+ * @port_get_stats_count: get count of extra statistics for a port
+ * @port_get_stats_strings: get strings for extra statistics
* @start: start application logic
* @stop: stop application logic
* @ctrl_msg_rx: control message handler
@@ -95,6 +100,7 @@ extern const struct nfp_app_type app_flower;
* @bpf: BPF ndo offload-related calls
* @xdp_offload: offload an XDP program
* @eswitch_mode_get: get SR-IOV eswitch mode
+ * @eswitch_mode_set: set SR-IOV eswitch mode (under pf->lock)
* @sriov_enable: app-specific sriov initialisation
* @sriov_disable: app-specific sriov clean-up
* @repr_get: get representor netdev
@@ -129,6 +135,12 @@ struct nfp_app_type {
int (*repr_change_mtu)(struct nfp_app *app, struct net_device *netdev,
int new_mtu);
+ u64 *(*port_get_stats)(struct nfp_app *app,
+ struct nfp_port *port, u64 *data);
+ int (*port_get_stats_count)(struct nfp_app *app, struct nfp_port *port);
+ u8 *(*port_get_stats_strings)(struct nfp_app *app,
+ struct nfp_port *port, u8 *data);
+
int (*start)(struct nfp_app *app);
void (*stop)(struct nfp_app *app);
@@ -146,6 +158,7 @@ struct nfp_app_type {
void (*sriov_disable)(struct nfp_app *app);
enum devlink_eswitch_mode (*eswitch_mode_get)(struct nfp_app *app);
+ int (*eswitch_mode_set)(struct nfp_app *app, u16 mode);
struct net_device *(*repr_get)(struct nfp_app *app, u32 id);
};
@@ -370,6 +383,13 @@ static inline int nfp_app_eswitch_mode_get(struct nfp_app *app, u16 *mode)
return 0;
}
+static inline int nfp_app_eswitch_mode_set(struct nfp_app *app, u16 mode)
+{
+ if (!app->type->eswitch_mode_set)
+ return -EOPNOTSUPP;
+ return app->type->eswitch_mode_set(app, mode);
+}
+
static inline int nfp_app_sriov_enable(struct nfp_app *app, int num_vfs)
{
if (!app || !app->type->sriov_enable)
@@ -393,6 +413,10 @@ static inline struct net_device *nfp_app_repr_get(struct nfp_app *app, u32 id)
struct nfp_app *nfp_app_from_netdev(struct net_device *netdev);
+u64 *nfp_app_port_get_stats(struct nfp_port *port, u64 *data);
+int nfp_app_port_get_stats_count(struct nfp_port *port);
+u8 *nfp_app_port_get_stats_strings(struct nfp_port *port, u8 *data);
+
struct nfp_reprs *
nfp_reprs_get_locked(struct nfp_app *app, enum nfp_repr_type type);
struct nfp_reprs *
@@ -410,5 +434,7 @@ void nfp_app_free(struct nfp_app *app);
int nfp_app_nic_vnic_alloc(struct nfp_app *app, struct nfp_net *nn,
unsigned int id);
+int nfp_app_nic_vnic_init_phy_port(struct nfp_pf *pf, struct nfp_app *app,
+ struct nfp_net *nn, unsigned int id);
#endif
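/* Usage sketch (illustrative, hypothetical app): an app opts into the new
 * hooks simply by filling them in its nfp_app_type; absent callbacks keep
 * the previous behaviour (-EOPNOTSUPP for mode set, zero extra stats).
 * All nfp_example_* names below are made up for illustration:
 */
static enum devlink_eswitch_mode
nfp_example_eswitch_mode_get(struct nfp_app *app);
static int nfp_example_eswitch_mode_set(struct nfp_app *app, u16 mode);
static u64 *nfp_example_port_get_stats(struct nfp_app *app,
				       struct nfp_port *port, u64 *data);
static int nfp_example_port_get_stats_count(struct nfp_app *app,
					    struct nfp_port *port);
static u8 *nfp_example_port_get_stats_strings(struct nfp_app *app,
					      struct nfp_port *port, u8 *data);

const struct nfp_app_type app_example = {
	.eswitch_mode_get	= nfp_example_eswitch_mode_get,
	.eswitch_mode_set	= nfp_example_eswitch_mode_set,
	.port_get_stats		= nfp_example_port_get_stats,
	.port_get_stats_count	= nfp_example_port_get_stats_count,
	.port_get_stats_strings	= nfp_example_port_get_stats_strings,
};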
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c b/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c
index b9618c37403f..e2dfe4f168bb 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c
@@ -38,9 +38,8 @@
#include "nfp_net.h"
#include "nfp_port.h"
-static int
-nfp_app_nic_vnic_init_phy_port(struct nfp_pf *pf, struct nfp_app *app,
- struct nfp_net *nn, unsigned int id)
+int nfp_app_nic_vnic_init_phy_port(struct nfp_pf *pf, struct nfp_app *app,
+ struct nfp_net *nn, unsigned int id)
{
int err;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.h b/drivers/net/ethernet/netronome/nfp/nfp_asm.h
index 5f2b2f24f4fa..f6677bc9875a 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_asm.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.h
@@ -72,8 +72,21 @@
#define OP_BR_ADDR_LO 0x007ffc00000ULL
#define OP_BR_ADDR_HI 0x10000000000ULL
-#define nfp_is_br(_insn) \
- (((_insn) & OP_BR_BASE_MASK) == OP_BR_BASE)
+#define OP_BR_BIT_BASE 0x0d000000000ULL
+#define OP_BR_BIT_BASE_MASK 0x0f800080300ULL
+#define OP_BR_BIT_A_SRC 0x000000000ffULL
+#define OP_BR_BIT_B_SRC 0x0000003fc00ULL
+#define OP_BR_BIT_BV 0x00000040000ULL
+#define OP_BR_BIT_SRC_LMEXTN 0x40000000000ULL
+#define OP_BR_BIT_DEFBR OP_BR_DEFBR
+#define OP_BR_BIT_ADDR_LO OP_BR_ADDR_LO
+#define OP_BR_BIT_ADDR_HI OP_BR_ADDR_HI
+
+static inline bool nfp_is_br(u64 insn)
+{
+ return (insn & OP_BR_BASE_MASK) == OP_BR_BASE ||
+ (insn & OP_BR_BIT_BASE_MASK) == OP_BR_BIT_BASE;
+}
enum br_mask {
BR_BEQ = 0x00,
@@ -161,6 +174,7 @@ enum shf_op {
SHF_OP_NONE = 0,
SHF_OP_AND = 2,
SHF_OP_OR = 5,
+ SHF_OP_ASHR = 6,
};
enum shf_sc {
@@ -183,16 +197,18 @@ enum shf_sc {
#define OP_ALU_DST_LMEXTN 0x80000000000ULL
enum alu_op {
- ALU_OP_NONE = 0x00,
- ALU_OP_ADD = 0x01,
- ALU_OP_NOT = 0x04,
- ALU_OP_ADD_2B = 0x05,
- ALU_OP_AND = 0x08,
- ALU_OP_SUB_C = 0x0d,
- ALU_OP_ADD_C = 0x11,
- ALU_OP_OR = 0x14,
- ALU_OP_SUB = 0x15,
- ALU_OP_XOR = 0x18,
+ ALU_OP_NONE = 0x00,
+ ALU_OP_ADD = 0x01,
+ ALU_OP_NOT = 0x04,
+ ALU_OP_ADD_2B = 0x05,
+ ALU_OP_AND = 0x08,
+ ALU_OP_AND_NOT_A = 0x0c,
+ ALU_OP_SUB_C = 0x0d,
+ ALU_OP_AND_NOT_B = 0x10,
+ ALU_OP_ADD_C = 0x11,
+ ALU_OP_OR = 0x14,
+ ALU_OP_SUB = 0x15,
+ ALU_OP_XOR = 0x18,
};
enum alu_dst_ab {
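/* Illustration (not part of the patch): nfp_is_br() now recognises the
 * branch-on-bit encoding alongside plain branches, so for raw instruction
 * words:
 *
 *	nfp_is_br(OP_BR_BASE | addr_bits)      -> true  (plain branch)
 *	nfp_is_br(OP_BR_BIT_BASE | src_bits)   -> true  (branch on bit)
 *	nfp_is_br(alu_insn)                    -> false
 */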
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
index eb0fc614673d..db463e20a876 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
@@ -92,7 +92,7 @@ nfp_devlink_set_lanes(struct nfp_pf *pf, unsigned int idx, unsigned int lanes)
static int
nfp_devlink_port_split(struct devlink *devlink, unsigned int port_index,
- unsigned int count)
+ unsigned int count, struct netlink_ext_ack *extack)
{
struct nfp_pf *pf = devlink_priv(devlink);
struct nfp_eth_table_port eth_port;
@@ -123,7 +123,8 @@ out:
}
static int
-nfp_devlink_port_unsplit(struct devlink *devlink, unsigned int port_index)
+nfp_devlink_port_unsplit(struct devlink *devlink, unsigned int port_index,
+ struct netlink_ext_ack *extack)
{
struct nfp_pf *pf = devlink_priv(devlink);
struct nfp_eth_table_port eth_port;
@@ -149,6 +150,26 @@ out:
return ret;
}
+static int
+nfp_devlink_sb_pool_get(struct devlink *devlink, unsigned int sb_index,
+ u16 pool_index, struct devlink_sb_pool_info *pool_info)
+{
+ struct nfp_pf *pf = devlink_priv(devlink);
+
+ return nfp_shared_buf_pool_get(pf, sb_index, pool_index, pool_info);
+}
+
+static int
+nfp_devlink_sb_pool_set(struct devlink *devlink, unsigned int sb_index,
+ u16 pool_index,
+ u32 size, enum devlink_sb_threshold_type threshold_type)
+{
+ struct nfp_pf *pf = devlink_priv(devlink);
+
+ return nfp_shared_buf_pool_set(pf, sb_index, pool_index,
+ size, threshold_type);
+}
+
static int nfp_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
struct nfp_pf *pf = devlink_priv(devlink);
@@ -156,10 +177,25 @@ static int nfp_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
return nfp_app_eswitch_mode_get(pf->app, mode);
}
+static int nfp_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
+{
+ struct nfp_pf *pf = devlink_priv(devlink);
+ int ret;
+
+ mutex_lock(&pf->lock);
+ ret = nfp_app_eswitch_mode_set(pf->app, mode);
+ mutex_unlock(&pf->lock);
+
+ return ret;
+}
+
const struct devlink_ops nfp_devlink_ops = {
.port_split = nfp_devlink_port_split,
.port_unsplit = nfp_devlink_port_unsplit,
+ .sb_pool_get = nfp_devlink_sb_pool_get,
+ .sb_pool_set = nfp_devlink_sb_pool_set,
.eswitch_mode_get = nfp_devlink_eswitch_mode_get,
+ .eswitch_mode_set = nfp_devlink_eswitch_mode_set,
};
int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port)
@@ -175,8 +211,9 @@ int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port)
return ret;
devlink_port_type_eth_set(&port->dl_port, port->netdev);
- if (eth_port.is_split)
- devlink_port_split_set(&port->dl_port, eth_port.label_port);
+ devlink_port_attrs_set(&port->dl_port, DEVLINK_PORT_FLAVOUR_PHYSICAL,
+ eth_port.label_port, eth_port.is_split,
+ eth_port.label_subport);
devlink = priv_to_devlink(app->pf);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c
index c4b1f344b4da..46b76d5a726c 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c
@@ -55,6 +55,7 @@
#include "nfpcore/nfp6000_pcie.h"
+#include "nfp_abi.h"
#include "nfp_app.h"
#include "nfp_main.h"
#include "nfp_net.h"
@@ -75,6 +76,122 @@ static const struct pci_device_id nfp_pci_device_ids[] = {
};
MODULE_DEVICE_TABLE(pci, nfp_pci_device_ids);
+int nfp_pf_rtsym_read_optional(struct nfp_pf *pf, const char *format,
+ unsigned int default_val)
+{
+ char name[256];
+ int err = 0;
+ u64 val;
+
+ snprintf(name, sizeof(name), format, nfp_cppcore_pcie_unit(pf->cpp));
+
+ val = nfp_rtsym_read_le(pf->rtbl, name, &err);
+ if (err) {
+ if (err == -ENOENT)
+ return default_val;
+ nfp_err(pf->cpp, "Unable to read symbol %s\n", name);
+ return err;
+ }
+
+ return val;
+}
+
+u8 __iomem *
+nfp_pf_map_rtsym(struct nfp_pf *pf, const char *name, const char *sym_fmt,
+ unsigned int min_size, struct nfp_cpp_area **area)
+{
+ char pf_symbol[256];
+
+ snprintf(pf_symbol, sizeof(pf_symbol), sym_fmt,
+ nfp_cppcore_pcie_unit(pf->cpp));
+
+ return nfp_rtsym_map(pf->rtbl, pf_symbol, name, min_size, area);
+}
+
+/* Callers should hold the devlink instance lock */
+int nfp_mbox_cmd(struct nfp_pf *pf, u32 cmd, void *in_data, u64 in_length,
+ void *out_data, u64 out_length)
+{
+ unsigned long long addr;
+ unsigned long err_at;
+ u64 max_data_sz;
+ u32 val = 0;
+ u32 cpp_id;
+ int n, err;
+
+ if (!pf->mbox)
+ return -EOPNOTSUPP;
+
+ cpp_id = NFP_CPP_ISLAND_ID(pf->mbox->target, NFP_CPP_ACTION_RW, 0,
+ pf->mbox->domain);
+ addr = pf->mbox->addr;
+ max_data_sz = pf->mbox->size - NFP_MBOX_SYM_MIN_SIZE;
+
+ /* Check if cmd field is clear */
+ err = nfp_cpp_readl(pf->cpp, cpp_id, addr + NFP_MBOX_CMD, &val);
+ if (err || val) {
+ nfp_warn(pf->cpp, "failed to issue command (%u): %u, err: %d\n",
+ cmd, val, err);
+ return err ?: -EBUSY;
+ }
+
+ in_length = min(in_length, max_data_sz);
+ n = nfp_cpp_write(pf->cpp, cpp_id, addr + NFP_MBOX_DATA,
+ in_data, in_length);
+ if (n != in_length)
+ return -EIO;
+ /* Write data_len and wipe reserved */
+ err = nfp_cpp_writeq(pf->cpp, cpp_id, addr + NFP_MBOX_DATA_LEN,
+ in_length);
+ if (err)
+ return err;
+
+ /* Read back for ordering */
+ err = nfp_cpp_readl(pf->cpp, cpp_id, addr + NFP_MBOX_DATA_LEN, &val);
+ if (err)
+ return err;
+
+ /* Write cmd and wipe return value */
+ err = nfp_cpp_writeq(pf->cpp, cpp_id, addr + NFP_MBOX_CMD, cmd);
+ if (err)
+ return err;
+
+ err_at = jiffies + 5 * HZ;
+ while (true) {
+ /* Wait for command to go to 0 (NFP_MBOX_NO_CMD) */
+ err = nfp_cpp_readl(pf->cpp, cpp_id, addr + NFP_MBOX_CMD, &val);
+ if (err)
+ return err;
+ if (!val)
+ break;
+
+ if (time_is_before_eq_jiffies(err_at))
+ return -ETIMEDOUT;
+
+ msleep(5);
+ }
+
+ /* Copy output if any (could be error info, do it before reading ret) */
+ err = nfp_cpp_readl(pf->cpp, cpp_id, addr + NFP_MBOX_DATA_LEN, &val);
+ if (err)
+ return err;
+
+ out_length = min_t(u32, val, min(out_length, max_data_sz));
+ n = nfp_cpp_read(pf->cpp, cpp_id, addr + NFP_MBOX_DATA,
+ out_data, out_length);
+ if (n != out_length)
+ return -EIO;
+
+ /* Check if there is an error */
+ err = nfp_cpp_readl(pf->cpp, cpp_id, addr + NFP_MBOX_RET, &val);
+ if (err)
+ return err;
+ if (val)
+ return -val;
+
+ return out_length;
+}
+
static bool nfp_board_ready(struct nfp_pf *pf)
{
const char *cp;
@@ -436,6 +553,25 @@ static void nfp_fw_unload(struct nfp_pf *pf)
nfp_nsp_close(nsp);
}
+static int nfp_pf_find_rtsyms(struct nfp_pf *pf)
+{
+ char pf_symbol[256];
+ unsigned int pf_id;
+
+ pf_id = nfp_cppcore_pcie_unit(pf->cpp);
+
+ /* Optional per-PCI PF mailbox */
+ snprintf(pf_symbol, sizeof(pf_symbol), NFP_MBOX_SYM_NAME, pf_id);
+ pf->mbox = nfp_rtsym_lookup(pf->rtbl, pf_symbol);
+ if (pf->mbox && pf->mbox->size < NFP_MBOX_SYM_MIN_SIZE) {
+ nfp_err(pf->cpp, "PF mailbox symbol too small: %llu < %d\n",
+ pf->mbox->size, NFP_MBOX_SYM_MIN_SIZE);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int nfp_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *pci_id)
{
@@ -486,6 +622,10 @@ static int nfp_pci_probe(struct pci_dev *pdev,
goto err_disable_msix;
}
+ err = nfp_resource_table_init(pf->cpp);
+ if (err)
+ goto err_cpp_free;
+
pf->hwinfo = nfp_hwinfo_read(pf->cpp);
dev_info(&pdev->dev, "Assembly: %s%s%s-%s CPLD: %s\n",
@@ -506,6 +646,10 @@ static int nfp_pci_probe(struct pci_dev *pdev,
pf->mip = nfp_mip_open(pf->cpp);
pf->rtbl = __nfp_rtsym_table_read(pf->cpp, pf->mip);
+ err = nfp_pf_find_rtsyms(pf);
+ if (err)
+ goto err_fw_unload;
+
pf->dump_flag = NFP_DUMP_NSP_DIAG;
pf->dumpspec = nfp_net_dump_load_dumpspec(pf->cpp, pf->rtbl);
@@ -548,6 +692,7 @@ err_fw_unload:
vfree(pf->dumpspec);
err_hwinfo_free:
kfree(pf->hwinfo);
+err_cpp_free:
nfp_cpp_free(pf->cpp);
err_disable_msix:
destroy_workqueue(pf->wq);
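/* Usage sketch (illustrative): nfp_shared_buf.c below drives the mailbox
 * exactly this way -- struct in, struct out, with a negative return being
 * an error and a positive one the number of output bytes:
 *
 *	n = nfp_mbox_cmd(pf, NFP_MBOX_POOL_GET, &id, sizeof(id),
 *			 &get_data, sizeof(get_data));
 *	if (n < 0)
 *		return n;
 *	if (n < sizeof(get_data))
 *		return -EIO;
 */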
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h
index 42211083b51f..595b3dc280e3 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h
@@ -46,10 +46,10 @@
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/workqueue.h>
+#include <net/devlink.h>
struct dentry;
struct device;
-struct devlink_ops;
struct pci_dev;
struct nfp_cpp;
@@ -60,7 +60,9 @@ struct nfp_mip;
struct nfp_net;
struct nfp_nsp_identify;
struct nfp_port;
+struct nfp_rtsym;
struct nfp_rtsym_table;
+struct nfp_shared_buf;
/**
* struct nfp_dumpspec - NFP FW dump specification structure
@@ -87,6 +89,7 @@ struct nfp_dumpspec {
* @vf_cfg_mem: Pointer to mapped VF configuration area
* @vfcfg_tbl2_area: Pointer to the CPP area for the VF config table
* @vfcfg_tbl2: Pointer to mapped VF config table
+ * @mbox: RTSym of per-PCI PF mailbox (under devlink lock)
* @irq_entries: Array of MSI-X entries for all vNICs
* @limit_vfs: Number of VFs supported by firmware (~0 for PCI limit)
* @num_vfs: Number of SR-IOV VFs enabled
@@ -108,6 +111,8 @@ struct nfp_dumpspec {
* @ports: Linked list of port structures (struct nfp_port)
* @wq: Workqueue for running works which need to grab @lock
* @port_refresh_work: Work entry for taking netdevs out
+ * @shared_bufs: Array of shared buffer structures if FW has any SBs
+ * @num_shared_bufs: Number of elements in @shared_bufs
* @lock: Protects all fields which may change after probe
*/
struct nfp_pf {
@@ -127,6 +132,8 @@ struct nfp_pf {
struct nfp_cpp_area *vfcfg_tbl2_area;
u8 __iomem *vfcfg_tbl2;
+ const struct nfp_rtsym *mbox;
+
struct msix_entry *irq_entries;
unsigned int limit_vfs;
@@ -158,6 +165,9 @@ struct nfp_pf {
struct workqueue_struct *wq;
struct work_struct port_refresh_work;
+ struct nfp_shared_buf *shared_bufs;
+ unsigned int num_shared_bufs;
+
struct mutex lock;
};
@@ -177,6 +187,14 @@ nfp_net_get_mac_addr(struct nfp_pf *pf, struct net_device *netdev,
bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb);
+int nfp_pf_rtsym_read_optional(struct nfp_pf *pf, const char *format,
+ unsigned int default_val);
+u8 __iomem *
+nfp_pf_map_rtsym(struct nfp_pf *pf, const char *name, const char *sym_fmt,
+ unsigned int min_size, struct nfp_cpp_area **area);
+int nfp_mbox_cmd(struct nfp_pf *pf, u32 cmd, void *in_data, u64 in_length,
+ void *out_data, u64 out_length);
+
enum nfp_dump_diag {
NFP_DUMP_NSP_DIAG = 0,
};
@@ -188,4 +206,11 @@ s64 nfp_net_dump_calculate_size(struct nfp_pf *pf, struct nfp_dumpspec *spec,
int nfp_net_dump_populate_buffer(struct nfp_pf *pf, struct nfp_dumpspec *spec,
struct ethtool_dump *dump_param, void *dest);
+int nfp_shared_buf_register(struct nfp_pf *pf);
+void nfp_shared_buf_unregister(struct nfp_pf *pf);
+int nfp_shared_buf_pool_get(struct nfp_pf *pf, unsigned int sb, u16 pool_index,
+ struct devlink_sb_pool_info *pool_info);
+int nfp_shared_buf_pool_set(struct nfp_pf *pf, unsigned int sb,
+ u16 pool_index, u32 size,
+ enum devlink_sb_threshold_type threshold_type);
#endif /* NFP_MAIN_H */
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index bd7d8ae31e17..57cb035dcc6d 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -545,6 +545,7 @@ struct nfp_net_dp {
/**
* struct nfp_net - NFP network device structure
* @dp: Datapath structure
+ * @id: vNIC id within the PF (0 for VFs)
* @fw_ver: Firmware version
* @cap: Capabilities advertised by the Firmware
 * @max_mtu: Maximum supported MTU advertised by the Firmware
@@ -597,6 +598,8 @@ struct nfp_net {
struct nfp_net_fw_version fw_ver;
+ u32 id;
+
u32 cap;
u32 max_mtu;
@@ -909,7 +912,7 @@ int nfp_net_ring_reconfig(struct nfp_net *nn, struct nfp_net_dp *new,
void nfp_net_debugfs_create(void);
void nfp_net_debugfs_destroy(void);
struct dentry *nfp_net_debugfs_device_add(struct pci_dev *pdev);
-void nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir, int id);
+void nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir);
void nfp_net_debugfs_dir_clean(struct dentry **dir);
#else
static inline void nfp_net_debugfs_create(void)
@@ -926,7 +929,7 @@ static inline struct dentry *nfp_net_debugfs_device_add(struct pci_dev *pdev)
}
static inline void
-nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir, int id)
+nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir)
{
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 1eb6549f2a54..75110c8d6a90 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -1722,7 +1722,7 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
act = bpf_prog_run_xdp(xdp_prog, &xdp);
- pkt_len -= xdp.data - orig_data;
+ pkt_len = xdp.data_end - xdp.data;
pkt_off += xdp.data - orig_data;
switch (act) {
@@ -3277,6 +3277,25 @@ nfp_net_features_check(struct sk_buff *skb, struct net_device *dev,
return features;
}
+static int
+nfp_net_get_phys_port_name(struct net_device *netdev, char *name, size_t len)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+ int n;
+
+ if (nn->port)
+ return nfp_port_get_phys_port_name(netdev, name, len);
+
+ if (nn->dp.is_vf)
+ return -EOPNOTSUPP;
+
+ n = snprintf(name, len, "n%d", nn->id);
+ if (n >= len)
+ return -EINVAL;
+
+ return 0;
+}
+
/**
* nfp_net_set_vxlan_port() - set vxlan port in SW and reconfigure HW
* @nn: NFP Net device to reconfigure
@@ -3475,7 +3494,7 @@ const struct net_device_ops nfp_net_netdev_ops = {
.ndo_set_mac_address = nfp_net_set_mac_address,
.ndo_set_features = nfp_net_set_features,
.ndo_features_check = nfp_net_features_check,
- .ndo_get_phys_port_name = nfp_port_get_phys_port_name,
+ .ndo_get_phys_port_name = nfp_net_get_phys_port_name,
.ndo_udp_tunnel_add = nfp_net_add_vxlan_port,
.ndo_udp_tunnel_del = nfp_net_del_vxlan_port,
.ndo_bpf = nfp_net_xdp,
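/* Illustration (not part of the patch): deriving pkt_len from the XDP
 * window rather than from the head pointer alone keeps the length correct
 * when a program moves either end of the packet:
 *
 *	bpf_xdp_adjust_head(xdp, 14);        // data     += 14
 *	bpf_xdp_adjust_tail(xdp, -4);        // data_end -=  4
 *	pkt_len = xdp.data_end - xdp.data;   // reflects both adjustments
 */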
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
index 67cdd8330c59..099b63d67451 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
@@ -201,7 +201,7 @@ static const struct file_operations nfp_xdp_q_fops = {
.llseek = seq_lseek
};
-void nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir, int id)
+void nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir)
{
struct dentry *queues, *tx, *rx, *xdp;
char name[20];
@@ -211,7 +211,7 @@ void nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir, int id)
return;
if (nfp_net_is_data_vnic(nn))
- sprintf(name, "vnic%d", id);
+ sprintf(name, "vnic%d", nn->id);
else
strcpy(name, "ctrl-vnic");
nn->debugfs_dir = debugfs_create_dir(name, ddir);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index c9016419bfa0..26d1cc4e2906 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -437,7 +437,7 @@ static int nfp_net_set_ringparam(struct net_device *netdev,
return nfp_net_set_ring_size(nn, rxd_cnt, txd_cnt);
}
-static __printf(2, 3) u8 *nfp_pr_et(u8 *data, const char *fmt, ...)
+__printf(2, 3) u8 *nfp_pr_et(u8 *data, const char *fmt, ...)
{
va_list args;
@@ -637,6 +637,7 @@ static void nfp_net_get_strings(struct net_device *netdev,
nn->dp.num_tx_rings,
false);
data = nfp_mac_get_stats_strings(netdev, data);
+ data = nfp_app_port_get_stats_strings(nn->port, data);
break;
}
}
@@ -651,6 +652,7 @@ nfp_net_get_stats(struct net_device *netdev, struct ethtool_stats *stats,
data = nfp_vnic_get_hw_stats(data, nn->dp.ctrl_bar,
nn->dp.num_rx_rings, nn->dp.num_tx_rings);
data = nfp_mac_get_stats(netdev, data);
+ data = nfp_app_port_get_stats(nn->port, data);
}
static int nfp_net_get_sset_count(struct net_device *netdev, int sset)
@@ -662,7 +664,8 @@ static int nfp_net_get_sset_count(struct net_device *netdev, int sset)
return nfp_vnic_get_sw_stats_count(netdev) +
nfp_vnic_get_hw_stats_count(nn->dp.num_rx_rings,
nn->dp.num_tx_rings) +
- nfp_mac_get_stats_count(netdev);
+ nfp_mac_get_stats_count(netdev) +
+ nfp_app_port_get_stats_count(nn->port);
default:
return -EOPNOTSUPP;
}
@@ -679,6 +682,7 @@ static void nfp_port_get_strings(struct net_device *netdev,
data = nfp_vnic_get_hw_stats_strings(data, 0, 0, true);
else
data = nfp_mac_get_stats_strings(netdev, data);
+ data = nfp_app_port_get_stats_strings(port, data);
break;
}
}
@@ -693,6 +697,7 @@ nfp_port_get_stats(struct net_device *netdev, struct ethtool_stats *stats,
data = nfp_vnic_get_hw_stats(data, port->vnic, 0, 0);
else
data = nfp_mac_get_stats(netdev, data);
+ data = nfp_app_port_get_stats(port, data);
}
static int nfp_port_get_sset_count(struct net_device *netdev, int sset)
@@ -706,6 +711,7 @@ static int nfp_port_get_sset_count(struct net_device *netdev, int sset)
count = nfp_vnic_get_hw_stats_count(0, 0);
else
count = nfp_mac_get_stats_count(netdev);
+ count += nfp_app_port_get_stats_count(port);
return count;
default:
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
index 45cd2092e498..28516eecccc8 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
@@ -101,48 +101,15 @@ nfp_net_find_port(struct nfp_eth_table *eth_tbl, unsigned int index)
return NULL;
}
-static int
-nfp_net_pf_rtsym_read_optional(struct nfp_pf *pf, const char *format,
- unsigned int default_val)
-{
- char name[256];
- int err = 0;
- u64 val;
-
- snprintf(name, sizeof(name), format, nfp_cppcore_pcie_unit(pf->cpp));
-
- val = nfp_rtsym_read_le(pf->rtbl, name, &err);
- if (err) {
- if (err == -ENOENT)
- return default_val;
- nfp_err(pf->cpp, "Unable to read symbol %s\n", name);
- return err;
- }
-
- return val;
-}
-
static int nfp_net_pf_get_num_ports(struct nfp_pf *pf)
{
- return nfp_net_pf_rtsym_read_optional(pf, "nfd_cfg_pf%u_num_ports", 1);
+ return nfp_pf_rtsym_read_optional(pf, "nfd_cfg_pf%u_num_ports", 1);
}
static int nfp_net_pf_get_app_id(struct nfp_pf *pf)
{
- return nfp_net_pf_rtsym_read_optional(pf, "_pf%u_net_app_id",
- NFP_APP_CORE_NIC);
-}
-
-static u8 __iomem *
-nfp_net_pf_map_rtsym(struct nfp_pf *pf, const char *name, const char *sym_fmt,
- unsigned int min_size, struct nfp_cpp_area **area)
-{
- char pf_symbol[256];
-
- snprintf(pf_symbol, sizeof(pf_symbol), sym_fmt,
- nfp_cppcore_pcie_unit(pf->cpp));
-
- return nfp_rtsym_map(pf->rtbl, pf_symbol, name, min_size, area);
+ return nfp_pf_rtsym_read_optional(pf, "_pf%u_net_app_id",
+ NFP_APP_CORE_NIC);
}
static void nfp_net_pf_free_vnic(struct nfp_pf *pf, struct nfp_net *nn)
@@ -211,11 +178,13 @@ nfp_net_pf_init_vnic(struct nfp_pf *pf, struct nfp_net *nn, unsigned int id)
{
int err;
+ nn->id = id;
+
err = nfp_net_init(nn);
if (err)
return err;
- nfp_net_debugfs_vnic_add(nn, pf->ddir, id);
+ nfp_net_debugfs_vnic_add(nn, pf->ddir);
if (nn->port) {
err = nfp_devlink_port_register(pf->app, nn->port);
@@ -379,9 +348,8 @@ nfp_net_pf_app_init(struct nfp_pf *pf, u8 __iomem *qc_bar, unsigned int stride)
if (!nfp_app_needs_ctrl_vnic(pf->app))
return 0;
- ctrl_bar = nfp_net_pf_map_rtsym(pf, "net.ctrl", "_pf%u_net_ctrl_bar",
- NFP_PF_CSR_SLICE_SIZE,
- &pf->ctrl_vnic_bar);
+ ctrl_bar = nfp_pf_map_rtsym(pf, "net.ctrl", "_pf%u_net_ctrl_bar",
+ NFP_PF_CSR_SLICE_SIZE, &pf->ctrl_vnic_bar);
if (IS_ERR(ctrl_bar)) {
nfp_err(pf->cpp, "Failed to find ctrl vNIC memory symbol\n");
err = PTR_ERR(ctrl_bar);
@@ -507,8 +475,8 @@ static int nfp_net_pci_map_mem(struct nfp_pf *pf)
int err;
min_size = pf->max_data_vnics * NFP_PF_CSR_SLICE_SIZE;
- mem = nfp_net_pf_map_rtsym(pf, "net.bar0", "_pf%d_net_bar0",
- min_size, &pf->data_vnic_bar);
+ mem = nfp_pf_map_rtsym(pf, "net.bar0", "_pf%d_net_bar0",
+ min_size, &pf->data_vnic_bar);
if (IS_ERR(mem)) {
nfp_err(pf->cpp, "Failed to find data vNIC memory symbol\n");
return PTR_ERR(mem);
@@ -528,10 +496,9 @@ static int nfp_net_pci_map_mem(struct nfp_pf *pf)
}
}
- pf->vf_cfg_mem = nfp_net_pf_map_rtsym(pf, "net.vfcfg",
- "_pf%d_net_vf_bar",
- NFP_NET_CFG_BAR_SZ *
- pf->limit_vfs, &pf->vf_cfg_bar);
+ pf->vf_cfg_mem = nfp_pf_map_rtsym(pf, "net.vfcfg", "_pf%d_net_vf_bar",
+ NFP_NET_CFG_BAR_SZ * pf->limit_vfs,
+ &pf->vf_cfg_bar);
if (IS_ERR(pf->vf_cfg_mem)) {
if (PTR_ERR(pf->vf_cfg_mem) != -ENOENT) {
err = PTR_ERR(pf->vf_cfg_mem);
@@ -541,9 +508,9 @@ static int nfp_net_pci_map_mem(struct nfp_pf *pf)
}
min_size = NFP_NET_VF_CFG_SZ * pf->limit_vfs + NFP_NET_VF_CFG_MB_SZ;
- pf->vfcfg_tbl2 = nfp_net_pf_map_rtsym(pf, "net.vfcfg_tbl2",
- "_pf%d_net_vf_cfg2",
- min_size, &pf->vfcfg_tbl2_area);
+ pf->vfcfg_tbl2 = nfp_pf_map_rtsym(pf, "net.vfcfg_tbl2",
+ "_pf%d_net_vf_cfg2",
+ min_size, &pf->vfcfg_tbl2_area);
if (IS_ERR(pf->vfcfg_tbl2)) {
if (PTR_ERR(pf->vfcfg_tbl2) != -ENOENT) {
err = PTR_ERR(pf->vfcfg_tbl2);
@@ -763,6 +730,10 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
if (err)
goto err_app_clean;
+ err = nfp_shared_buf_register(pf);
+ if (err)
+ goto err_devlink_unreg;
+
mutex_lock(&pf->lock);
pf->ddir = nfp_net_debugfs_device_add(pf->pdev);
@@ -796,6 +767,8 @@ err_free_vnics:
err_clean_ddir:
nfp_net_debugfs_dir_clean(&pf->ddir);
mutex_unlock(&pf->lock);
+ nfp_shared_buf_unregister(pf);
+err_devlink_unreg:
cancel_work_sync(&pf->port_refresh_work);
devlink_unregister(devlink);
err_app_clean:
@@ -823,6 +796,7 @@ void nfp_net_pci_remove(struct nfp_pf *pf)
mutex_unlock(&pf->lock);
+ nfp_shared_buf_unregister(pf);
devlink_unregister(priv_to_devlink(pf));
nfp_net_pf_free_irqs(pf);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
index 0cd077addb26..d7b712f6362f 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
@@ -277,6 +277,7 @@ const struct net_device_ops nfp_repr_netdev_ops = {
.ndo_get_vf_config = nfp_app_get_vf_config,
.ndo_set_vf_link_state = nfp_app_set_vf_link_state,
.ndo_set_features = nfp_port_set_features,
+ .ndo_set_mac_address = eth_mac_addr,
};
static void nfp_repr_clean(struct nfp_repr *repr)
@@ -348,18 +349,24 @@ err_clean:
return err;
}
-static void nfp_repr_free(struct nfp_repr *repr)
+static void __nfp_repr_free(struct nfp_repr *repr)
{
free_percpu(repr->stats);
free_netdev(repr->netdev);
}
-struct net_device *nfp_repr_alloc(struct nfp_app *app)
+void nfp_repr_free(struct net_device *netdev)
+{
+ __nfp_repr_free(netdev_priv(netdev));
+}
+
+struct net_device *
+nfp_repr_alloc_mqs(struct nfp_app *app, unsigned int txqs, unsigned int rxqs)
{
struct net_device *netdev;
struct nfp_repr *repr;
- netdev = alloc_etherdev(sizeof(*repr));
+ netdev = alloc_etherdev_mqs(sizeof(*repr), txqs, rxqs);
if (!netdev)
return NULL;
@@ -380,12 +387,12 @@ err_free_netdev:
return NULL;
}
-static void nfp_repr_clean_and_free(struct nfp_repr *repr)
+void nfp_repr_clean_and_free(struct nfp_repr *repr)
{
nfp_info(repr->app->cpp, "Destroying Representor(%s)\n",
repr->netdev->name);
nfp_repr_clean(repr);
- nfp_repr_free(repr);
+ __nfp_repr_free(repr);
}
void nfp_reprs_clean_and_free(struct nfp_app *app, struct nfp_reprs *reprs)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h
index a621e8ff528e..1bf2b18109ab 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h
@@ -76,6 +76,7 @@ struct nfp_repr_pcpu_stats {
* @port: Port of representor
* @app: APP handle
* @stats: Statistic of packets hitting CPU
+ * @app_priv: Pointer for APP data
*/
struct nfp_repr {
struct net_device *netdev;
@@ -83,6 +84,7 @@ struct nfp_repr {
struct nfp_port *port;
struct nfp_app *app;
struct nfp_repr_pcpu_stats __percpu *stats;
+ void *app_priv;
};
/**
@@ -123,11 +125,18 @@ void nfp_repr_inc_rx_stats(struct net_device *netdev, unsigned int len);
int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
u32 cmsg_port_id, struct nfp_port *port,
struct net_device *pf_netdev);
-struct net_device *nfp_repr_alloc(struct nfp_app *app);
+void nfp_repr_free(struct net_device *netdev);
+struct net_device *
+nfp_repr_alloc_mqs(struct nfp_app *app, unsigned int txqs, unsigned int rxqs);
+void nfp_repr_clean_and_free(struct nfp_repr *repr);
void nfp_reprs_clean_and_free(struct nfp_app *app, struct nfp_reprs *reprs);
void nfp_reprs_clean_and_free_by_type(struct nfp_app *app,
enum nfp_repr_type type);
struct nfp_reprs *nfp_reprs_alloc(unsigned int num_reprs);
int nfp_reprs_resync_phys_ports(struct nfp_app *app);
+static inline struct net_device *nfp_repr_alloc(struct nfp_app *app)
+{
+ return nfp_repr_alloc_mqs(app, 1, 1);
+}
#endif /* NFP_NET_REPR_H */
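/* Usage sketch (illustrative): callers that need real multi-queue reprs can
 * now size the netdev explicitly, while existing callers keep the
 * single-queue default through the inline wrapper:
 *
 *	netdev = nfp_repr_alloc_mqs(app, txqs, rxqs);
 *	netdev = nfp_repr_alloc(app);	// == nfp_repr_alloc_mqs(app, 1, 1)
 */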
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
index b802a1d55449..68928c86b698 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
@@ -283,7 +283,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
nfp_net_info(nn);
vf->ddir = nfp_net_debugfs_device_add(pdev);
- nfp_net_debugfs_vnic_add(nn, vf->ddir, 0);
+ nfp_net_debugfs_vnic_add(nn, vf->ddir);
return 0;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.c b/drivers/net/ethernet/netronome/nfp/nfp_port.c
index 7bd8be5c833b..9c1298114c70 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_port.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_port.c
@@ -181,7 +181,11 @@ nfp_port_get_phys_port_name(struct net_device *netdev, char *name, size_t len)
eth_port->label_subport);
break;
case NFP_PORT_PF_PORT:
- n = snprintf(name, len, "pf%d", port->pf_id);
+ if (!port->pf_split)
+ n = snprintf(name, len, "pf%d", port->pf_id);
+ else
+ n = snprintf(name, len, "pf%ds%d", port->pf_id,
+ port->pf_split_id);
break;
case NFP_PORT_VF_PORT:
n = snprintf(name, len, "pf%dvf%d", port->pf_id, port->vf_id);
@@ -218,6 +222,8 @@ int nfp_port_configure(struct net_device *netdev, bool configed)
eth_port = __nfp_port_get_eth_port(port);
if (!eth_port)
return 0;
+ if (port->eth_forced)
+ return 0;
err = nfp_eth_set_configured(port->app->cpp, eth_port->index, configed);
return err < 0 && err != -EOPNOTSUPP ? err : 0;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.h b/drivers/net/ethernet/netronome/nfp/nfp_port.h
index fa7e669a969c..51f10ae2d53e 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_port.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_port.h
@@ -77,10 +77,13 @@ enum nfp_port_flags {
* @app: backpointer to the app structure
* @dl_port: devlink port structure
* @eth_id: for %NFP_PORT_PHYS_PORT port ID in NFP enumeration scheme
+ * @eth_forced: for %NFP_PORT_PHYS_PORT port is forced UP or DOWN, don't change
* @eth_port: for %NFP_PORT_PHYS_PORT translated ETH Table port entry
* @eth_stats: for %NFP_PORT_PHYS_PORT MAC stats if available
* @pf_id: for %NFP_PORT_PF_PORT, %NFP_PORT_VF_PORT ID of the PCI PF (0-3)
* @vf_id: for %NFP_PORT_VF_PORT ID of the PCI VF within @pf_id
+ * @pf_split: for %NFP_PORT_PF_PORT %true if PCI PF has more than one vNIC
+ * @pf_split_id: for %NFP_PORT_PF_PORT ID of PCI PF vNIC (valid if @pf_split)
* @vnic: for %NFP_PORT_PF_PORT, %NFP_PORT_VF_PORT vNIC ctrl memory
* @port_list: entry on pf's list of ports
*/
@@ -99,6 +102,7 @@ struct nfp_port {
/* NFP_PORT_PHYS_PORT */
struct {
unsigned int eth_id;
+ bool eth_forced;
struct nfp_eth_table_port *eth_port;
u8 __iomem *eth_stats;
};
@@ -106,6 +110,8 @@ struct nfp_port {
struct {
unsigned int pf_id;
unsigned int vf_id;
+ bool pf_split;
+ unsigned int pf_split_id;
u8 __iomem *vnic;
};
};
@@ -116,6 +122,8 @@ struct nfp_port {
extern const struct ethtool_ops nfp_port_ethtool_ops;
extern const struct switchdev_ops nfp_port_switchdev_ops;
+__printf(2, 3) u8 *nfp_pr_et(u8 *data, const char *fmt, ...);
+
int nfp_port_setup_tc(struct net_device *netdev, enum tc_setup_type type,
void *type_data);
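/* Illustration (not part of the patch): with @pf_split the PF port names
 * produced by nfp_port_get_phys_port_name() distinguish single- and
 * multi-vNIC PFs:
 *
 *	"pf0"      PF 0, single vNIC
 *	"pf0s1"    PF 0 split, vNIC 1   (pf_split, pf_split_id == 1)
 *	"pf0vf2"   PF 0, VF 2
 */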
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_shared_buf.c b/drivers/net/ethernet/netronome/nfp/nfp_shared_buf.c
new file mode 100644
index 000000000000..0ecd83705368
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_shared_buf.c
@@ -0,0 +1,180 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+/*
+ * Copyright (C) 2018 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <net/devlink.h>
+
+#include "nfpcore/nfp_cpp.h"
+#include "nfpcore/nfp_nffw.h"
+#include "nfp_abi.h"
+#include "nfp_app.h"
+#include "nfp_main.h"
+
+static u32 nfp_shared_buf_pool_unit(struct nfp_pf *pf, unsigned int sb)
+{
+ __le32 sb_id = cpu_to_le32(sb);
+ unsigned int i;
+
+ for (i = 0; i < pf->num_shared_bufs; i++)
+ if (pf->shared_bufs[i].id == sb_id)
+ return le32_to_cpu(pf->shared_bufs[i].pool_size_unit);
+
+ WARN_ON_ONCE(1);
+ return 0;
+}
+
+int nfp_shared_buf_pool_get(struct nfp_pf *pf, unsigned int sb, u16 pool_index,
+ struct devlink_sb_pool_info *pool_info)
+{
+ struct nfp_shared_buf_pool_info_get get_data;
+ struct nfp_shared_buf_pool_id id = {
+ .shared_buf = cpu_to_le32(sb),
+ .pool = cpu_to_le32(pool_index),
+ };
+ unsigned int unit_size;
+ int n;
+
+ unit_size = nfp_shared_buf_pool_unit(pf, sb);
+ if (!unit_size)
+ return -EINVAL;
+
+ n = nfp_mbox_cmd(pf, NFP_MBOX_POOL_GET, &id, sizeof(id),
+ &get_data, sizeof(get_data));
+ if (n < 0)
+ return n;
+ if (n < sizeof(get_data))
+ return -EIO;
+
+ pool_info->pool_type = le32_to_cpu(get_data.pool_type);
+ pool_info->threshold_type = le32_to_cpu(get_data.threshold_type);
+ pool_info->size = le32_to_cpu(get_data.size) * unit_size;
+
+ return 0;
+}
+
+int nfp_shared_buf_pool_set(struct nfp_pf *pf, unsigned int sb,
+ u16 pool_index, u32 size,
+ enum devlink_sb_threshold_type threshold_type)
+{
+ struct nfp_shared_buf_pool_info_set set_data = {
+ .id = {
+ .shared_buf = cpu_to_le32(sb),
+ .pool = cpu_to_le32(pool_index),
+ },
+ .threshold_type = cpu_to_le32(threshold_type),
+ };
+ unsigned int unit_size;
+
+ unit_size = nfp_shared_buf_pool_unit(pf, sb);
+ if (!unit_size || size % unit_size)
+ return -EINVAL;
+ set_data.size = cpu_to_le32(size / unit_size);
+
+ return nfp_mbox_cmd(pf, NFP_MBOX_POOL_SET, &set_data, sizeof(set_data),
+ NULL, 0);
+}
+
+int nfp_shared_buf_register(struct nfp_pf *pf)
+{
+ struct devlink *devlink = priv_to_devlink(pf);
+ unsigned int i, num_entries, entry_sz;
+ struct nfp_cpp_area *sb_desc_area;
+ u8 __iomem *sb_desc;
+ int n, err;
+
+ if (!pf->mbox)
+ return 0;
+
+ n = nfp_pf_rtsym_read_optional(pf, NFP_SHARED_BUF_COUNT_SYM_NAME, 0);
+ if (n <= 0)
+ return n;
+ num_entries = n;
+
+ sb_desc = nfp_pf_map_rtsym(pf, "sb_tbl", NFP_SHARED_BUF_TABLE_SYM_NAME,
+ num_entries * sizeof(pf->shared_bufs[0]),
+ &sb_desc_area);
+ if (IS_ERR(sb_desc))
+ return PTR_ERR(sb_desc);
+
+ entry_sz = nfp_cpp_area_size(sb_desc_area) / num_entries;
+
+ pf->shared_bufs = kmalloc_array(num_entries, sizeof(pf->shared_bufs[0]),
+ GFP_KERNEL);
+ if (!pf->shared_bufs) {
+ err = -ENOMEM;
+ goto err_release_area;
+ }
+
+ for (i = 0; i < num_entries; i++) {
+ struct nfp_shared_buf *sb = &pf->shared_bufs[i];
+
+ /* Entries may be larger in future FW */
+ memcpy_fromio(sb, sb_desc + i * entry_sz, sizeof(*sb));
+
+ err = devlink_sb_register(devlink,
+ le32_to_cpu(sb->id),
+ le32_to_cpu(sb->size),
+ le16_to_cpu(sb->ingress_pools_count),
+ le16_to_cpu(sb->egress_pools_count),
+ le16_to_cpu(sb->ingress_tc_count),
+ le16_to_cpu(sb->egress_tc_count));
+ if (err)
+ goto err_unreg_prev;
+ }
+ pf->num_shared_bufs = num_entries;
+
+ nfp_cpp_area_release_free(sb_desc_area);
+
+ return 0;
+
+err_unreg_prev:
+ while (i--)
+ devlink_sb_unregister(devlink,
+ le32_to_cpu(pf->shared_bufs[i].id));
+ kfree(pf->shared_bufs);
+err_release_area:
+ nfp_cpp_area_release_free(sb_desc_area);
+ return err;
+}
+
+void nfp_shared_buf_unregister(struct nfp_pf *pf)
+{
+ struct devlink *devlink = priv_to_devlink(pf);
+ unsigned int i;
+
+ for (i = 0; i < pf->num_shared_bufs; i++)
+ devlink_sb_unregister(devlink,
+ le32_to_cpu(pf->shared_bufs[i].id));
+ kfree(pf->shared_bufs);
+}
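/* Behaviour sketch (illustrative): pool sizes cross the mailbox in credits
 * and are exposed to devlink in bytes.  For a hypothetical pool_size_unit
 * of 2048:
 *
 *	get: pool_info->size = le32_to_cpu(get_data.size) * 2048
 *	set: size 10240 -> 5 credits; size 1000 -> -EINVAL (not a multiple
 *	     of the unit)
 */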
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h
index ced62d112aa2..f44d0a857314 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h
@@ -94,6 +94,8 @@ int nfp_nsp_read_sensors(struct nfp_nsp *state, unsigned int sensor_mask,
/* MAC Statistics Accumulator */
#define NFP_RESOURCE_MAC_STATISTICS "mac.stat"
+int nfp_resource_table_init(struct nfp_cpp *cpp);
+
struct nfp_resource *
nfp_resource_acquire(struct nfp_cpp *cpp, const char *name);
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
index cd678323bacb..749655c329b2 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
@@ -933,7 +933,6 @@ static int nfp6000_area_read(struct nfp_cpp_area *area, void *kernel_vaddr,
u32 *wrptr32 = kernel_vaddr;
const u32 __iomem *rdptr32;
int n, width;
- bool is_64;
priv = nfp_cpp_area_priv(area);
rdptr64 = priv->iomem + offset;
@@ -943,10 +942,15 @@ static int nfp6000_area_read(struct nfp_cpp_area *area, void *kernel_vaddr,
return -EFAULT;
width = priv->width.read;
-
if (width <= 0)
return -EINVAL;
+ /* MU reads via a PCIe2CPP BAR support 32bit (and other) lengths */
+ if (priv->target == (NFP_CPP_TARGET_MU & NFP_CPP_TARGET_ID_MASK) &&
+ priv->action == NFP_CPP_ACTION_RW &&
+ (offset % sizeof(u64) == 4 || length % sizeof(u64) == 4))
+ width = TARGET_WIDTH_32;
+
/* Unaligned? Translate to an explicit access */
if ((priv->offset + offset) & (width - 1))
return nfp_cpp_explicit_read(nfp_cpp_area_cpp(area),
@@ -956,36 +960,29 @@ static int nfp6000_area_read(struct nfp_cpp_area *area, void *kernel_vaddr,
priv->offset + offset,
kernel_vaddr, length, width);
- is_64 = width == TARGET_WIDTH_64;
-
- /* MU reads via a PCIe2CPP BAR supports 32bit (and other) lengths */
- if (priv->target == (NFP_CPP_TARGET_ID_MASK & NFP_CPP_TARGET_MU) &&
- priv->action == NFP_CPP_ACTION_RW)
- is_64 = false;
+ if (WARN_ON(!priv->bar))
+ return -EFAULT;
- if (is_64) {
- if (offset % sizeof(u64) != 0 || length % sizeof(u64) != 0)
- return -EINVAL;
- } else {
+ switch (width) {
+ case TARGET_WIDTH_32:
if (offset % sizeof(u32) != 0 || length % sizeof(u32) != 0)
return -EINVAL;
- }
- if (WARN_ON(!priv->bar))
- return -EFAULT;
+ for (n = 0; n < length; n += sizeof(u32))
+ *wrptr32++ = __raw_readl(rdptr32++);
+ return n;
+#ifdef __raw_readq
+ case TARGET_WIDTH_64:
+ if (offset % sizeof(u64) != 0 || length % sizeof(u64) != 0)
+ return -EINVAL;
- if (is_64)
-#ifndef __raw_readq
- return -EINVAL;
-#else
for (n = 0; n < length; n += sizeof(u64))
*wrptr64++ = __raw_readq(rdptr64++);
+ return n;
#endif
- else
- for (n = 0; n < length; n += sizeof(u32))
- *wrptr32++ = __raw_readl(rdptr32++);
-
- return n;
+ default:
+ return -EINVAL;
+ }
}
static int
@@ -999,7 +996,6 @@ nfp6000_area_write(struct nfp_cpp_area *area,
struct nfp6000_area_priv *priv;
u32 __iomem *wrptr32;
int n, width;
- bool is_64;
priv = nfp_cpp_area_priv(area);
wrptr64 = priv->iomem + offset;
@@ -1009,10 +1005,15 @@ nfp6000_area_write(struct nfp_cpp_area *area,
return -EFAULT;
width = priv->width.write;
-
if (width <= 0)
return -EINVAL;
+ /* MU writes via a PCIe2CPP BAR support 32bit (and other) lengths */
+ if (priv->target == (NFP_CPP_TARGET_ID_MASK & NFP_CPP_TARGET_MU) &&
+ priv->action == NFP_CPP_ACTION_RW &&
+ (offset % sizeof(u64) == 4 || length % sizeof(u64) == 4))
+ width = TARGET_WIDTH_32;
+
/* Unaligned? Translate to an explicit access */
if ((priv->offset + offset) & (width - 1))
return nfp_cpp_explicit_write(nfp_cpp_area_cpp(area),
@@ -1022,40 +1023,33 @@ nfp6000_area_write(struct nfp_cpp_area *area,
priv->offset + offset,
kernel_vaddr, length, width);
- is_64 = width == TARGET_WIDTH_64;
-
- /* MU writes via a PCIe2CPP BAR supports 32bit (and other) lengths */
- if (priv->target == (NFP_CPP_TARGET_ID_MASK & NFP_CPP_TARGET_MU) &&
- priv->action == NFP_CPP_ACTION_RW)
- is_64 = false;
+ if (WARN_ON(!priv->bar))
+ return -EFAULT;
- if (is_64) {
- if (offset % sizeof(u64) != 0 || length % sizeof(u64) != 0)
- return -EINVAL;
- } else {
+ switch (width) {
+ case TARGET_WIDTH_32:
if (offset % sizeof(u32) != 0 || length % sizeof(u32) != 0)
return -EINVAL;
- }
- if (WARN_ON(!priv->bar))
- return -EFAULT;
+ for (n = 0; n < length; n += sizeof(u32)) {
+ __raw_writel(*rdptr32++, wrptr32++);
+ wmb();
+ }
+ return n;
+#ifdef __raw_writeq
+ case TARGET_WIDTH_64:
+ if (offset % sizeof(u64) != 0 || length % sizeof(u64) != 0)
+ return -EINVAL;
- if (is_64)
-#ifndef __raw_writeq
- return -EINVAL;
-#else
for (n = 0; n < length; n += sizeof(u64)) {
__raw_writeq(*rdptr64++, wrptr64++);
wmb();
}
+ return n;
#endif
- else
- for (n = 0; n < length; n += sizeof(u32)) {
- __raw_writel(*rdptr32++, wrptr32++);
- wmb();
- }
-
- return n;
+ default:
+ return -EINVAL;
+ }
}
struct nfp6000_explicit_priv {
@@ -1330,6 +1324,7 @@ struct nfp_cpp *nfp_cpp_from_nfp6000_pcie(struct pci_dev *pdev)
/* Finished with card initialization. */
dev_info(&pdev->dev,
"Netronome Flow Processor NFP4000/NFP6000 PCIe Card Probe\n");
+ pcie_print_link_status(pdev);
nfp = kzalloc(sizeof(*nfp), GFP_KERNEL);
if (!nfp) {
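
Both the read and write hunks above apply the same width-demotion rule; a standalone sketch of that decision, with TARGET_WIDTH_* and the NFP_CPP_* constants taken from the surrounding file:

/* Sketch only: MU accesses through the PCIe2CPP BAR drop from 64-bit
 * to 32-bit whenever the offset or length is only 4-byte aligned.
 */
static int pick_access_width(int target, int action,
			     unsigned long offset, unsigned long length)
{
	int width = TARGET_WIDTH_64;

	if (target == (NFP_CPP_TARGET_MU & NFP_CPP_TARGET_ID_MASK) &&
	    action == NFP_CPP_ACTION_RW &&
	    (offset % sizeof(u64) == 4 || length % sizeof(u64) == 4))
		width = TARGET_WIDTH_32;

	return width;
}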
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
index c8f2c064cce3..b0da3d436850 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
@@ -87,6 +87,11 @@ struct resource;
#define NFP_CPP_TARGET_ID_MASK 0x1f
+#define NFP_CPP_ATOMIC_RD(target, island) \
+ NFP_CPP_ISLAND_ID((target), 3, 0, (island))
+#define NFP_CPP_ATOMIC_WR(target, island) \
+ NFP_CPP_ISLAND_ID((target), 4, 0, (island))
+
/**
* NFP_CPP_ID() - pack target, token, and action into a CPP ID.
* @target: NFP CPP target id
@@ -295,6 +300,8 @@ void nfp_cpp_mutex_free(struct nfp_cpp_mutex *mutex);
int nfp_cpp_mutex_lock(struct nfp_cpp_mutex *mutex);
int nfp_cpp_mutex_unlock(struct nfp_cpp_mutex *mutex);
int nfp_cpp_mutex_trylock(struct nfp_cpp_mutex *mutex);
+int nfp_cpp_mutex_reclaim(struct nfp_cpp *cpp, int target,
+ unsigned long long address);
/**
* nfp_cppcore_pcie_unit() - Get PCI Unit of a CPP handle
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c
index cb28ac03e4ca..c88bf673cb76 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c
@@ -59,6 +59,11 @@ static u32 nfp_mutex_unlocked(u16 interface)
return (u32)interface << 16 | 0x0000;
}
+static u32 nfp_mutex_owner(u32 val)
+{
+ return val >> 16;
+}
+
static bool nfp_mutex_is_locked(u32 val)
{
return (val & 0xffff) == 0x000f;
@@ -351,3 +356,43 @@ int nfp_cpp_mutex_trylock(struct nfp_cpp_mutex *mutex)
return nfp_mutex_is_locked(tmp) ? -EBUSY : -EINVAL;
}
+
+/**
+ * nfp_cpp_mutex_reclaim() - Unlock mutex if held by local endpoint
+ * @cpp: NFP CPP handle
+ * @target: NFP CPP target ID (i.e. NFP_CPP_TARGET_CLS or NFP_CPP_TARGET_MU)
+ * @address: Offset into the address space of the NFP CPP target ID
+ *
+ * Release the lock if held by the local system. Extreme care is advised;
+ * call only when no local lock users can exist.
+ *
+ * Return: 0 if the lock was OK, 1 if a local lock was busted, -errno on invalid mutex
+ */
+int nfp_cpp_mutex_reclaim(struct nfp_cpp *cpp, int target,
+ unsigned long long address)
+{
+ const u32 mur = NFP_CPP_ID(target, 3, 0); /* atomic_read */
+ const u32 muw = NFP_CPP_ID(target, 4, 0); /* atomic_write */
+ u16 interface = nfp_cpp_interface(cpp);
+ int err;
+ u32 tmp;
+
+ err = nfp_cpp_mutex_validate(interface, &target, address);
+ if (err)
+ return err;
+
+ /* Check lock */
+ err = nfp_cpp_readl(cpp, mur, address, &tmp);
+ if (err < 0)
+ return err;
+
+ if (nfp_mutex_is_unlocked(tmp) || nfp_mutex_owner(tmp) != interface)
+ return 0;
+
+ /* Bust the lock */
+ err = nfp_cpp_writel(cpp, muw, address, nfp_mutex_unlocked(interface));
+ if (err < 0)
+ return err;
+
+ return 1;
+}
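
nfp_cpp_mutex_reclaim() reports a tri-state result; a hedged sketch of a caller handling all three cases (the resource table init below is the in-tree user):

/* Sketch only: handling of the reclaim return values; target and addr
 * are illustrative.
 */
static int reclaim_one_mutex(struct nfp_cpp *cpp, int target, u64 addr)
{
	int err;

	err = nfp_cpp_mutex_reclaim(cpp, target, addr);
	if (err < 0)		/* invalid mutex or I/O error */
		return err;
	if (err)		/* 1: a stale local lock was busted */
		nfp_warn(cpp, "busted a stale mutex\n");
	return 0;		/* 0: lock was free or held elsewhere */
}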
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h
index c9724fb7ea4b..df599d5b6bb3 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h
@@ -100,6 +100,8 @@ nfp_rtsym_lookup(struct nfp_rtsym_table *rtbl, const char *name);
u64 nfp_rtsym_read_le(struct nfp_rtsym_table *rtbl, const char *name,
int *error);
+int nfp_rtsym_write_le(struct nfp_rtsym_table *rtbl, const char *name,
+ u64 value);
u8 __iomem *
nfp_rtsym_map(struct nfp_rtsym_table *rtbl, const char *name, const char *id,
unsigned int min_size, struct nfp_cpp_area **area);
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c
index 7e14725055c7..2dd89dba9311 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c
@@ -338,3 +338,62 @@ u64 nfp_resource_size(struct nfp_resource *res)
{
return res->size;
}
+
+/**
+ * nfp_resource_table_init() - Run initial checks on the resource table
+ * @cpp: NFP CPP handle
+ *
+ * Start-of-day init procedure for resource table. Must be called before
+ * any local resource table users may exist.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+int nfp_resource_table_init(struct nfp_cpp *cpp)
+{
+ struct nfp_cpp_mutex *dev_mutex;
+ int i, err;
+
+ err = nfp_cpp_mutex_reclaim(cpp, NFP_RESOURCE_TBL_TARGET,
+ NFP_RESOURCE_TBL_BASE);
+ if (err < 0) {
+ nfp_err(cpp, "Error: failed to reclaim resource table mutex\n");
+ return err;
+ }
+ if (err)
+ nfp_warn(cpp, "Warning: busted main resource table mutex\n");
+
+ dev_mutex = nfp_cpp_mutex_alloc(cpp, NFP_RESOURCE_TBL_TARGET,
+ NFP_RESOURCE_TBL_BASE,
+ NFP_RESOURCE_TBL_KEY);
+ if (!dev_mutex)
+ return -ENOMEM;
+
+ if (nfp_cpp_mutex_lock(dev_mutex)) {
+ nfp_err(cpp, "Error: failed to claim resource table mutex\n");
+ nfp_cpp_mutex_free(dev_mutex);
+ return -EINVAL;
+ }
+
+ /* Resource 0 is the dev_mutex, start from 1 */
+ for (i = 1; i < NFP_RESOURCE_TBL_ENTRIES; i++) {
+ u64 addr = NFP_RESOURCE_TBL_BASE +
+ sizeof(struct nfp_resource_entry) * i;
+
+ err = nfp_cpp_mutex_reclaim(cpp, NFP_RESOURCE_TBL_TARGET, addr);
+ if (err < 0) {
+ nfp_err(cpp,
+ "Error: failed to reclaim resource %d mutex\n",
+ i);
+ goto err_unlock;
+ }
+ if (err)
+ nfp_warn(cpp, "Warning: busted resource %d mutex\n", i);
+ }
+
+ err = 0;
+err_unlock:
+ nfp_cpp_mutex_unlock(dev_mutex);
+ nfp_cpp_mutex_free(dev_mutex);
+
+ return err;
+}
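
Each table entry owns a mutex at a fixed stride from the table base, with entry 0 reserved for the table's own dev_mutex; the loop above derives the per-entry address as in this sketch:

/* Sketch only: address of resource entry i's mutex, as computed in the
 * reclaim loop above.
 */
static u64 resource_entry_mutex_addr(unsigned int i)
{
	return NFP_RESOURCE_TBL_BASE +
	       sizeof(struct nfp_resource_entry) * i;
}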
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c
index 46107aefad1c..9e34216578da 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c
@@ -286,6 +286,49 @@ exit:
return val;
}
+/**
+ * nfp_rtsym_write_le() - Write an unsigned scalar value to a symbol
+ * @rtbl: NFP RTsym table
+ * @name: Symbol name
+ * @value: Value to write
+ *
+ * Look up a symbol and write a value to it. The symbol can be 4 or 8 bytes
+ * in size; if it is 4 bytes, only the lower 32 bits of 'value' are used. The
+ * value is written as a simple little-endian unsigned value.
+ *
+ * Return: 0 on success or error code.
+ */
+int nfp_rtsym_write_le(struct nfp_rtsym_table *rtbl, const char *name,
+ u64 value)
+{
+ const struct nfp_rtsym *sym;
+ int err;
+ u32 id;
+
+ sym = nfp_rtsym_lookup(rtbl, name);
+ if (!sym)
+ return -ENOENT;
+
+ id = NFP_CPP_ISLAND_ID(sym->target, NFP_CPP_ACTION_RW, 0, sym->domain);
+
+ switch (sym->size) {
+ case 4:
+ err = nfp_cpp_writel(rtbl->cpp, id, sym->addr, value);
+ break;
+ case 8:
+ err = nfp_cpp_writeq(rtbl->cpp, id, sym->addr, value);
+ break;
+ default:
+ nfp_err(rtbl->cpp,
+ "rtsym '%s' unsupported or non-scalar size: %lld\n",
+ name, sym->size);
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
+
u8 __iomem *
nfp_rtsym_map(struct nfp_rtsym_table *rtbl, const char *name, const char *id,
unsigned int min_size, struct nfp_cpp_area **area)
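
nfp_rtsym_write_le() dispatches on the symbol's size and returns -ENOENT for unknown symbols; a hedged usage sketch (the symbol name is hypothetical):

/* Sketch only: writing a scalar firmware symbol. "_example_enable" is
 * a made-up name; real callers use symbols defined by their firmware.
 */
static int fw_set_enable(struct nfp_rtsym_table *rtbl, bool on)
{
	int err;

	err = nfp_rtsym_write_le(rtbl, "_example_enable", on ? 1 : 0);
	if (err == -ENOENT)
		return 0;	/* symbol is optional in this firmware */
	return err;
}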
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
index 6cec2a6a3dcc..7503aa222392 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
@@ -146,8 +146,7 @@ netxen_get_minidump_template(struct netxen_adapter *adapter)
if ((cmd.rsp.cmd == NX_RCODE_SUCCESS) && (size == cmd.rsp.arg2)) {
memcpy(adapter->mdump.md_template, addr, size);
} else {
- dev_err(&adapter->pdev->dev, "Failed to get minidump template, "
- "err_code : %d, requested_size : %d, actual_size : %d\n ",
+ dev_err(&adapter->pdev->dev, "Failed to get minidump template, err_code : %d, requested_size : %d, actual_size : %d\n",
cmd.rsp.cmd, size, cmd.rsp.arg2);
}
pci_free_consistent(adapter->pdev, size, addr, md_template_addr);
@@ -180,8 +179,7 @@ netxen_setup_minidump(struct netxen_adapter *adapter)
if ((err == NX_RCODE_CMD_INVALID) ||
(err == NX_RCODE_CMD_NOT_IMPL)) {
dev_info(&adapter->pdev->dev,
- "Flashed firmware version does not support minidump, "
- "minimum version required is [ %u.%u.%u ].\n ",
+ "Flashed firmware version does not support minidump, minimum version required is [ %u.%u.%u ]\n",
NX_MD_SUPPORT_MAJOR, NX_MD_SUPPORT_MINOR,
NX_MD_SUPPORT_SUBVERSION);
}
diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile
index c70cf2ad81c0..a0acb94d65f0 100644
--- a/drivers/net/ethernet/qlogic/qed/Makefile
+++ b/drivers/net/ethernet/qlogic/qed/Makefile
@@ -3,7 +3,7 @@ obj-$(CONFIG_QED) := qed.o
qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \
qed_int.o qed_main.o qed_mcp.o qed_sp_commands.o qed_spq.o qed_l2.o \
- qed_selftest.o qed_dcbx.o qed_debug.o qed_ptp.o
+ qed_selftest.o qed_dcbx.o qed_debug.o qed_ptp.o qed_mng_tlv.o
qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o
qed-$(CONFIG_QED_LL2) += qed_ll2.o
qed-$(CONFIG_QED_RDMA) += qed_roce.o qed_rdma.o qed_iwarp.o
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index e07460a68d30..00db3401b898 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -92,6 +92,8 @@ struct qed_eth_cb_ops;
struct qed_dev_info;
union qed_mcp_protocol_stats;
enum qed_mcp_protocol_type;
+enum qed_mfw_tlv_type;
+union qed_mfw_tlv_data;
/* helpers */
#define QED_MFW_GET_FIELD(name, field) \
@@ -439,6 +441,59 @@ struct qed_fw_data {
u32 init_ops_size;
};
+enum qed_mf_mode_bit {
+ /* Supports PF-classification based on tag */
+ QED_MF_OVLAN_CLSS,
+
+ /* Supports PF-classification based on MAC */
+ QED_MF_LLH_MAC_CLSS,
+
+ /* Supports PF-classification based on protocol type */
+ QED_MF_LLH_PROTO_CLSS,
+
+ /* Requires a default PF to be set */
+ QED_MF_NEED_DEF_PF,
+
+ /* Allow LL2 to multicast/broadcast */
+ QED_MF_LL2_NON_UNICAST,
+
+ /* Allow Cross-PF [& child VFs] Tx-switching */
+ QED_MF_INTER_PF_SWITCH,
+
+ /* Unified Fabric Port support enabled */
+ QED_MF_UFP_SPECIFIC,
+
+ /* Disable Accelerated Receive Flow Steering (aRFS) */
+ QED_MF_DISABLE_ARFS,
+
+ /* Use vlan for steering */
+ QED_MF_8021Q_TAGGING,
+
+ /* Use stag for steering */
+ QED_MF_8021AD_TAGGING,
+
+ /* Allow DSCP to TC mapping */
+ QED_MF_DSCP_TO_TC_MAP,
+};
+
+enum qed_ufp_mode {
+ QED_UFP_MODE_ETS,
+ QED_UFP_MODE_VNIC_BW,
+ QED_UFP_MODE_UNKNOWN
+};
+
+enum qed_ufp_pri_type {
+ QED_UFP_PRI_OS,
+ QED_UFP_PRI_VNIC,
+ QED_UFP_PRI_UNKNOWN
+};
+
+struct qed_ufp_info {
+ enum qed_ufp_pri_type pri_type;
+ enum qed_ufp_mode mode;
+ u8 tc;
+};
+
enum BAR_ID {
BAR_ID_0, /* used for GRC */
BAR_ID_1 /* Used for doorbells */
@@ -460,6 +515,10 @@ struct qed_simd_fp_handler {
void (*func)(void *);
};
+enum qed_slowpath_wq_flag {
+ QED_SLOWPATH_MFW_TLV_REQ,
+};
+
struct qed_hwfn {
struct qed_dev *cdev;
u8 my_id; /* ID inside the PF */
@@ -547,6 +606,8 @@ struct qed_hwfn {
struct qed_dcbx_info *p_dcbx_info;
+ struct qed_ufp_info ufp_info;
+
struct qed_dmae_info dmae_info;
/* QM init */
@@ -587,6 +648,9 @@ struct qed_hwfn {
#endif
struct z_stream_s *stream;
+ struct workqueue_struct *slowpath_wq;
+ struct delayed_work slowpath_task;
+ unsigned long slowpath_task_flags;
};
struct pci_params {
@@ -669,10 +733,8 @@ struct qed_dev {
u8 num_funcs_in_port;
u8 path_id;
- enum qed_mf_mode mf_mode;
-#define IS_MF_DEFAULT(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode == QED_MF_DEFAULT)
-#define IS_MF_SI(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode == QED_MF_NPAR)
-#define IS_MF_SD(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode == QED_MF_OVLAN)
+
+ unsigned long mf_bits;
int pcie_width;
int pcie_speed;
@@ -853,5 +915,9 @@ void qed_get_protocol_stats(struct qed_dev *cdev,
union qed_mcp_protocol_stats *stats);
int qed_slowpath_irq_req(struct qed_hwfn *hwfn);
void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn);
+int qed_mfw_tlv_req(struct qed_hwfn *hwfn);
+int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn,
+ enum qed_mfw_tlv_type type,
+ union qed_mfw_tlv_data *tlv_data);
#endif /* _QED_H */
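
With mf_bits a bitmap rather than the old single-valued mf_mode, each capability is tested independently; a sketch of the pattern that replaces the removed IS_MF_SI()/IS_MF_SD() macros:

/* Sketch only: per-capability test against the new bitmap. */
static bool mf_has_ovlan_clss(struct qed_hwfn *p_hwfn)
{
	return test_bit(QED_MF_OVLAN_CLSS, &p_hwfn->cdev->mf_bits);
}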
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index 820b226d6ff8..b5b5ff725426 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -47,6 +47,7 @@
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
+#include "qed_rdma.h"
#include "qed_reg_addr.h"
#include "qed_sriov.h"
@@ -426,7 +427,7 @@ static void qed_cxt_set_srq_count(struct qed_hwfn *p_hwfn, u32 num_srqs)
p_mgr->srq_count = num_srqs;
}
-static u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn)
+u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn)
{
struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
@@ -936,14 +937,13 @@ static int qed_cxt_src_t2_alloc(struct qed_hwfn *p_hwfn)
u32 size = min_t(u32, total_size, psz);
void **p_virt = &p_mngr->t2[i].p_virt;
- *p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
- size,
- &p_mngr->t2[i].p_phys, GFP_KERNEL);
+ *p_virt = dma_zalloc_coherent(&p_hwfn->cdev->pdev->dev,
+ size, &p_mngr->t2[i].p_phys,
+ GFP_KERNEL);
if (!p_mngr->t2[i].p_virt) {
rc = -ENOMEM;
goto t2_fail;
}
- memset(*p_virt, 0, size);
p_mngr->t2[i].size = size;
total_size -= size;
}
@@ -2071,7 +2071,7 @@ static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
u32 num_cons, num_qps, num_srqs;
enum protocol_type proto;
- num_srqs = min_t(u32, 32 * 1024, p_params->num_srqs);
+ num_srqs = min_t(u32, QED_RDMA_MAX_SRQS, p_params->num_srqs);
if (p_hwfn->mcp_info->func_info.protocol == QED_PCI_ETH_RDMA) {
DP_NOTICE(p_hwfn,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
index a4e95869889f..758a8b4c0de8 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
@@ -235,6 +235,7 @@ u32 qed_cxt_get_proto_tid_count(struct qed_hwfn *p_hwfn,
enum protocol_type type);
u32 qed_cxt_get_proto_cid_start(struct qed_hwfn *p_hwfn,
enum protocol_type type);
+u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn);
int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto);
#define QED_CTX_WORKING_MEM 0
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index 449777f21237..8f31406ec894 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -274,8 +274,8 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
u32 pri_tc_tbl, int count, u8 dcbx_version)
{
enum dcbx_protocol_type type;
+ bool enable, ieee, eth_tlv;
u8 tc, priority_map;
- bool enable, ieee;
u16 protocol_id;
int priority;
int i;
@@ -283,6 +283,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Num APP entries = %d\n", count);
ieee = (dcbx_version == DCBX_CONFIG_VERSION_IEEE);
+ eth_tlv = false;
/* Parse APP TLV */
for (i = 0; i < count; i++) {
protocol_id = QED_MFW_GET_FIELD(p_tbl[i].entry,
@@ -304,13 +305,22 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
* indication, but we only got here if there was an
* app tlv for the protocol, so dcbx must be enabled.
*/
- enable = !(type == DCBX_PROTOCOL_ETH);
+ if (type == DCBX_PROTOCOL_ETH) {
+ enable = false;
+ eth_tlv = true;
+ } else {
+ enable = true;
+ }
qed_dcbx_update_app_info(p_data, p_hwfn, enable,
priority, tc, type);
}
}
+ /* If Eth TLV is not detected, use UFP TC as default TC */
+ if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) && !eth_tlv)
+ p_data->arr[DCBX_PROTOCOL_ETH].tc = p_hwfn->ufp_info.tc;
+
/* Update ramrod protocol data and hw_info fields
* with default info when corresponding APP TLV's are not detected.
* The enabled field has a different logic for ethernet as only for
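
The UFP fallback above reduces to a simple selection; a sketch of that decision, with the inputs mirroring the surrounding code:

/* Sketch only: default ETH TC after APP TLV parsing. In UFP mode with
 * no ETH TLV present, the UFP TC wins.
 */
static u8 pick_default_eth_tc(bool ufp_mode, bool eth_tlv_seen,
			      u8 ufp_tc, u8 parsed_tc)
{
	return (ufp_mode && !eth_tlv_seen) ? ufp_tc : parsed_tc;
}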
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index 4926c5532fba..b9ec460dd996 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -183,16 +183,9 @@ enum platform_ids {
MAX_PLATFORM_IDS
};
-struct chip_platform_defs {
- u8 num_ports;
- u8 num_pfs;
- u8 num_vfs;
-};
-
/* Chip constant definitions */
struct chip_defs {
const char *name;
- struct chip_platform_defs per_platform[MAX_PLATFORM_IDS];
};
/* Platform constant definitions */
@@ -317,6 +310,11 @@ struct phy_defs {
u32 tbus_data_hi_addr;
};
+/* Split type definitions */
+struct split_type_defs {
+ const char *name;
+};
+
/******************************** Constants **********************************/
#define MAX_LCIDS 320
@@ -419,6 +417,7 @@ struct phy_defs {
#define NUM_RSS_MEM_TYPES 5
#define NUM_BIG_RAM_TYPES 3
+#define BIG_RAM_NAME_LEN 3
#define NUM_PHY_TBUS_ADDRESSES 2048
#define PHY_DUMP_SIZE_DWORDS (NUM_PHY_TBUS_ADDRESSES / 2)
@@ -468,21 +467,9 @@ static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} };
/* Chip constant definitions array */
static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
- { "bb",
- {{MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB},
- {0, 0, 0},
- {0, 0, 0},
- {0, 0, 0} } },
- { "ah",
- {{MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2},
- {0, 0, 0},
- {0, 0, 0},
- {0, 0, 0} } },
- { "reserved",
- {{0, 0, 0},
- {0, 0, 0},
- {0, 0, 0},
- {0, 0, 0} } }
+ {"bb"},
+ {"ah"},
+ {"reserved"},
};
/* Storm constant definitions array */
@@ -1587,7 +1574,7 @@ static struct grc_param_defs s_grc_param_defs[] = {
{{0, 0, 0}, 0, 1, false, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_BMB */
- {{0, 0, 0}, 0, 1, false, false, 0, 1},
+ {{0, 0, 0}, 0, 1, false, false, 0, 0},
/* DBG_GRC_PARAM_DUMP_NIG */
{{1, 1, 1}, 0, 1, false, false, 0, 1},
@@ -1744,6 +1731,23 @@ static struct phy_defs s_phy_defs[] = {
PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
};
+static struct split_type_defs s_split_type_defs[] = {
+ /* SPLIT_TYPE_NONE */
+ {"eng"},
+
+ /* SPLIT_TYPE_PORT */
+ {"port"},
+
+ /* SPLIT_TYPE_PF */
+ {"pf"},
+
+ /* SPLIT_TYPE_PORT_PF */
+ {"port"},
+
+ /* SPLIT_TYPE_VF */
+ {"vf"}
+};
+
/**************************** Private Functions ******************************/
/* Reads and returns a single dword from the specified unaligned buffer */
@@ -1780,28 +1784,68 @@ static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u8 num_pfs = 0, max_pfs_per_port = 0;
if (dev_data->initialized)
return DBG_STATUS_OK;
+ /* Set chip */
if (QED_IS_K2(p_hwfn->cdev)) {
dev_data->chip_id = CHIP_K2;
dev_data->mode_enable[MODE_K2] = 1;
+ dev_data->num_vfs = MAX_NUM_VFS_K2;
+ num_pfs = MAX_NUM_PFS_K2;
+ max_pfs_per_port = MAX_NUM_PFS_K2 / 2;
} else if (QED_IS_BB_B0(p_hwfn->cdev)) {
dev_data->chip_id = CHIP_BB;
dev_data->mode_enable[MODE_BB] = 1;
+ dev_data->num_vfs = MAX_NUM_VFS_BB;
+ num_pfs = MAX_NUM_PFS_BB;
+ max_pfs_per_port = MAX_NUM_PFS_BB;
} else {
return DBG_STATUS_UNKNOWN_CHIP;
}
+ /* Set platform */
dev_data->platform_id = PLATFORM_ASIC;
dev_data->mode_enable[MODE_ASIC] = 1;
+ /* Set port mode */
+ switch (qed_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE)) {
+ case 0:
+ dev_data->mode_enable[MODE_PORTS_PER_ENG_1] = 1;
+ break;
+ case 1:
+ dev_data->mode_enable[MODE_PORTS_PER_ENG_2] = 1;
+ break;
+ case 2:
+ dev_data->mode_enable[MODE_PORTS_PER_ENG_4] = 1;
+ break;
+ }
+
+ /* Set 100G mode */
+ if (dev_data->chip_id == CHIP_BB &&
+ qed_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB) == 2)
+ dev_data->mode_enable[MODE_100G] = 1;
+
+ /* Set number of ports */
+ if (dev_data->mode_enable[MODE_PORTS_PER_ENG_1] ||
+ dev_data->mode_enable[MODE_100G])
+ dev_data->num_ports = 1;
+ else if (dev_data->mode_enable[MODE_PORTS_PER_ENG_2])
+ dev_data->num_ports = 2;
+ else if (dev_data->mode_enable[MODE_PORTS_PER_ENG_4])
+ dev_data->num_ports = 4;
+
+ /* Set number of PFs per port */
+ dev_data->num_pfs_per_port = min_t(u32,
+ num_pfs / dev_data->num_ports,
+ max_pfs_per_port);
+
/* Initializes the GRC parameters */
qed_dbg_grc_init_params(p_hwfn);
dev_data->use_dmae = true;
- dev_data->num_regs_read = 0;
dev_data->initialized = 1;
return DBG_STATUS_OK;
@@ -1820,9 +1864,9 @@ static struct dbg_bus_block *get_dbg_bus_block_desc(struct qed_hwfn *p_hwfn,
/* Reads the FW info structure for the specified Storm from the chip,
* and writes it to the specified fw_info pointer.
*/
-static void qed_read_fw_info(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u8 storm_id, struct fw_info *fw_info)
+static void qed_read_storm_fw_info(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u8 storm_id, struct fw_info *fw_info)
{
struct storm_defs *storm = &s_storm_defs[storm_id];
struct fw_info_location fw_info_location;
@@ -1944,45 +1988,29 @@ static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *dump_buf, bool dump)
{
- struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
char fw_ver_str[16] = EMPTY_FW_VERSION_STR;
char fw_img_str[16] = EMPTY_FW_IMAGE_STR;
struct fw_info fw_info = { {0}, {0} };
u32 offset = 0;
if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
- /* Read FW image/version from PRAM in a non-reset SEMI */
- bool found = false;
- u8 storm_id;
-
- for (storm_id = 0; storm_id < MAX_DBG_STORMS && !found;
- storm_id++) {
- struct storm_defs *storm = &s_storm_defs[storm_id];
-
- /* Read FW version/image */
- if (dev_data->block_in_reset[storm->block_id])
- continue;
-
- /* Read FW info for the current Storm */
- qed_read_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);
-
- /* Create FW version/image strings */
- if (snprintf(fw_ver_str, sizeof(fw_ver_str),
- "%d_%d_%d_%d", fw_info.ver.num.major,
- fw_info.ver.num.minor, fw_info.ver.num.rev,
- fw_info.ver.num.eng) < 0)
- DP_NOTICE(p_hwfn,
- "Unexpected debug error: invalid FW version string\n");
- switch (fw_info.ver.image_id) {
- case FW_IMG_MAIN:
- strcpy(fw_img_str, "main");
- break;
- default:
- strcpy(fw_img_str, "unknown");
- break;
- }
-
- found = true;
+ /* Read FW info from chip */
+ qed_read_fw_info(p_hwfn, p_ptt, &fw_info);
+
+ /* Create FW version/image strings */
+ if (snprintf(fw_ver_str, sizeof(fw_ver_str),
+ "%d_%d_%d_%d", fw_info.ver.num.major,
+ fw_info.ver.num.minor, fw_info.ver.num.rev,
+ fw_info.ver.num.eng) < 0)
+ DP_NOTICE(p_hwfn,
+ "Unexpected debug error: invalid FW version string\n");
+ switch (fw_info.ver.image_id) {
+ case FW_IMG_MAIN:
+ strcpy(fw_img_str, "main");
+ break;
+ default:
+ strcpy(fw_img_str, "unknown");
+ break;
}
}
@@ -2411,20 +2439,21 @@ static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn,
/* Dumps GRC registers section header. Returns the dumped size in dwords.
* The following parameters are dumped:
- * - count: no. of dumped entries
- * - split: split type
- * - id: split ID (dumped only if split_id >= 0)
+ * - count: no. of dumped entries
+ * - split_type: split type
+ * - split_id: split ID (dumped only if split_type != SPLIT_TYPE_NONE)
* - param_name: user parameter value (dumped only if param_name != NULL
* and param_val != NULL).
*/
static u32 qed_grc_dump_regs_hdr(u32 *dump_buf,
bool dump,
u32 num_reg_entries,
- const char *split_type,
- int split_id,
+ enum init_split_types split_type,
+ u8 split_id,
const char *param_name, const char *param_val)
{
- u8 num_params = 2 + (split_id >= 0 ? 1 : 0) + (param_name ? 1 : 0);
+ u8 num_params = 2 +
+ (split_type != SPLIT_TYPE_NONE ? 1 : 0) + (param_name ? 1 : 0);
u32 offset = 0;
offset += qed_dump_section_hdr(dump_buf + offset,
@@ -2432,8 +2461,9 @@ static u32 qed_grc_dump_regs_hdr(u32 *dump_buf,
offset += qed_dump_num_param(dump_buf + offset,
dump, "count", num_reg_entries);
offset += qed_dump_str_param(dump_buf + offset,
- dump, "split", split_type);
- if (split_id >= 0)
+ dump, "split",
+ s_split_type_defs[split_type].name);
+ if (split_type != SPLIT_TYPE_NONE)
offset += qed_dump_num_param(dump_buf + offset,
dump, "id", split_id);
if (param_name && param_val)
@@ -2462,9 +2492,12 @@ void qed_read_regs(struct qed_hwfn *p_hwfn,
static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *dump_buf,
- bool dump, u32 addr, u32 len, bool wide_bus)
+ bool dump, u32 addr, u32 len, bool wide_bus,
+ enum init_split_types split_type,
+ u8 split_id)
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u8 port_id = 0, pf_id = 0, vf_id = 0, fid = 0;
if (!dump)
return len;
@@ -2480,8 +2513,27 @@ static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn,
dev_data->num_regs_read = 0;
}
+ switch (split_type) {
+ case SPLIT_TYPE_PORT:
+ port_id = split_id;
+ break;
+ case SPLIT_TYPE_PF:
+ pf_id = split_id;
+ break;
+ case SPLIT_TYPE_PORT_PF:
+ port_id = split_id / dev_data->num_pfs_per_port;
+ pf_id = port_id + dev_data->num_ports *
+ (split_id % dev_data->num_pfs_per_port);
+ break;
+ case SPLIT_TYPE_VF:
+ vf_id = split_id;
+ break;
+ default:
+ break;
+ }
+
/* Try reading using DMAE */
- if (dev_data->use_dmae &&
+ if (dev_data->use_dmae && split_type == SPLIT_TYPE_NONE &&
(len >= s_platform_defs[dev_data->platform_id].dmae_thresh ||
wide_bus)) {
if (!qed_dmae_grc2host(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr),
@@ -2493,7 +2545,37 @@ static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn,
"Failed reading from chip using DMAE, using GRC instead\n");
}
- /* Read registers */
+ /* If not read using DMAE, read using GRC */
+
+ /* Set pretend */
+ if (split_type != dev_data->pretend.split_type || split_id !=
+ dev_data->pretend.split_id) {
+ switch (split_type) {
+ case SPLIT_TYPE_PORT:
+ qed_port_pretend(p_hwfn, p_ptt, port_id);
+ break;
+ case SPLIT_TYPE_PF:
+ fid = pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
+ qed_fid_pretend(p_hwfn, p_ptt, fid);
+ break;
+ case SPLIT_TYPE_PORT_PF:
+ fid = pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
+ qed_port_fid_pretend(p_hwfn, p_ptt, port_id, fid);
+ break;
+ case SPLIT_TYPE_VF:
+ fid = BIT(PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT) |
+ (vf_id << PXP_PRETEND_CONCRETE_FID_VFID_SHIFT);
+ qed_fid_pretend(p_hwfn, p_ptt, fid);
+ break;
+ default:
+ break;
+ }
+
+ dev_data->pretend.split_type = (u8)split_type;
+ dev_data->pretend.split_id = split_id;
+ }
+
+ /* Read registers using GRC */
qed_read_regs(p_hwfn, p_ptt, dump_buf, addr, len);
return len;
@@ -2517,7 +2599,8 @@ static u32 qed_grc_dump_reg_entry_hdr(u32 *dump_buf,
static u32 qed_grc_dump_reg_entry(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *dump_buf,
- bool dump, u32 addr, u32 len, bool wide_bus)
+ bool dump, u32 addr, u32 len, bool wide_bus,
+ enum init_split_types split_type, u8 split_id)
{
u32 offset = 0;
@@ -2525,7 +2608,8 @@ static u32 qed_grc_dump_reg_entry(struct qed_hwfn *p_hwfn,
offset += qed_grc_dump_addr_range(p_hwfn,
p_ptt,
dump_buf + offset,
- dump, addr, len, wide_bus);
+ dump, addr, len, wide_bus,
+ split_type, split_id);
return offset;
}
@@ -2558,7 +2642,8 @@ static u32 qed_grc_dump_reg_entry_skip(struct qed_hwfn *p_hwfn,
offset += qed_grc_dump_addr_range(p_hwfn,
p_ptt,
dump_buf + offset,
- dump, addr, curr_len, false);
+ dump, addr, curr_len, false,
+ SPLIT_TYPE_NONE, 0);
reg_offset += curr_len;
addr += curr_len;
@@ -2580,6 +2665,8 @@ static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn,
struct dbg_array input_regs_arr,
u32 *dump_buf,
bool dump,
+ enum init_split_types split_type,
+ u8 split_id,
bool block_enable[MAX_BLOCK_ID],
u32 *num_dumped_reg_entries)
{
@@ -2627,7 +2714,8 @@ static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn,
dump,
addr,
len,
- wide_bus);
+ wide_bus,
+ split_type, split_id);
(*num_dumped_reg_entries)++;
}
}
@@ -2642,19 +2730,28 @@ static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
bool dump,
bool block_enable[MAX_BLOCK_ID],
- const char *split_type_name,
- u32 split_id,
+ enum init_split_types split_type,
+ u8 split_id,
const char *param_name,
const char *param_val)
{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ enum init_split_types hdr_split_type = split_type;
u32 num_dumped_reg_entries, offset;
+ u8 hdr_split_id = split_id;
+
+ /* In PORT_PF split type, print a port split header */
+ if (split_type == SPLIT_TYPE_PORT_PF) {
+ hdr_split_type = SPLIT_TYPE_PORT;
+ hdr_split_id = split_id / dev_data->num_pfs_per_port;
+ }
/* Calculate register dump header size (and skip it for now) */
offset = qed_grc_dump_regs_hdr(dump_buf,
false,
0,
- split_type_name,
- split_id, param_name, param_val);
+ hdr_split_type,
+ hdr_split_id, param_name, param_val);
/* Dump registers */
offset += qed_grc_dump_regs_entries(p_hwfn,
@@ -2662,6 +2759,8 @@ static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn,
input_regs_arr,
dump_buf + offset,
dump,
+ split_type,
+ split_id,
block_enable,
&num_dumped_reg_entries);
@@ -2670,8 +2769,8 @@ static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn,
qed_grc_dump_regs_hdr(dump_buf,
dump,
num_dumped_reg_entries,
- split_type_name,
- split_id, param_name, param_val);
+ hdr_split_type,
+ hdr_split_id, param_name, param_val);
return num_dumped_reg_entries > 0 ? offset : 0;
}
@@ -2687,26 +2786,21 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
const char *param_name, const char *param_val)
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
- struct chip_platform_defs *chip_platform;
u32 offset = 0, input_offset = 0;
- struct chip_defs *chip;
- u8 port_id, pf_id, vf_id;
u16 fid;
-
- chip = &s_chip_defs[dev_data->chip_id];
- chip_platform = &chip->per_platform[dev_data->platform_id];
-
while (input_offset <
s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].size_in_dwords) {
const struct dbg_dump_split_hdr *split_hdr;
struct dbg_array curr_input_regs_arr;
+ enum init_split_types split_type;
+ u16 split_count = 0;
u32 split_data_size;
- u8 split_type_id;
+ u8 split_id;
split_hdr =
(const struct dbg_dump_split_hdr *)
&s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset++];
- split_type_id =
+ split_type =
GET_FIELD(split_hdr->hdr,
DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
split_data_size =
@@ -2716,99 +2810,44 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
&s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset];
curr_input_regs_arr.size_in_dwords = split_data_size;
- switch (split_type_id) {
+ switch (split_type) {
case SPLIT_TYPE_NONE:
- offset += qed_grc_dump_split_data(p_hwfn,
- p_ptt,
- curr_input_regs_arr,
- dump_buf + offset,
- dump,
- block_enable,
- "eng",
- (u32)(-1),
- param_name,
- param_val);
+ split_count = 1;
break;
-
case SPLIT_TYPE_PORT:
- for (port_id = 0; port_id < chip_platform->num_ports;
- port_id++) {
- if (dump)
- qed_port_pretend(p_hwfn, p_ptt,
- port_id);
- offset +=
- qed_grc_dump_split_data(p_hwfn, p_ptt,
- curr_input_regs_arr,
- dump_buf + offset,
- dump, block_enable,
- "port", port_id,
- param_name,
- param_val);
- }
+ split_count = dev_data->num_ports;
break;
-
case SPLIT_TYPE_PF:
case SPLIT_TYPE_PORT_PF:
- for (pf_id = 0; pf_id < chip_platform->num_pfs;
- pf_id++) {
- u8 pfid_shift =
- PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
-
- if (dump) {
- fid = pf_id << pfid_shift;
- qed_fid_pretend(p_hwfn, p_ptt, fid);
- }
-
- offset +=
- qed_grc_dump_split_data(p_hwfn,
- p_ptt,
- curr_input_regs_arr,
- dump_buf + offset,
- dump,
- block_enable,
- "pf",
- pf_id,
- param_name,
- param_val);
- }
+ split_count = dev_data->num_ports *
+ dev_data->num_pfs_per_port;
break;
-
case SPLIT_TYPE_VF:
- for (vf_id = 0; vf_id < chip_platform->num_vfs;
- vf_id++) {
- u8 vfvalid_shift =
- PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT;
- u8 vfid_shift =
- PXP_PRETEND_CONCRETE_FID_VFID_SHIFT;
-
- if (dump) {
- fid = BIT(vfvalid_shift) |
- (vf_id << vfid_shift);
- qed_fid_pretend(p_hwfn, p_ptt, fid);
- }
-
- offset +=
- qed_grc_dump_split_data(p_hwfn, p_ptt,
- curr_input_regs_arr,
- dump_buf + offset,
- dump, block_enable,
- "vf", vf_id,
- param_name,
- param_val);
- }
+ split_count = dev_data->num_vfs;
break;
-
default:
- break;
+ return 0;
}
+ for (split_id = 0; split_id < split_count; split_id++)
+ offset += qed_grc_dump_split_data(p_hwfn, p_ptt,
+ curr_input_regs_arr,
+ dump_buf + offset,
+ dump, block_enable,
+ split_type,
+ split_id,
+ param_name,
+ param_val);
+
input_offset += split_data_size;
}
- /* Pretend to original PF */
+ /* Cancel pretends (pretend to original PF) */
if (dump) {
fid = p_hwfn->rel_pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
qed_fid_pretend(p_hwfn, p_ptt, fid);
+ dev_data->pretend.split_type = SPLIT_TYPE_NONE;
+ dev_data->pretend.split_id = 0;
}
return offset;
@@ -2824,7 +2863,8 @@ static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn,
/* Calculate header size */
offset += qed_grc_dump_regs_hdr(dump_buf,
- false, 0, "eng", -1, NULL, NULL);
+ false, 0,
+ SPLIT_TYPE_NONE, 0, NULL, NULL);
/* Write reset registers */
for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
@@ -2837,14 +2877,15 @@ static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn,
dump,
BYTES_TO_DWORDS
(s_reset_regs_defs[i].addr), 1,
- false);
+ false, SPLIT_TYPE_NONE, 0);
num_regs++;
}
/* Write header */
if (dump)
qed_grc_dump_regs_hdr(dump_buf,
- true, num_regs, "eng", -1, NULL, NULL);
+ true, num_regs, SPLIT_TYPE_NONE,
+ 0, NULL, NULL);
return offset;
}
@@ -2863,7 +2904,8 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
/* Calculate header size */
offset += qed_grc_dump_regs_hdr(dump_buf,
- false, 0, "eng", -1, NULL, NULL);
+ false, 0, SPLIT_TYPE_NONE,
+ 0, NULL, NULL);
/* Write parity registers */
for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
@@ -2898,7 +2940,8 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
dump_buf + offset,
dump,
addr,
- 1, false);
+ 1, false,
+ SPLIT_TYPE_NONE, 0);
addr = GET_FIELD(reg_data->data,
DBG_ATTN_REG_STS_ADDRESS);
offset += qed_grc_dump_reg_entry(p_hwfn,
@@ -2906,7 +2949,8 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
dump_buf + offset,
dump,
addr,
- 1, false);
+ 1, false,
+ SPLIT_TYPE_NONE, 0);
num_reg_entries += 2;
}
}
@@ -2928,7 +2972,7 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
dump,
addr,
1,
- false);
+ false, SPLIT_TYPE_NONE, 0);
num_reg_entries++;
}
@@ -2936,7 +2980,8 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
if (dump)
qed_grc_dump_regs_hdr(dump_buf,
true,
- num_reg_entries, "eng", -1, NULL, NULL);
+ num_reg_entries, SPLIT_TYPE_NONE,
+ 0, NULL, NULL);
return offset;
}
@@ -2949,7 +2994,8 @@ static u32 qed_grc_dump_special_regs(struct qed_hwfn *p_hwfn,
u32 offset = 0, addr;
offset += qed_grc_dump_regs_hdr(dump_buf,
- dump, 2, "eng", -1, NULL, NULL);
+ dump, 2, SPLIT_TYPE_NONE, 0,
+ NULL, NULL);
/* Dump R/TDIF_REG_DEBUG_ERROR_INFO_SIZE (every 8'th register should be
* skipped).
@@ -3095,7 +3141,8 @@ static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn,
offset += qed_grc_dump_addr_range(p_hwfn,
p_ptt,
dump_buf + offset,
- dump, addr, len, wide_bus);
+ dump, addr, len, wide_bus,
+ SPLIT_TYPE_NONE, 0);
return offset;
}
@@ -3234,12 +3281,12 @@ static u32 qed_grc_dump_memories(struct qed_hwfn *p_hwfn,
s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].size_in_dwords) {
const struct dbg_dump_split_hdr *split_hdr;
struct dbg_array curr_input_mems_arr;
+ enum init_split_types split_type;
u32 split_data_size;
- u8 split_type_id;
split_hdr = (const struct dbg_dump_split_hdr *)
&s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset++];
- split_type_id =
+ split_type =
GET_FIELD(split_hdr->hdr,
DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
split_data_size =
@@ -3249,20 +3296,15 @@ static u32 qed_grc_dump_memories(struct qed_hwfn *p_hwfn,
&s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset];
curr_input_mems_arr.size_in_dwords = split_data_size;
- switch (split_type_id) {
- case SPLIT_TYPE_NONE:
+ if (split_type == SPLIT_TYPE_NONE)
offset += qed_grc_dump_mem_entries(p_hwfn,
p_ptt,
curr_input_mems_arr,
dump_buf + offset,
dump);
- break;
-
- default:
+ else
DP_NOTICE(p_hwfn,
"Dumping split memories is currently not supported\n");
- break;
- }
input_offset += split_data_size;
}
@@ -3622,7 +3664,8 @@ static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn,
dump,
addr,
num_dwords_to_read,
- false);
+ false,
+ SPLIT_TYPE_NONE, 0);
total_dwords -= num_dwords_to_read;
rss_addr++;
}
@@ -3650,8 +3693,8 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
BIT(big_ram->is_256b_bit_offset[dev_data->chip_id]) ? 256
: 128;
- strscpy(type_name, big_ram->instance_name, sizeof(type_name));
- strscpy(mem_name, big_ram->instance_name, sizeof(mem_name));
+ strncpy(type_name, big_ram->instance_name, BIG_RAM_NAME_LEN);
+ strncpy(mem_name, big_ram->instance_name, BIG_RAM_NAME_LEN);
/* Dump memory header */
offset += qed_grc_dump_mem_hdr(p_hwfn,
@@ -3681,7 +3724,7 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
dump,
addr,
len,
- false);
+ false, SPLIT_TYPE_NONE, 0);
}
return offset;
@@ -3730,7 +3773,8 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
/* Dump required non-MCP registers */
offset += qed_grc_dump_regs_hdr(dump_buf + offset,
- dump, 1, "eng", -1, "block", "MCP");
+ dump, 1, SPLIT_TYPE_NONE, 0,
+ "block", "MCP");
addr = BYTES_TO_DWORDS(MISC_REG_SHARED_MEM_ADDR);
offset += qed_grc_dump_reg_entry(p_hwfn,
p_ptt,
@@ -3738,7 +3782,7 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
dump,
addr,
1,
- false);
+ false, SPLIT_TYPE_NONE, 0);
/* Release MCP */
if (halted && qed_mcp_resume(p_hwfn, p_ptt))
@@ -3922,7 +3966,8 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
dump,
addr,
len,
- true);
+ true, SPLIT_TYPE_NONE,
+ 0);
}
/* Disable block's client and debug output */
@@ -3948,28 +3993,15 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
bool parities_masked = false;
- u8 i, port_mode = 0;
u32 offset = 0;
+ u8 i;
*num_dumped_dwords = 0;
+ dev_data->num_regs_read = 0;
- if (dump) {
- /* Find port mode */
- switch (qed_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE)) {
- case 0:
- port_mode = 1;
- break;
- case 1:
- port_mode = 2;
- break;
- case 2:
- port_mode = 4;
- break;
- }
-
- /* Update reset state */
+ /* Update reset state */
+ if (dump)
qed_update_blocks_reset_state(p_hwfn, p_ptt);
- }
/* Dump global params */
offset += qed_dump_common_global_params(p_hwfn,
@@ -3988,7 +4020,7 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
qed_grc_get_param(p_hwfn,
DBG_GRC_PARAM_NUM_LTIDS));
offset += qed_dump_num_param(dump_buf + offset,
- dump, "num-ports", port_mode);
+ dump, "num-ports", dev_data->num_ports);
/* Dump reset registers (dumped before taking blocks out of reset ) */
if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
@@ -4092,10 +4124,10 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
offset += qed_grc_dump_phy(p_hwfn,
p_ptt, dump_buf + offset, dump);
- /* Dump static debug data */
+ /* Dump static debug data (only if not during debug bus recording) */
if (qed_grc_is_included(p_hwfn,
DBG_GRC_PARAM_DUMP_STATIC) &&
- dev_data->bus.state == DBG_BUS_STATE_IDLE)
+ (!dump || dev_data->bus.state == DBG_BUS_STATE_IDLE))
offset += qed_grc_dump_static_debug(p_hwfn,
p_ptt,
dump_buf + offset, dump);
@@ -4249,7 +4281,8 @@ static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn,
dump_buf + offset,
dump,
addr,
- reg->size, wide_bus);
+ reg->size, wide_bus,
+ SPLIT_TYPE_NONE, 0);
}
}
@@ -4372,7 +4405,8 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
next_reg_offset,
dump, addr,
reg->entry_size,
- wide_bus);
+ wide_bus,
+ SPLIT_TYPE_NONE, 0);
}
/* Call rule condition function.
@@ -4722,7 +4756,8 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
dump_buf + offset,
dump,
BYTES_TO_DWORDS(trace_data_grc_addr),
- trace_data_size_dwords, false);
+ trace_data_size_dwords, false,
+ SPLIT_TYPE_NONE, 0);
/* Resume MCP (only if halt succeeded) */
if (halted && qed_mcp_resume(p_hwfn, p_ptt))
@@ -4828,7 +4863,8 @@ static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
true,
addr,
len,
- true);
+ true, SPLIT_TYPE_NONE,
+ 0);
fifo_has_data = qed_rd(p_hwfn, p_ptt,
GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
}
@@ -4897,7 +4933,8 @@ static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
true,
addr,
len,
- true);
+ true, SPLIT_TYPE_NONE,
+ 0);
fifo_has_data = qed_rd(p_hwfn, p_ptt,
IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
}
@@ -4955,7 +4992,7 @@ static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
true,
addr,
override_window_dwords,
- true);
+ true, SPLIT_TYPE_NONE, 0);
qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
override_window_dwords);
out:
@@ -4997,7 +5034,7 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
continue;
/* Read FW info for the current Storm */
- qed_read_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);
+ qed_read_storm_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);
asserts = &fw_info.fw_asserts_section;
@@ -5035,7 +5072,7 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
dump_buf + offset,
dump, addr,
asserts->list_element_dword_size,
- false);
+ false, SPLIT_TYPE_NONE, 0);
}
/* Dump last section */
@@ -5062,6 +5099,28 @@ enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr)
return DBG_STATUS_OK;
}
+bool qed_read_fw_info(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, struct fw_info *fw_info)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u8 storm_id;
+
+ for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
+ struct storm_defs *storm = &s_storm_defs[storm_id];
+
+ /* Skip Storm if it's in reset */
+ if (dev_data->block_in_reset[storm->block_id])
+ continue;
+
+ /* Read FW info for the current Storm */
+ qed_read_storm_fw_info(p_hwfn, p_ptt, storm_id, fw_info);
+
+ return true;
+ }
+
+ return false;
+}
+
/* Assign default GRC param values */
void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn)
{
@@ -7778,6 +7837,57 @@ int qed_dbg_igu_fifo_size(struct qed_dev *cdev)
return qed_dbg_feature_size(cdev, DBG_FEATURE_IGU_FIFO);
}
+int qed_dbg_nvm_image_length(struct qed_hwfn *p_hwfn,
+ enum qed_nvm_images image_id, u32 *length)
+{
+ struct qed_nvm_image_att image_att;
+ int rc;
+
+ *length = 0;
+ rc = qed_mcp_get_nvm_image_att(p_hwfn, image_id, &image_att);
+ if (rc)
+ return rc;
+
+ *length = image_att.length;
+
+ return rc;
+}
+
+int qed_dbg_nvm_image(struct qed_dev *cdev, void *buffer,
+ u32 *num_dumped_bytes, enum qed_nvm_images image_id)
+{
+ struct qed_hwfn *p_hwfn =
+ &cdev->hwfns[cdev->dbg_params.engine_for_debug];
+ u32 len_rounded, i;
+ __be32 val;
+ int rc;
+
+ *num_dumped_bytes = 0;
+ rc = qed_dbg_nvm_image_length(p_hwfn, image_id, &len_rounded);
+ if (rc)
+ return rc;
+
+ DP_NOTICE(p_hwfn->cdev,
+ "Collecting a debug feature [\"nvram image %d\"]\n",
+ image_id);
+
+ len_rounded = roundup(len_rounded, sizeof(u32));
+ rc = qed_mcp_get_nvm_image(p_hwfn, image_id, buffer, len_rounded);
+ if (rc)
+ return rc;
+
+ /* QED_NVM_IMAGE_NVM_META image is not swapped like other images */
+ if (image_id != QED_NVM_IMAGE_NVM_META)
+ for (i = 0; i < len_rounded; i += 4) {
+ val = cpu_to_be32(*(u32 *)(buffer + i));
+ *(u32 *)(buffer + i) = val;
+ }
+
+ *num_dumped_bytes = len_rounded;
+
+ return rc;
+}
+
int qed_dbg_protection_override(struct qed_dev *cdev, void *buffer,
u32 *num_dumped_bytes)
{
@@ -7831,6 +7941,9 @@ enum debug_print_features {
IGU_FIFO = 6,
PHY = 7,
FW_ASSERTS = 8,
+ NVM_CFG1 = 9,
+ DEFAULT_CFG = 10,
+ NVM_META = 11,
};
static u32 qed_calc_regdump_header(enum debug_print_features feature,
@@ -7965,13 +8078,61 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
DP_ERR(cdev, "qed_dbg_mcp_trace failed. rc = %d\n", rc);
}
+ /* nvm cfg1 */
+ rc = qed_dbg_nvm_image(cdev,
+ (u8 *)buffer + offset + REGDUMP_HEADER_SIZE,
+ &feature_size, QED_NVM_IMAGE_NVM_CFG1);
+ if (!rc) {
+ *(u32 *)((u8 *)buffer + offset) =
+ qed_calc_regdump_header(NVM_CFG1, cur_engine,
+ feature_size, omit_engine);
+ offset += (feature_size + REGDUMP_HEADER_SIZE);
+ } else if (rc != -ENOENT) {
+ DP_ERR(cdev,
+ "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
+ QED_NVM_IMAGE_NVM_CFG1, "QED_NVM_IMAGE_NVM_CFG1", rc);
+ }
+
+ /* nvm default */
+ rc = qed_dbg_nvm_image(cdev,
+ (u8 *)buffer + offset + REGDUMP_HEADER_SIZE,
+ &feature_size, QED_NVM_IMAGE_DEFAULT_CFG);
+ if (!rc) {
+ *(u32 *)((u8 *)buffer + offset) =
+ qed_calc_regdump_header(DEFAULT_CFG, cur_engine,
+ feature_size, omit_engine);
+ offset += (feature_size + REGDUMP_HEADER_SIZE);
+ } else if (rc != -ENOENT) {
+ DP_ERR(cdev,
+ "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
+ QED_NVM_IMAGE_DEFAULT_CFG, "QED_NVM_IMAGE_DEFAULT_CFG",
+ rc);
+ }
+
+ /* nvm meta */
+ rc = qed_dbg_nvm_image(cdev,
+ (u8 *)buffer + offset + REGDUMP_HEADER_SIZE,
+ &feature_size, QED_NVM_IMAGE_NVM_META);
+ if (!rc) {
+ *(u32 *)((u8 *)buffer + offset) =
+ qed_calc_regdump_header(NVM_META, cur_engine,
+ feature_size, omit_engine);
+ offset += (feature_size + REGDUMP_HEADER_SIZE);
+ } else if (rc != -ENOENT) {
+ DP_ERR(cdev,
+ "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
+ QED_NVM_IMAGE_NVM_META, "QED_NVM_IMAGE_NVM_META", rc);
+ }
+
return 0;
}
int qed_dbg_all_data_size(struct qed_dev *cdev)
{
+ struct qed_hwfn *p_hwfn =
+ &cdev->hwfns[cdev->dbg_params.engine_for_debug];
+ u32 regs_len = 0, image_len = 0;
u8 cur_engine, org_engine;
- u32 regs_len = 0;
org_engine = qed_get_debug_engine(cdev);
for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
@@ -7993,6 +8154,15 @@ int qed_dbg_all_data_size(struct qed_dev *cdev)
/* Engine common */
regs_len += REGDUMP_HEADER_SIZE + qed_dbg_mcp_trace_size(cdev);
+ qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_NVM_CFG1, &image_len);
+ if (image_len)
+ regs_len += REGDUMP_HEADER_SIZE + image_len;
+ qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_DEFAULT_CFG, &image_len);
+ if (image_len)
+ regs_len += REGDUMP_HEADER_SIZE + image_len;
+ qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_NVM_META, &image_len);
+ if (image_len)
+ regs_len += REGDUMP_HEADER_SIZE + image_len;
return regs_len;
}
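
Each NVM image is emitted as a regdump header dword followed by the rounded-up payload, and every image except NVM_META is byte-swapped to big-endian one dword at a time; a sketch of that swap, assuming a dword-aligned buffer of len_rounded bytes:

/* Sketch only: per-dword big-endian swap applied to NVM images other
 * than QED_NVM_IMAGE_NVM_META.
 */
static void nvm_image_to_be32(u8 *buf, u32 len_rounded)
{
	u32 i;

	for (i = 0; i < len_rounded; i += 4)
		*(__be32 *)(buf + i) = cpu_to_be32(*(u32 *)(buf + i));
}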
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index d2ad5e92c74f..b285edc8d6a1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -1098,7 +1098,7 @@ int qed_final_cleanup(struct qed_hwfn *p_hwfn,
}
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
- "Sending final cleanup for PFVF[%d] [Command %08x\n]",
+ "Sending final cleanup for PFVF[%d] [Command %08x]\n",
id, command);
qed_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command);
@@ -1149,18 +1149,10 @@ static int qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
return -EINVAL;
}
- switch (p_hwfn->cdev->mf_mode) {
- case QED_MF_DEFAULT:
- case QED_MF_NPAR:
- hw_mode |= 1 << MODE_MF_SI;
- break;
- case QED_MF_OVLAN:
+ if (test_bit(QED_MF_OVLAN_CLSS, &p_hwfn->cdev->mf_bits))
hw_mode |= 1 << MODE_MF_SD;
- break;
- default:
- DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n");
+ else
hw_mode |= 1 << MODE_MF_SI;
- }
hw_mode |= 1 << MODE_ASIC;
@@ -1507,6 +1499,11 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1);
STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET,
p_hwfn->hw_info.ovlan);
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+ "Configuring LLH_FUNC_FILTER_HDR_SEL\n");
+ STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET,
+ 1);
}
/* Enable classification by MAC if needed */
@@ -1557,7 +1554,6 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
/* send function start command */
rc = qed_sp_pf_start(p_hwfn, p_ptt, p_tunn,
- p_hwfn->cdev->mf_mode,
allow_npar_tx_switch);
if (rc) {
DP_NOTICE(p_hwfn, "Function start ramrod failed\n");
@@ -1644,6 +1640,7 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
bool b_default_mtu = true;
struct qed_hwfn *p_hwfn;
int rc = 0, mfw_rc, i;
+ u16 ether_type;
if ((p_params->int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
@@ -1677,6 +1674,24 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
if (rc)
return rc;
+ if (IS_PF(cdev) && (test_bit(QED_MF_8021Q_TAGGING,
+ &cdev->mf_bits) ||
+ test_bit(QED_MF_8021AD_TAGGING,
+ &cdev->mf_bits))) {
+ if (test_bit(QED_MF_8021Q_TAGGING, &cdev->mf_bits))
+ ether_type = ETH_P_8021Q;
+ else
+ ether_type = ETH_P_8021AD;
+ STORE_RT_REG(p_hwfn, PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET,
+ ether_type);
+ STORE_RT_REG(p_hwfn, NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET,
+ ether_type);
+ STORE_RT_REG(p_hwfn, PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET,
+ ether_type);
+ STORE_RT_REG(p_hwfn, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET,
+ ether_type);
+ }
+
qed_fill_load_req_params(&load_req_params,
p_params->p_drv_load_params);
rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
@@ -2639,31 +2654,57 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
link->pause.autoneg,
p_caps->default_eee, p_caps->eee_lpi_timer);
- /* Read Multi-function information from shmem */
- addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
- offsetof(struct nvm_cfg1, glob) +
- offsetof(struct nvm_cfg1_glob, generic_cont0);
+ if (IS_LEAD_HWFN(p_hwfn)) {
+ struct qed_dev *cdev = p_hwfn->cdev;
- generic_cont0 = qed_rd(p_hwfn, p_ptt, addr);
+ /* Read Multi-function information from shmem */
+ addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+ offsetof(struct nvm_cfg1, glob) +
+ offsetof(struct nvm_cfg1_glob, generic_cont0);
- mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >>
- NVM_CFG1_GLOB_MF_MODE_OFFSET;
+ generic_cont0 = qed_rd(p_hwfn, p_ptt, addr);
- switch (mf_mode) {
- case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
- p_hwfn->cdev->mf_mode = QED_MF_OVLAN;
- break;
- case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
- p_hwfn->cdev->mf_mode = QED_MF_NPAR;
- break;
- case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
- p_hwfn->cdev->mf_mode = QED_MF_DEFAULT;
- break;
+ mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >>
+ NVM_CFG1_GLOB_MF_MODE_OFFSET;
+
+ switch (mf_mode) {
+ case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
+ cdev->mf_bits = BIT(QED_MF_OVLAN_CLSS);
+ break;
+ case NVM_CFG1_GLOB_MF_MODE_UFP:
+ cdev->mf_bits = BIT(QED_MF_OVLAN_CLSS) |
+ BIT(QED_MF_LLH_PROTO_CLSS) |
+ BIT(QED_MF_UFP_SPECIFIC) |
+ BIT(QED_MF_8021Q_TAGGING);
+ break;
+ case NVM_CFG1_GLOB_MF_MODE_BD:
+ cdev->mf_bits = BIT(QED_MF_OVLAN_CLSS) |
+ BIT(QED_MF_LLH_PROTO_CLSS) |
+ BIT(QED_MF_8021AD_TAGGING);
+ break;
+ case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
+ cdev->mf_bits = BIT(QED_MF_LLH_MAC_CLSS) |
+ BIT(QED_MF_LLH_PROTO_CLSS) |
+ BIT(QED_MF_LL2_NON_UNICAST) |
+ BIT(QED_MF_INTER_PF_SWITCH);
+ break;
+ case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
+ cdev->mf_bits = BIT(QED_MF_LLH_MAC_CLSS) |
+ BIT(QED_MF_LLH_PROTO_CLSS) |
+ BIT(QED_MF_LL2_NON_UNICAST);
+ if (QED_IS_BB(p_hwfn->cdev))
+ cdev->mf_bits |= BIT(QED_MF_NEED_DEF_PF);
+ break;
+ }
+
+ DP_INFO(p_hwfn, "Multi function mode is 0x%lx\n",
+ cdev->mf_bits);
}
- DP_INFO(p_hwfn, "Multi function mode is %08x\n",
- p_hwfn->cdev->mf_mode);
- /* Read Multi-function information from shmem */
+ DP_INFO(p_hwfn, "Multi function mode is 0x%lx\n",
+ p_hwfn->cdev->mf_bits);
+
+ /* Read device capabilities information from shmem */
addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
offsetof(struct nvm_cfg1, glob) +
offsetof(struct nvm_cfg1_glob, device_capabilities);
@@ -2751,7 +2792,7 @@ static void qed_hw_info_port_num_bb(struct qed_hwfn *p_hwfn,
{
u32 port_mode;
- port_mode = qed_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB_B0);
+ port_mode = qed_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB);
if (port_mode < 3) {
p_hwfn->cdev->num_ports_in_engine = 1;
@@ -2856,6 +2897,8 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn,
qed_mcp_cmd_port_init(p_hwfn, p_ptt);
qed_get_eee_caps(p_hwfn, p_ptt);
+
+ qed_mcp_read_ufp_config(p_hwfn, p_ptt);
}
if (qed_mcp_is_init(p_hwfn)) {
@@ -3462,7 +3505,7 @@ int qed_llh_add_mac_filter(struct qed_hwfn *p_hwfn,
u32 high = 0, low = 0, en;
int i;
- if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
+ if (!test_bit(QED_MF_LLH_MAC_CLSS, &p_hwfn->cdev->mf_bits))
return 0;
qed_llh_mac_to_filter(&high, &low, p_filter);
@@ -3507,7 +3550,7 @@ void qed_llh_remove_mac_filter(struct qed_hwfn *p_hwfn,
u32 high = 0, low = 0;
int i;
- if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
+ if (!test_bit(QED_MF_LLH_MAC_CLSS, &p_hwfn->cdev->mf_bits))
return;
qed_llh_mac_to_filter(&high, &low, p_filter);
@@ -3549,7 +3592,7 @@ qed_llh_add_protocol_filter(struct qed_hwfn *p_hwfn,
u32 high = 0, low = 0, en;
int i;
- if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
+ if (!test_bit(QED_MF_LLH_PROTO_CLSS, &p_hwfn->cdev->mf_bits))
return 0;
switch (type) {
@@ -3647,7 +3690,7 @@ qed_llh_remove_protocol_filter(struct qed_hwfn *p_hwfn,
u32 high = 0, low = 0;
int i;
- if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
+ if (!test_bit(QED_MF_LLH_PROTO_CLSS, &p_hwfn->cdev->mf_bits))
return;
switch (type) {
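
The tagging setup in qed_hw_init() programs one steering ethertype chosen from the mf_bits; a sketch of the selection (ETH_P_* constants from if_ether.h):

/* Sketch only: steering ethertype choice when either tagging bit is
 * set; 802.1Q takes precedence, matching the code above.
 */
static u16 steering_ethertype(unsigned long mf_bits)
{
	return test_bit(QED_MF_8021Q_TAGGING, &mf_bits) ? ETH_P_8021Q
							: ETH_P_8021AD;
}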
diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
index 2dc9b312a795..cc1b373c0ace 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
@@ -313,6 +313,9 @@ qed_sp_fcoe_conn_offload(struct qed_hwfn *p_hwfn,
p_data->d_id.addr_mid = p_conn->d_id.addr_mid;
p_data->d_id.addr_lo = p_conn->d_id.addr_lo;
p_data->flags = p_conn->flags;
+ if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
+ SET_FIELD(p_data->flags,
+ FCOE_CONN_OFFLOAD_RAMROD_DATA_B_SINGLE_VLAN, 1);
p_data->def_q_idx = p_conn->def_q_idx;
return qed_spq_post(p_hwfn, p_ent, NULL);
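The SET_FIELD() call above goes through the <NAME>_MASK / <NAME>_SHIFT macro pairs that accompany each field. A self-contained sketch of the same mask/shift pattern (the DEMO_* names are invented for illustration, not qed definitions):

    #include <linux/types.h>

    #define DEMO_SINGLE_VLAN_MASK	0x1
    #define DEMO_SINGLE_VLAN_SHIFT	8

    /* Equivalent in spirit to SET_FIELD(value, DEMO_SINGLE_VLAN, v). */
    static inline u16 demo_set_field(u16 value, u16 mask, u16 shift, u16 v)
    {
    	value &= ~(mask << shift);	/* clear the field */
    	value |= (v & mask) << shift;	/* write the new value */
    	return value;
    }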
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index 7f5ec42dde48..bee10c1781fb 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -1095,14 +1095,16 @@ enum personality_type {
struct pf_start_tunnel_config {
u8 set_vxlan_udp_port_flg;
u8 set_geneve_udp_port_flg;
+ u8 set_no_inner_l2_vxlan_udp_port_flg;
u8 tunnel_clss_vxlan;
u8 tunnel_clss_l2geneve;
u8 tunnel_clss_ipgeneve;
u8 tunnel_clss_l2gre;
u8 tunnel_clss_ipgre;
- u8 reserved;
__le16 vxlan_udp_port;
__le16 geneve_udp_port;
+ __le16 no_inner_l2_vxlan_udp_port;
+ __le16 reserved[3];
};
/* Ramrod data for PF start ramrod */
@@ -1145,14 +1147,17 @@ struct pf_update_tunnel_config {
u8 update_rx_def_non_ucast_clss;
u8 set_vxlan_udp_port_flg;
u8 set_geneve_udp_port_flg;
+ u8 set_no_inner_l2_vxlan_udp_port_flg;
u8 tunnel_clss_vxlan;
u8 tunnel_clss_l2geneve;
u8 tunnel_clss_ipgeneve;
u8 tunnel_clss_l2gre;
u8 tunnel_clss_ipgre;
+ u8 reserved;
__le16 vxlan_udp_port;
__le16 geneve_udp_port;
- __le16 reserved;
+ __le16 no_inner_l2_vxlan_udp_port;
+ __le16 reserved1[3];
};
/* Data for port update ramrod */
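The new flag/port pair in both tunnel-config structs lets the driver advertise a dedicated UDP port for VXLAN traffic that carries no inner L2 header. Illustrative use only; the p_cfg field path and the port number are assumptions, not code from this patch:

    struct pf_update_tunnel_config *p_cfg = &p_ramrod->tunnel_config;

    p_cfg->set_no_inner_l2_vxlan_udp_port_flg = 1;
    p_cfg->no_inner_l2_vxlan_udp_port = cpu_to_le16(4790);	/* example */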
@@ -2535,7 +2540,14 @@ struct idle_chk_data {
u16 reserved2;
};
-/* Debug Tools data (per HW function) */
+struct pretend_params {
+ u8 split_type;
+ u8 reserved;
+ u16 split_id;
+};
+
+/* Debug Tools data (per HW function) */
struct dbg_tools_data {
struct dbg_grc_data grc;
struct dbg_bus_data bus;
@@ -2544,8 +2556,13 @@ struct dbg_tools_data {
u8 block_in_reset[88];
u8 chip_id;
u8 platform_id;
+ u8 num_ports;
+ u8 num_pfs_per_port;
+ u8 num_vfs;
u8 initialized;
u8 use_dmae;
+ u8 reserved;
+ struct pretend_params pretend;
u32 num_regs_read;
};
@@ -2975,6 +2992,24 @@ void qed_read_regs(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *buf, u32 addr, u32 len);
/**
+ * @brief qed_read_fw_info - Reads FW info from the chip.
+ *
+ * The FW info contains FW-related information, such as the FW version,
+ * FW image (main/L2B/kuku), FW timestamp, etc.
+ * The FW info is read from the internal RAM of the first Storm that is not in
+ * reset.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param fw_info - Out: a pointer to write the FW info into.
+ *
+ * @return true if the FW info was read successfully from one of the Storms,
+ * or false if all Storms are in reset.
+ */
+bool qed_read_fw_info(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, struct fw_info *fw_info);
+
+/**
* @brief qed_dbg_grc_set_params_default - Reverts all GRC parameters to their
* default value.
*
@@ -4110,6 +4145,21 @@ void qed_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type);
*/
void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type);
+#define NUM_STORMS 6
+
+/**
+ * @brief qed_set_rdma_error_level - Sets the RDMA assert level.
+ * If the severity of an error exceeds
+ * this level, the FW will assert.
+ * @param p_hwfn - HW device data
+ * @param p_ptt - ptt window used for writing the registers
+ * @param assert_level - An array of assert levels for each storm.
+ *
+ */
+void qed_set_rdma_error_level(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u8 assert_level[NUM_STORMS]);
+
/* Ystorm flow control mode. Use enum fw_flow_ctrl_mode */
#define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base)
#define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size)
@@ -4340,27 +4390,67 @@ void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type);
(IRO[46].base + ((rdma_stat_counter_id) * IRO[46].m1))
#define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[46].size)
+/* Xstorm error level for assert */
+#define XSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
+ (IRO[47].base + ((pf_id) * IRO[47].m1))
+#define XSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[47].size)
+
+/* Ystorm error level for assert */
+#define YSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
+ (IRO[48].base + ((pf_id) * IRO[48].m1))
+#define YSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[48].size)
+
+/* Pstorm error level for assert */
+#define PSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
+ (IRO[49].base + ((pf_id) * IRO[49].m1))
+#define PSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[49].size)
+
+/* Tstorm error level for assert */
+#define TSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
+ (IRO[50].base + ((pf_id) * IRO[50].m1))
+#define TSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[50].size)
+
+/* Mstorm error level for assert */
+#define MSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
+ (IRO[51].base + ((pf_id) * IRO[51].m1))
+#define MSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[51].size)
+
+/* Ustorm error level for assert */
+#define USTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
+ (IRO[52].base + ((pf_id) * IRO[52].m1))
+#define USTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[52].size)
+
/* Xstorm iWARP rxmit stats */
#define XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) \
- (IRO[47].base + ((pf_id) * IRO[47].m1))
-#define XSTORM_IWARP_RXMIT_STATS_SIZE (IRO[47].size)
+ (IRO[53].base + ((pf_id) * IRO[53].m1))
+#define XSTORM_IWARP_RXMIT_STATS_SIZE (IRO[53].size)
/* Tstorm RoCE Event Statistics */
#define TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) \
- (IRO[48].base + ((roce_pf_id) * IRO[48].m1))
-#define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[48].size)
+ (IRO[54].base + ((roce_pf_id) * IRO[54].m1))
+#define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[54].size)
/* DCQCN Received Statistics */
#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id) \
- (IRO[49].base + ((roce_pf_id) * IRO[49].m1))
-#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE (IRO[49].size)
+ (IRO[55].base + ((roce_pf_id) * IRO[55].m1))
+#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE (IRO[55].size)
+
+/* RoCE Error Statistics */
+#define YSTORM_ROCE_ERROR_STATS_OFFSET(roce_pf_id) \
+ (IRO[56].base + ((roce_pf_id) * IRO[56].m1))
+#define YSTORM_ROCE_ERROR_STATS_SIZE (IRO[56].size)
/* DCQCN Sent Statistics */
#define PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) \
- (IRO[50].base + ((roce_pf_id) * IRO[50].m1))
-#define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE (IRO[50].size)
+ (IRO[57].base + ((roce_pf_id) * IRO[57].m1))
+#define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE (IRO[57].size)
-static const struct iro iro_arr[51] = {
+/* RoCE CQEs Statistics */
+#define USTORM_ROCE_CQE_STATS_OFFSET(roce_pf_id) \
+ (IRO[58].base + ((roce_pf_id) * IRO[58].m1))
+#define USTORM_ROCE_CQE_STATS_SIZE (IRO[58].size)
+
+static const struct iro iro_arr[59] = {
{0x0, 0x0, 0x0, 0x0, 0x8},
{0x4cb8, 0x88, 0x0, 0x0, 0x88},
{0x6530, 0x20, 0x0, 0x0, 0x20},
@@ -4408,10 +4498,18 @@ static const struct iro iro_arr[51] = {
{0x10768, 0x20, 0x0, 0x0, 0x20},
{0x2d48, 0x80, 0x0, 0x0, 0x10},
{0x5048, 0x10, 0x0, 0x0, 0x10},
+ {0xc748, 0x8, 0x0, 0x0, 0x1},
+ {0xa128, 0x8, 0x0, 0x0, 0x1},
+ {0x10f00, 0x8, 0x0, 0x0, 0x1},
+ {0xf030, 0x8, 0x0, 0x0, 0x1},
+ {0x13028, 0x8, 0x0, 0x0, 0x1},
+ {0x12c58, 0x8, 0x0, 0x0, 0x1},
{0xc9b8, 0x30, 0x0, 0x0, 0x10},
- {0xed90, 0x10, 0x0, 0x0, 0x10},
- {0xa3a0, 0x10, 0x0, 0x0, 0x10},
+ {0xed90, 0x28, 0x0, 0x0, 0x28},
+ {0xa520, 0x18, 0x0, 0x0, 0x18},
+ {0xa6a0, 0x8, 0x0, 0x0, 0x8},
{0x13108, 0x8, 0x0, 0x0, 0x8},
+ {0x13c50, 0x18, 0x0, 0x0, 0x18},
};
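Each *_OFFSET macro resolves through iro_arr as a base plus a per-index stride, which is why the six new RDMA assert-level entries shift every later IRO index and grow the table to 59. A sketch, assuming qed's struct iro layout of base/m1/m2/m3/size:

    /* IRO[50] is the TSTORM_RDMA_ASSERT_LEVEL entry per the macros above. */
    static u32 tstorm_rdma_assert_addr(u8 pf_id)
    {
    	const struct iro *p = &iro_arr[50];

    	return p->base + pf_id * p->m1;
    }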
/* Runtime array offsets */
@@ -4797,147 +4895,147 @@ static const struct iro iro_arr[51] = {
#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 39769
#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16
#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 39785
-#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 39786
-#define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET 39787
-#define NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE 8
-#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_OFFSET 39795
-#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_SIZE 1024
-#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_OFFSET 40819
-#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_SIZE 512
-#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_OFFSET 41331
-#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_SIZE 512
-#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 41843
-#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 512
-#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_OFFSET 42355
-#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_SIZE 512
-#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_OFFSET 42867
-#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_SIZE 32
-#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 42899
-#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 42900
-#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 42901
-#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 42902
-#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 42903
-#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 42904
-#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 42905
-#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 42906
-#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 42907
-#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 42908
-#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 42909
-#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 42910
-#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 42911
-#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 42912
-#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 42913
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 42914
-#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 42915
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 42916
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 42917
-#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 42918
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 42919
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 42920
-#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 42921
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 42922
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 42923
-#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 42924
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 42925
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 42926
-#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 42927
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 42928
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 42929
-#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 42930
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 42931
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 42932
-#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 42933
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 42934
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 42935
-#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 42936
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 42937
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 42938
-#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 42939
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 42940
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 42941
-#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 42942
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 42943
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 42944
-#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 42945
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 42946
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 42947
-#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 42948
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 42949
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 42950
-#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 42951
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 42952
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 42953
-#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 42954
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 42955
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 42956
-#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 42957
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 42958
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 42959
-#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 42960
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 42961
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 42962
-#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 42963
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 42964
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 42965
-#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 42966
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 42967
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 42968
-#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 42969
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 42970
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 42971
-#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 42972
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 42973
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ20_RT_OFFSET 42974
-#define PBF_REG_BTB_GUARANTEED_VOQ20_RT_OFFSET 42975
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ20_RT_OFFSET 42976
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ21_RT_OFFSET 42977
-#define PBF_REG_BTB_GUARANTEED_VOQ21_RT_OFFSET 42978
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ21_RT_OFFSET 42979
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ22_RT_OFFSET 42980
-#define PBF_REG_BTB_GUARANTEED_VOQ22_RT_OFFSET 42981
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ22_RT_OFFSET 42982
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ23_RT_OFFSET 42983
-#define PBF_REG_BTB_GUARANTEED_VOQ23_RT_OFFSET 42984
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ23_RT_OFFSET 42985
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ24_RT_OFFSET 42986
-#define PBF_REG_BTB_GUARANTEED_VOQ24_RT_OFFSET 42987
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ24_RT_OFFSET 42988
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ25_RT_OFFSET 42989
-#define PBF_REG_BTB_GUARANTEED_VOQ25_RT_OFFSET 42990
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ25_RT_OFFSET 42991
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ26_RT_OFFSET 42992
-#define PBF_REG_BTB_GUARANTEED_VOQ26_RT_OFFSET 42993
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ26_RT_OFFSET 42994
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ27_RT_OFFSET 42995
-#define PBF_REG_BTB_GUARANTEED_VOQ27_RT_OFFSET 42996
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ27_RT_OFFSET 42997
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ28_RT_OFFSET 42998
-#define PBF_REG_BTB_GUARANTEED_VOQ28_RT_OFFSET 42999
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ28_RT_OFFSET 43000
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ29_RT_OFFSET 43001
-#define PBF_REG_BTB_GUARANTEED_VOQ29_RT_OFFSET 43002
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ29_RT_OFFSET 43003
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ30_RT_OFFSET 43004
-#define PBF_REG_BTB_GUARANTEED_VOQ30_RT_OFFSET 43005
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ30_RT_OFFSET 43006
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ31_RT_OFFSET 43007
-#define PBF_REG_BTB_GUARANTEED_VOQ31_RT_OFFSET 43008
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ31_RT_OFFSET 43009
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ32_RT_OFFSET 43010
-#define PBF_REG_BTB_GUARANTEED_VOQ32_RT_OFFSET 43011
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ32_RT_OFFSET 43012
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ33_RT_OFFSET 43013
-#define PBF_REG_BTB_GUARANTEED_VOQ33_RT_OFFSET 43014
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ33_RT_OFFSET 43015
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ34_RT_OFFSET 43016
-#define PBF_REG_BTB_GUARANTEED_VOQ34_RT_OFFSET 43017
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ34_RT_OFFSET 43018
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ35_RT_OFFSET 43019
-#define PBF_REG_BTB_GUARANTEED_VOQ35_RT_OFFSET 43020
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ35_RT_OFFSET 43021
-#define XCM_REG_CON_PHY_Q3_RT_OFFSET 43022
-
-#define RUNTIME_ARRAY_SIZE 43023
+#define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET 39786
+#define NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE 8
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_OFFSET 39794
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_SIZE 1024
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_OFFSET 40818
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_SIZE 512
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_OFFSET 41330
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_SIZE 512
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 41842
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 512
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_OFFSET 42354
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_SIZE 512
+#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_OFFSET 42866
+#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_SIZE 32
+#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 42898
+#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 42899
+#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 42900
+#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 42901
+#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 42902
+#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 42903
+#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 42904
+#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 42905
+#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 42906
+#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 42907
+#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 42908
+#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 42909
+#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 42910
+#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 42911
+#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 42912
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 42913
+#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 42914
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 42915
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 42916
+#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 42917
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 42918
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 42919
+#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 42920
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 42921
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 42922
+#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 42923
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 42924
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 42925
+#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 42926
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 42927
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 42928
+#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 42929
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 42930
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 42931
+#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 42932
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 42933
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 42934
+#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 42935
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 42936
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 42937
+#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 42938
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 42939
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 42940
+#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 42941
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 42942
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 42943
+#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 42944
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 42945
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 42946
+#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 42947
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 42948
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 42949
+#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 42950
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 42951
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 42952
+#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 42953
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 42954
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 42955
+#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 42956
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 42957
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 42958
+#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 42959
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 42960
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 42961
+#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 42962
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 42963
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 42964
+#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 42965
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 42966
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 42967
+#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 42968
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 42969
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 42970
+#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 42971
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 42972
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ20_RT_OFFSET 42973
+#define PBF_REG_BTB_GUARANTEED_VOQ20_RT_OFFSET 42974
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ20_RT_OFFSET 42975
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ21_RT_OFFSET 42976
+#define PBF_REG_BTB_GUARANTEED_VOQ21_RT_OFFSET 42977
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ21_RT_OFFSET 42978
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ22_RT_OFFSET 42979
+#define PBF_REG_BTB_GUARANTEED_VOQ22_RT_OFFSET 42980
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ22_RT_OFFSET 42981
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ23_RT_OFFSET 42982
+#define PBF_REG_BTB_GUARANTEED_VOQ23_RT_OFFSET 42983
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ23_RT_OFFSET 42984
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ24_RT_OFFSET 42985
+#define PBF_REG_BTB_GUARANTEED_VOQ24_RT_OFFSET 42986
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ24_RT_OFFSET 42987
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ25_RT_OFFSET 42988
+#define PBF_REG_BTB_GUARANTEED_VOQ25_RT_OFFSET 42989
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ25_RT_OFFSET 42990
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ26_RT_OFFSET 42991
+#define PBF_REG_BTB_GUARANTEED_VOQ26_RT_OFFSET 42992
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ26_RT_OFFSET 42993
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ27_RT_OFFSET 42994
+#define PBF_REG_BTB_GUARANTEED_VOQ27_RT_OFFSET 42995
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ27_RT_OFFSET 42996
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ28_RT_OFFSET 42997
+#define PBF_REG_BTB_GUARANTEED_VOQ28_RT_OFFSET 42998
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ28_RT_OFFSET 42999
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ29_RT_OFFSET 43000
+#define PBF_REG_BTB_GUARANTEED_VOQ29_RT_OFFSET 43001
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ29_RT_OFFSET 43002
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ30_RT_OFFSET 43003
+#define PBF_REG_BTB_GUARANTEED_VOQ30_RT_OFFSET 43004
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ30_RT_OFFSET 43005
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ31_RT_OFFSET 43006
+#define PBF_REG_BTB_GUARANTEED_VOQ31_RT_OFFSET 43007
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ31_RT_OFFSET 43008
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ32_RT_OFFSET 43009
+#define PBF_REG_BTB_GUARANTEED_VOQ32_RT_OFFSET 43010
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ32_RT_OFFSET 43011
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ33_RT_OFFSET 43012
+#define PBF_REG_BTB_GUARANTEED_VOQ33_RT_OFFSET 43013
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ33_RT_OFFSET 43014
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ34_RT_OFFSET 43015
+#define PBF_REG_BTB_GUARANTEED_VOQ34_RT_OFFSET 43016
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ34_RT_OFFSET 43017
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ35_RT_OFFSET 43018
+#define PBF_REG_BTB_GUARANTEED_VOQ35_RT_OFFSET 43019
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ35_RT_OFFSET 43020
+#define XCM_REG_CON_PHY_Q3_RT_OFFSET 43021
+
+#define RUNTIME_ARRAY_SIZE 43022
+
/* Init Callbacks */
#define DMAE_READY_CB 0
@@ -5694,8 +5792,10 @@ struct eth_vport_rx_mode {
#define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_SHIFT 4
#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_MASK 0x1
#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_SHIFT 5
-#define ETH_VPORT_RX_MODE_RESERVED1_MASK 0x3FF
-#define ETH_VPORT_RX_MODE_RESERVED1_SHIFT 6
+#define ETH_VPORT_RX_MODE_ACCEPT_ANY_VNI_MASK 0x1
+#define ETH_VPORT_RX_MODE_ACCEPT_ANY_VNI_SHIFT 6
+#define ETH_VPORT_RX_MODE_RESERVED1_MASK 0x1FF
+#define ETH_VPORT_RX_MODE_RESERVED1_SHIFT 7
};
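The new ACCEPT_ANY_VNI bit is carved out of the former reserved field, so the 16-bit state word is unchanged: 6 accept bits + 1 VNI bit + 9 reserved bits = 16. A compile-time sanity sketch (placed inside any function in a kernel translation unit):

    BUILD_BUG_ON((ETH_VPORT_RX_MODE_ACCEPT_ANY_VNI_MASK <<
    	      ETH_VPORT_RX_MODE_ACCEPT_ANY_VNI_SHIFT) &
    	     (ETH_VPORT_RX_MODE_RESERVED1_MASK <<
    	      ETH_VPORT_RX_MODE_RESERVED1_SHIFT));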
/* Command for setting tpa parameters */
@@ -6756,7 +6856,7 @@ struct e4_ystorm_rdma_task_ag_ctx {
#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7
u8 key;
- __le32 mw_cnt;
+ __le32 mw_cnt_or_qp_id;
u8 ref_cnt_seq;
u8 ctx_upd_seq;
__le16 dif_flags;
@@ -6812,7 +6912,7 @@ struct e4_mstorm_rdma_task_ag_ctx {
#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7
u8 key;
- __le32 mw_cnt;
+ __le32 mw_cnt_or_qp_id;
u8 ref_cnt_seq;
u8 ctx_upd_seq;
__le16 dif_flags;
@@ -7075,8 +7175,7 @@ struct rdma_register_tid_ramrod_data {
struct regpair va;
struct regpair pbl_base;
struct regpair dif_error_addr;
- struct regpair dif_runt_addr;
- __le32 reserved4[2];
+ __le32 reserved4[4];
};
/* rdma resize cq output params */
@@ -7144,8 +7243,7 @@ struct rdma_srq_modify_ramrod_data {
enum rdma_tid_type {
RDMA_TID_REGISTERED_MR,
RDMA_TID_FMR,
- RDMA_TID_MW_TYPE1,
- RDMA_TID_MW_TYPE2A,
+ RDMA_TID_MW,
MAX_RDMA_TID_TYPE
};
@@ -7681,6 +7779,16 @@ struct e4_roce_conn_context {
struct ustorm_roce_conn_st_ctx ustorm_st_context;
};
+/* roce cqes statistics */
+struct roce_cqe_stats {
+ __le32 req_cqe_error;
+ __le32 req_remote_access_errors;
+ __le32 req_remote_invalid_request;
+ __le32 resp_cqe_error;
+ __le32 resp_local_length_error;
+ __le32 reserved;
+};
+
/* roce create qp requester ramrod data */
struct roce_create_qp_req_ramrod_data {
__le16 flags;
@@ -7798,8 +7906,8 @@ struct roce_dcqcn_sent_stats {
/* RoCE destroy qp requester output params */
struct roce_destroy_qp_req_output_params {
- __le32 num_bound_mw;
__le32 cq_prod;
+ __le32 reserved;
};
/* RoCE destroy qp requester ramrod data */
@@ -7809,8 +7917,8 @@ struct roce_destroy_qp_req_ramrod_data {
/* RoCE destroy qp responder output params */
struct roce_destroy_qp_resp_output_params {
- __le32 num_invalidated_mw;
__le32 cq_prod;
+ __le32 reserved;
};
/* RoCE destroy qp responder ramrod data */
@@ -7818,16 +7926,27 @@ struct roce_destroy_qp_resp_ramrod_data {
struct regpair output_params_addr;
};
+/* roce error statistics */
+struct roce_error_stats {
+ __le32 resp_remote_access_errors;
+ __le32 reserved;
+};
+
/* roce special events statistics */
struct roce_events_stats {
- __le16 silent_drops;
- __le16 rnr_naks_sent;
+ __le32 silent_drops;
+ __le32 rnr_naks_sent;
__le32 retransmit_count;
__le32 icrc_error_count;
- __le32 reserved;
+ __le32 implied_nak_seq_err;
+ __le32 duplicate_request;
+ __le32 local_ack_timeout_err;
+ __le32 out_of_sequence;
+ __le32 packet_seq_err;
+ __le32 rnr_nak_retry_err;
};
-/* ROCE slow path EQ cmd IDs */
+/* roce slow path EQ cmd IDs */
enum roce_event_opcode {
ROCE_EVENT_CREATE_QP = 11,
ROCE_EVENT_MODIFY_QP,
@@ -7845,6 +7964,9 @@ struct roce_init_func_params {
u8 cnp_dscp;
u8 reserved;
__le32 cnp_send_timeout;
+ __le16 rl_offset;
+ u8 rl_count_log;
+ u8 reserved1[5];
};
/* roce func init ramrod data */
@@ -8532,7 +8654,7 @@ struct e4_tstorm_roce_resp_conn_ag_ctx {
__le16 rq_prod;
__le16 conn_dpi;
__le16 irq_cons;
- __le32 num_invlidated_mw;
+ __le32 reg9;
__le32 reg10;
};
@@ -9725,6 +9847,8 @@ enum iwarp_eqe_async_opcode {
IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED,
IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE,
IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW,
+ IWARP_EVENT_TYPE_ASYNC_SRQ_EMPTY,
+ IWARP_EVENT_TYPE_ASYNC_SRQ_LIMIT,
MAX_IWARP_EQE_ASYNC_OPCODE
};
@@ -11863,6 +11987,8 @@ struct public_global {
u32 running_bundle_id;
s32 external_temperature;
u32 mdump_reason;
+ u32 data_ptr;
+ u32 data_size;
};
struct fw_flr_mb {
@@ -11993,6 +12119,17 @@ struct public_port {
#define EEE_REMOTE_TW_TX_OFFSET 0
#define EEE_REMOTE_TW_RX_MASK 0xffff0000
#define EEE_REMOTE_TW_RX_OFFSET 16
+
+ u32 reserved1;
+ u32 oem_cfg_port;
+#define OEM_CFG_CHANNEL_TYPE_MASK 0x00000003
+#define OEM_CFG_CHANNEL_TYPE_OFFSET 0
+#define OEM_CFG_CHANNEL_TYPE_VLAN_PARTITION 0x1
+#define OEM_CFG_CHANNEL_TYPE_STAGGED 0x2
+#define OEM_CFG_SCHED_TYPE_MASK 0x0000000C
+#define OEM_CFG_SCHED_TYPE_OFFSET 2
+#define OEM_CFG_SCHED_TYPE_ETS 0x1
+#define OEM_CFG_SCHED_TYPE_VNIC_BW 0x2
};
struct public_func {
@@ -12069,6 +12206,23 @@ struct public_func {
#define DRV_ID_DRV_INIT_HW_MASK 0x80000000
#define DRV_ID_DRV_INIT_HW_SHIFT 31
#define DRV_ID_DRV_INIT_HW_FLAG (1 << DRV_ID_DRV_INIT_HW_SHIFT)
+
+ u32 oem_cfg_func;
+#define OEM_CFG_FUNC_TC_MASK 0x0000000F
+#define OEM_CFG_FUNC_TC_OFFSET 0
+#define OEM_CFG_FUNC_TC_0 0x0
+#define OEM_CFG_FUNC_TC_1 0x1
+#define OEM_CFG_FUNC_TC_2 0x2
+#define OEM_CFG_FUNC_TC_3 0x3
+#define OEM_CFG_FUNC_TC_4 0x4
+#define OEM_CFG_FUNC_TC_5 0x5
+#define OEM_CFG_FUNC_TC_6 0x6
+#define OEM_CFG_FUNC_TC_7 0x7
+
+#define OEM_CFG_FUNC_HOST_PRI_CTRL_MASK 0x00000030
+#define OEM_CFG_FUNC_HOST_PRI_CTRL_OFFSET 4
+#define OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC 0x1
+#define OEM_CFG_FUNC_HOST_PRI_CTRL_OS 0x2
};
struct mcp_mac {
@@ -12295,6 +12449,7 @@ struct public_drv_mb {
#define DRV_MSG_CODE_BIST_TEST 0x001e0000
#define DRV_MSG_CODE_SET_LED_MODE 0x00200000
#define DRV_MSG_CODE_RESOURCE_CMD 0x00230000
+#define DRV_MSG_CODE_GET_TLV_DONE 0x002f0000
#define RESOURCE_CMD_REQ_RESC_MASK 0x0000001F
#define RESOURCE_CMD_REQ_RESC_SHIFT 0
@@ -12495,6 +12650,8 @@ enum MFW_DRV_MSG_TYPE {
MFW_DRV_MSG_BW_UPDATE10,
MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE,
MFW_DRV_MSG_BW_UPDATE11,
+ MFW_DRV_MSG_OEM_CFG_UPDATE,
+ MFW_DRV_MSG_GET_TLV_REQ,
MFW_DRV_MSG_MAX
};
@@ -12530,6 +12687,233 @@ struct mcp_public_data {
struct public_func func[MCP_GLOB_FUNC_MAX];
};
+/* OCBB definitions */
+enum tlvs {
+ /* Category 1: Device Properties */
+ DRV_TLV_CLP_STR,
+ DRV_TLV_CLP_STR_CTD,
+ /* Category 6: Device Configuration */
+ DRV_TLV_SCSI_TO,
+ DRV_TLV_R_T_TOV,
+ DRV_TLV_R_A_TOV,
+ DRV_TLV_E_D_TOV,
+ DRV_TLV_CR_TOV,
+ DRV_TLV_BOOT_TYPE,
+ /* Category 8: Port Configuration */
+ DRV_TLV_NPIV_ENABLED,
+ /* Category 10: Function Configuration */
+ DRV_TLV_FEATURE_FLAGS,
+ DRV_TLV_LOCAL_ADMIN_ADDR,
+ DRV_TLV_ADDITIONAL_MAC_ADDR_1,
+ DRV_TLV_ADDITIONAL_MAC_ADDR_2,
+ DRV_TLV_LSO_MAX_OFFLOAD_SIZE,
+ DRV_TLV_LSO_MIN_SEGMENT_COUNT,
+ DRV_TLV_PROMISCUOUS_MODE,
+ DRV_TLV_TX_DESCRIPTORS_QUEUE_SIZE,
+ DRV_TLV_RX_DESCRIPTORS_QUEUE_SIZE,
+ DRV_TLV_NUM_OF_NET_QUEUE_VMQ_CFG,
+ DRV_TLV_FLEX_NIC_OUTER_VLAN_ID,
+ DRV_TLV_OS_DRIVER_STATES,
+ DRV_TLV_PXE_BOOT_PROGRESS,
+ /* Category 12: FC/FCoE Configuration */
+ DRV_TLV_NPIV_STATE,
+ DRV_TLV_NUM_OF_NPIV_IDS,
+ DRV_TLV_SWITCH_NAME,
+ DRV_TLV_SWITCH_PORT_NUM,
+ DRV_TLV_SWITCH_PORT_ID,
+ DRV_TLV_VENDOR_NAME,
+ DRV_TLV_SWITCH_MODEL,
+ DRV_TLV_SWITCH_FW_VER,
+ DRV_TLV_QOS_PRIORITY_PER_802_1P,
+ DRV_TLV_PORT_ALIAS,
+ DRV_TLV_PORT_STATE,
+ DRV_TLV_FIP_TX_DESCRIPTORS_QUEUE_SIZE,
+ DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_SIZE,
+ DRV_TLV_LINK_FAILURE_COUNT,
+ DRV_TLV_FCOE_BOOT_PROGRESS,
+ /* Category 13: iSCSI Configuration */
+ DRV_TLV_TARGET_LLMNR_ENABLED,
+ DRV_TLV_HEADER_DIGEST_FLAG_ENABLED,
+ DRV_TLV_DATA_DIGEST_FLAG_ENABLED,
+ DRV_TLV_AUTHENTICATION_METHOD,
+ DRV_TLV_ISCSI_BOOT_TARGET_PORTAL,
+ DRV_TLV_MAX_FRAME_SIZE,
+ DRV_TLV_PDU_TX_DESCRIPTORS_QUEUE_SIZE,
+ DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_SIZE,
+ DRV_TLV_ISCSI_BOOT_PROGRESS,
+ /* Category 20: Device Data */
+ DRV_TLV_PCIE_BUS_RX_UTILIZATION,
+ DRV_TLV_PCIE_BUS_TX_UTILIZATION,
+ DRV_TLV_DEVICE_CPU_CORES_UTILIZATION,
+ DRV_TLV_LAST_VALID_DCC_TLV_RECEIVED,
+ DRV_TLV_NCSI_RX_BYTES_RECEIVED,
+ DRV_TLV_NCSI_TX_BYTES_SENT,
+ /* Category 22: Base Port Data */
+ DRV_TLV_RX_DISCARDS,
+ DRV_TLV_RX_ERRORS,
+ DRV_TLV_TX_ERRORS,
+ DRV_TLV_TX_DISCARDS,
+ DRV_TLV_RX_FRAMES_RECEIVED,
+ DRV_TLV_TX_FRAMES_SENT,
+ /* Category 23: FC/FCoE Port Data */
+ DRV_TLV_RX_BROADCAST_PACKETS,
+ DRV_TLV_TX_BROADCAST_PACKETS,
+ /* Category 28: Base Function Data */
+ DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV4,
+ DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV6,
+ DRV_TLV_TX_DESCRIPTOR_QUEUE_AVG_DEPTH,
+ DRV_TLV_RX_DESCRIPTORS_QUEUE_AVG_DEPTH,
+ DRV_TLV_PF_RX_FRAMES_RECEIVED,
+ DRV_TLV_RX_BYTES_RECEIVED,
+ DRV_TLV_PF_TX_FRAMES_SENT,
+ DRV_TLV_TX_BYTES_SENT,
+ DRV_TLV_IOV_OFFLOAD,
+ DRV_TLV_PCI_ERRORS_CAP_ID,
+ DRV_TLV_UNCORRECTABLE_ERROR_STATUS,
+ DRV_TLV_UNCORRECTABLE_ERROR_MASK,
+ DRV_TLV_CORRECTABLE_ERROR_STATUS,
+ DRV_TLV_CORRECTABLE_ERROR_MASK,
+ DRV_TLV_PCI_ERRORS_AECC_REGISTER,
+ DRV_TLV_TX_QUEUES_EMPTY,
+ DRV_TLV_RX_QUEUES_EMPTY,
+ DRV_TLV_TX_QUEUES_FULL,
+ DRV_TLV_RX_QUEUES_FULL,
+ /* Category 29: FC/FCoE Function Data */
+ DRV_TLV_FCOE_TX_DESCRIPTOR_QUEUE_AVG_DEPTH,
+ DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_AVG_DEPTH,
+ DRV_TLV_FCOE_RX_FRAMES_RECEIVED,
+ DRV_TLV_FCOE_RX_BYTES_RECEIVED,
+ DRV_TLV_FCOE_TX_FRAMES_SENT,
+ DRV_TLV_FCOE_TX_BYTES_SENT,
+ DRV_TLV_CRC_ERROR_COUNT,
+ DRV_TLV_CRC_ERROR_1_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_CRC_ERROR_1_TIMESTAMP,
+ DRV_TLV_CRC_ERROR_2_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_CRC_ERROR_2_TIMESTAMP,
+ DRV_TLV_CRC_ERROR_3_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_CRC_ERROR_3_TIMESTAMP,
+ DRV_TLV_CRC_ERROR_4_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_CRC_ERROR_4_TIMESTAMP,
+ DRV_TLV_CRC_ERROR_5_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_CRC_ERROR_5_TIMESTAMP,
+ DRV_TLV_LOSS_OF_SYNC_ERROR_COUNT,
+ DRV_TLV_LOSS_OF_SIGNAL_ERRORS,
+ DRV_TLV_PRIMITIVE_SEQUENCE_PROTOCOL_ERROR_COUNT,
+ DRV_TLV_DISPARITY_ERROR_COUNT,
+ DRV_TLV_CODE_VIOLATION_ERROR_COUNT,
+ DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_1,
+ DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_2,
+ DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_3,
+ DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_4,
+ DRV_TLV_LAST_FLOGI_TIMESTAMP,
+ DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_1,
+ DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_2,
+ DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_3,
+ DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_4,
+ DRV_TLV_LAST_FLOGI_ACC_TIMESTAMP,
+ DRV_TLV_LAST_FLOGI_RJT,
+ DRV_TLV_LAST_FLOGI_RJT_TIMESTAMP,
+ DRV_TLV_FDISCS_SENT_COUNT,
+ DRV_TLV_FDISC_ACCS_RECEIVED,
+ DRV_TLV_FDISC_RJTS_RECEIVED,
+ DRV_TLV_PLOGI_SENT_COUNT,
+ DRV_TLV_PLOGI_ACCS_RECEIVED,
+ DRV_TLV_PLOGI_RJTS_RECEIVED,
+ DRV_TLV_PLOGI_1_SENT_DESTINATION_FC_ID,
+ DRV_TLV_PLOGI_1_TIMESTAMP,
+ DRV_TLV_PLOGI_2_SENT_DESTINATION_FC_ID,
+ DRV_TLV_PLOGI_2_TIMESTAMP,
+ DRV_TLV_PLOGI_3_SENT_DESTINATION_FC_ID,
+ DRV_TLV_PLOGI_3_TIMESTAMP,
+ DRV_TLV_PLOGI_4_SENT_DESTINATION_FC_ID,
+ DRV_TLV_PLOGI_4_TIMESTAMP,
+ DRV_TLV_PLOGI_5_SENT_DESTINATION_FC_ID,
+ DRV_TLV_PLOGI_5_TIMESTAMP,
+ DRV_TLV_PLOGI_1_ACC_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_PLOGI_1_ACC_TIMESTAMP,
+ DRV_TLV_PLOGI_2_ACC_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_PLOGI_2_ACC_TIMESTAMP,
+ DRV_TLV_PLOGI_3_ACC_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_PLOGI_3_ACC_TIMESTAMP,
+ DRV_TLV_PLOGI_4_ACC_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_PLOGI_4_ACC_TIMESTAMP,
+ DRV_TLV_PLOGI_5_ACC_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_PLOGI_5_ACC_TIMESTAMP,
+ DRV_TLV_LOGOS_ISSUED,
+ DRV_TLV_LOGO_ACCS_RECEIVED,
+ DRV_TLV_LOGO_RJTS_RECEIVED,
+ DRV_TLV_LOGO_1_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_LOGO_1_TIMESTAMP,
+ DRV_TLV_LOGO_2_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_LOGO_2_TIMESTAMP,
+ DRV_TLV_LOGO_3_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_LOGO_3_TIMESTAMP,
+ DRV_TLV_LOGO_4_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_LOGO_4_TIMESTAMP,
+ DRV_TLV_LOGO_5_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_LOGO_5_TIMESTAMP,
+ DRV_TLV_LOGOS_RECEIVED,
+ DRV_TLV_ACCS_ISSUED,
+ DRV_TLV_PRLIS_ISSUED,
+ DRV_TLV_ACCS_RECEIVED,
+ DRV_TLV_ABTS_SENT_COUNT,
+ DRV_TLV_ABTS_ACCS_RECEIVED,
+ DRV_TLV_ABTS_RJTS_RECEIVED,
+ DRV_TLV_ABTS_1_SENT_DESTINATION_FC_ID,
+ DRV_TLV_ABTS_1_TIMESTAMP,
+ DRV_TLV_ABTS_2_SENT_DESTINATION_FC_ID,
+ DRV_TLV_ABTS_2_TIMESTAMP,
+ DRV_TLV_ABTS_3_SENT_DESTINATION_FC_ID,
+ DRV_TLV_ABTS_3_TIMESTAMP,
+ DRV_TLV_ABTS_4_SENT_DESTINATION_FC_ID,
+ DRV_TLV_ABTS_4_TIMESTAMP,
+ DRV_TLV_ABTS_5_SENT_DESTINATION_FC_ID,
+ DRV_TLV_ABTS_5_TIMESTAMP,
+ DRV_TLV_RSCNS_RECEIVED,
+ DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_1,
+ DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_2,
+ DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_3,
+ DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_4,
+ DRV_TLV_LUN_RESETS_ISSUED,
+ DRV_TLV_ABORT_TASK_SETS_ISSUED,
+ DRV_TLV_TPRLOS_SENT,
+ DRV_TLV_NOS_SENT_COUNT,
+ DRV_TLV_NOS_RECEIVED_COUNT,
+ DRV_TLV_OLS_COUNT,
+ DRV_TLV_LR_COUNT,
+ DRV_TLV_LRR_COUNT,
+ DRV_TLV_LIP_SENT_COUNT,
+ DRV_TLV_LIP_RECEIVED_COUNT,
+ DRV_TLV_EOFA_COUNT,
+ DRV_TLV_EOFNI_COUNT,
+ DRV_TLV_SCSI_STATUS_CHECK_CONDITION_COUNT,
+ DRV_TLV_SCSI_STATUS_CONDITION_MET_COUNT,
+ DRV_TLV_SCSI_STATUS_BUSY_COUNT,
+ DRV_TLV_SCSI_STATUS_INTERMEDIATE_COUNT,
+ DRV_TLV_SCSI_STATUS_INTERMEDIATE_CONDITION_MET_COUNT,
+ DRV_TLV_SCSI_STATUS_RESERVATION_CONFLICT_COUNT,
+ DRV_TLV_SCSI_STATUS_TASK_SET_FULL_COUNT,
+ DRV_TLV_SCSI_STATUS_ACA_ACTIVE_COUNT,
+ DRV_TLV_SCSI_STATUS_TASK_ABORTED_COUNT,
+ DRV_TLV_SCSI_CHECK_CONDITION_1_RECEIVED_SK_ASC_ASCQ,
+ DRV_TLV_SCSI_CHECK_1_TIMESTAMP,
+ DRV_TLV_SCSI_CHECK_CONDITION_2_RECEIVED_SK_ASC_ASCQ,
+ DRV_TLV_SCSI_CHECK_2_TIMESTAMP,
+ DRV_TLV_SCSI_CHECK_CONDITION_3_RECEIVED_SK_ASC_ASCQ,
+ DRV_TLV_SCSI_CHECK_3_TIMESTAMP,
+ DRV_TLV_SCSI_CHECK_CONDITION_4_RECEIVED_SK_ASC_ASCQ,
+ DRV_TLV_SCSI_CHECK_4_TIMESTAMP,
+ DRV_TLV_SCSI_CHECK_CONDITION_5_RECEIVED_SK_ASC_ASCQ,
+ DRV_TLV_SCSI_CHECK_5_TIMESTAMP,
+ /* Category 30: iSCSI Function Data */
+ DRV_TLV_PDU_TX_DESCRIPTOR_QUEUE_AVG_DEPTH,
+ DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_AVG_DEPTH,
+ DRV_TLV_ISCSI_PDU_RX_FRAMES_RECEIVED,
+ DRV_TLV_ISCSI_PDU_RX_BYTES_RECEIVED,
+ DRV_TLV_ISCSI_PDU_TX_FRAMES_SENT,
+ DRV_TLV_ISCSI_PDU_TX_BYTES_SENT
+};
+
struct nvm_cfg_mac_address {
u32 mac_addr_hi;
#define NVM_CFG_MAC_ADDRESS_HI_MASK 0x0000FFFF
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c
index fca2dbd93ad9..70504dcf4087 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c
@@ -360,6 +360,26 @@ void qed_port_unpretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
*(u32 *)&p_ptt->pxp.pretend);
}
+void qed_port_fid_pretend(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 port_id, u16 fid)
+{
+ u16 control = 0;
+
+ SET_FIELD(control, PXP_PRETEND_CMD_PORT, port_id);
+ SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 1);
+ SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
+ SET_FIELD(control, PXP_PRETEND_CMD_IS_CONCRETE, 1);
+ SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_FUNCTION, 1);
+ if (!GET_FIELD(fid, PXP_CONCRETE_FID_VFVALID))
+ fid = GET_FIELD(fid, PXP_CONCRETE_FID_PFID);
+ p_ptt->pxp.pretend.control = cpu_to_le16(control);
+ p_ptt->pxp.pretend.fid.concrete_fid.fid = cpu_to_le16(fid);
+ REG_WR(p_hwfn,
+ qed_ptt_config_addr(p_ptt) +
+ offsetof(struct pxp_ptt_entry, pretend),
+ *(u32 *)&p_ptt->pxp.pretend);
+}
+
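Typical usage of the new helper is pretend, access, unpretend; this sketch is hypothetical (port_id, fid and reg_addr stand in for real values), with both helpers taken from this file:

    qed_port_fid_pretend(p_hwfn, p_ptt, port_id, fid);
    val = qed_rd(p_hwfn, p_ptt, reg_addr);	/* executes as port_id/fid */
    qed_port_unpretend(p_hwfn, p_ptt);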
u32 qed_vfid_to_concrete(struct qed_hwfn *p_hwfn, u8 vfid)
{
u32 concrete_fid = 0;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.h b/drivers/net/ethernet/qlogic/qed/qed_hw.h
index 8db2839a8ec8..505e94db939d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.h
@@ -245,6 +245,18 @@ void qed_port_unpretend(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
+ * @brief qed_port_fid_pretend - pretend to another port and another function
+ * when accessing the ptt window
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param port_id - the port to pretend to
+ * @param fid - fid field of the pxp_pretend structure; may hold either a
+ * PF or a VF FID.
+ */
+void qed_port_fid_pretend(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 port_id, u16 fid);
+
+/**
* @brief qed_vfid_to_concrete - build a concrete FID for a
* given VF ID
*
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
index 1365da7c8900..d845badf9b90 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
@@ -1245,7 +1245,7 @@ void qed_gft_config(struct qed_hwfn *p_hwfn,
bool udp,
bool ipv4, bool ipv6, enum gft_profile_type profile_type)
{
- u32 reg_val, cam_line, ram_line_lo, ram_line_hi;
+ u32 reg_val, cam_line, ram_line_lo, ram_line_hi, search_non_ip_as_gft;
if (!ipv6 && !ipv4)
DP_NOTICE(p_hwfn,
@@ -1314,6 +1314,9 @@ void qed_gft_config(struct qed_hwfn *p_hwfn,
ram_line_lo = 0;
ram_line_hi = 0;
+ /* By default, do not search non-IP traffic as GFT */
+ search_non_ip_as_gft = 0;
+
/* Tunnel type */
SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_DST_PORT, 1);
SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL, 1);
@@ -1337,9 +1340,14 @@ void qed_gft_config(struct qed_hwfn *p_hwfn,
SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
} else if (profile_type == GFT_PROFILE_TYPE_TUNNEL_TYPE) {
SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_ETHERTYPE, 1);
+
+ /* Allow tunneled traffic without inner IP */
+ search_non_ip_as_gft = 1;
}
qed_wr(p_hwfn,
+ p_ptt, PRS_REG_SEARCH_NON_IP_AS_GFT, search_non_ip_as_gft);
+ qed_wr(p_hwfn,
p_ptt,
PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
ram_line_lo);
@@ -1509,3 +1517,43 @@ void qed_enable_context_validation(struct qed_hwfn *p_hwfn,
ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
qed_wr(p_hwfn, p_ptt, CDU_REG_TCFC_CTX_VALID0, ctx_validation);
}
+
+static u32 qed_get_rdma_assert_ram_addr(struct qed_hwfn *p_hwfn, u8 storm_id)
+{
+ switch (storm_id) {
+ case 0:
+ return TSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
+ TSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
+ case 1:
+ return MSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
+ MSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
+ case 2:
+ return USEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
+ USTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
+ case 3:
+ return XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
+ XSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
+ case 4:
+ return YSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
+ YSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
+ case 5:
+ return PSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
+ PSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
+
+ default:
+ return 0;
+ }
+}
+
+void qed_set_rdma_error_level(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u8 assert_level[NUM_STORMS])
+{
+ u8 storm_id;
+
+ for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) {
+ u32 ram_addr = qed_get_rdma_assert_ram_addr(p_hwfn, storm_id);
+
+ qed_wr(p_hwfn, p_ptt, ram_addr, assert_level[storm_id]);
+ }
+}
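A hypothetical caller of the new API, programming the same assert threshold into all six Storms (NUM_STORMS and the prototype are defined in the hunks above):

    u8 levels[NUM_STORMS] = { 1, 1, 1, 1, 1, 1 };

    qed_set_rdma_error_level(p_hwfn, p_ptt, levels);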
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
index 2a2b1018ed1d..90a2b53096e2 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
@@ -271,6 +271,8 @@ int qed_iwarp_create_qp(struct qed_hwfn *p_hwfn,
p_ramrod->sq_num_pages = qp->sq_num_pages;
p_ramrod->rq_num_pages = qp->rq_num_pages;
+ p_ramrod->srq_id.srq_idx = cpu_to_le16(qp->srq_id);
+ p_ramrod->srq_id.opaque_fid = cpu_to_le16(p_hwfn->hw_info.opaque_fid);
p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
@@ -1157,7 +1159,6 @@ int qed_iwarp_connect(void *rdma_cxt,
struct qed_iwarp_info *iwarp_info;
struct qed_iwarp_ep *ep;
u8 mpa_data_size = 0;
- u8 ts_hdr_size = 0;
u32 cid;
int rc;
@@ -1216,10 +1217,7 @@ int qed_iwarp_connect(void *rdma_cxt,
iparams->cm_info.private_data,
iparams->cm_info.private_data_len);
- if (p_hwfn->p_rdma_info->iwarp.tcp_flags & QED_IWARP_TS_EN)
- ts_hdr_size = TIMESTAMP_HEADER_SIZE;
-
- ep->mss = iparams->mss - ts_hdr_size;
+ ep->mss = iparams->mss;
ep->mss = min_t(u16, QED_IWARP_MAX_FW_MSS, ep->mss);
ep->event_cb = iparams->event_cb;
@@ -2335,7 +2333,6 @@ qed_iwarp_ll2_comp_syn_pkt(void *cxt, struct qed_ll2_comp_rx_data *data)
u8 local_mac_addr[ETH_ALEN];
struct qed_iwarp_ep *ep;
int tcp_start_offset;
- u8 ts_hdr_size = 0;
u8 ll2_syn_handle;
int payload_len;
u32 hdr_size;
@@ -2413,11 +2410,7 @@ qed_iwarp_ll2_comp_syn_pkt(void *cxt, struct qed_ll2_comp_rx_data *data)
memcpy(&ep->cm_info, &cm_info, sizeof(ep->cm_info));
- if (p_hwfn->p_rdma_info->iwarp.tcp_flags & QED_IWARP_TS_EN)
- ts_hdr_size = TIMESTAMP_HEADER_SIZE;
-
- hdr_size = ((cm_info.ip_version == QED_TCP_IPV4) ? 40 : 60) +
- ts_hdr_size;
+ hdr_size = ((cm_info.ip_version == QED_TCP_IPV4) ? 40 : 60);
ep->mss = p_hwfn->p_rdma_info->iwarp.max_mtu - hdr_size;
ep->mss = min_t(u16, QED_IWARP_MAX_FW_MSS, ep->mss);
@@ -3004,8 +2997,11 @@ static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn,
union event_ring_data *data,
u8 fw_return_code)
{
+ struct qed_rdma_events events = p_hwfn->p_rdma_info->events;
struct regpair *fw_handle = &data->rdma_data.async_handle;
struct qed_iwarp_ep *ep = NULL;
+ u16 srq_offset;
+ u16 srq_id;
u16 cid;
ep = (struct qed_iwarp_ep *)(uintptr_t)HILO_64(fw_handle->hi,
@@ -3067,6 +3063,24 @@ static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn,
qed_iwarp_cid_cleaned(p_hwfn, cid);
break;
+ case IWARP_EVENT_TYPE_ASYNC_SRQ_EMPTY:
+ DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_SRQ_EMPTY\n");
+ srq_offset = p_hwfn->p_rdma_info->srq_id_offset;
+ /* FW assigns a value that fits in u16 */
+ srq_id = ((u16)le32_to_cpu(fw_handle->lo)) - srq_offset;
+ events.affiliated_event(events.context,
+ QED_IWARP_EVENT_SRQ_EMPTY,
+ &srq_id);
+ break;
+ case IWARP_EVENT_TYPE_ASYNC_SRQ_LIMIT:
+ DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_SRQ_LIMIT\n");
+ srq_offset = p_hwfn->p_rdma_info->srq_id_offset;
+ /* FW assigns a value that fits in u16 */
+ srq_id = ((u16)le32_to_cpu(fw_handle->lo)) - srq_offset;
+ events.affiliated_event(events.context,
+ QED_IWARP_EVENT_SRQ_LIMIT,
+ &srq_id);
+ break;
case IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW:
DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW\n");
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index 8667799d0069..1f6ac848109d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -586,6 +586,9 @@ qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn,
SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL,
!!(accept_filter & QED_ACCEPT_BCAST));
+ SET_FIELD(state, ETH_VPORT_RX_MODE_ACCEPT_ANY_VNI,
+ !!(accept_filter & QED_ACCEPT_ANY_VNI));
+
p_ramrod->rx_mode.state = cpu_to_le16(state);
DP_VERBOSE(p_hwfn, QED_MSG_SP,
"p_ramrod->rx_mode.state = 0x%x\n", state);
@@ -1677,6 +1680,8 @@ static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
HILO_64_REGPAIR(tstats.mftag_filter_discard);
p_stats->common.mac_filter_discards +=
HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
+ p_stats->common.gft_filter_drop +=
+ HILO_64_REGPAIR(tstats.eth_gft_drop_pkt);
}
static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn,
@@ -1852,6 +1857,11 @@ static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn,
p_ah->tx_1519_to_max_byte_packets =
port_stats.eth.u1.ah1.t1519_to_max;
}
+
+ p_common->link_change_count = qed_rd(p_hwfn, p_ptt,
+ p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port,
+ link_change_count));
}
static void __qed_get_vport_stats(struct qed_hwfn *p_hwfn,
@@ -1959,11 +1969,14 @@ void qed_reset_vport_stats(struct qed_dev *cdev)
/* PORT statistics are not necessarily reset, so we need to
* read and create a baseline for future statistics.
* The link change stat is maintained by the MFW; return its value as-is.
*/
- if (!cdev->reset_stats)
+ if (!cdev->reset_stats) {
DP_INFO(cdev, "Reset stats not allocated\n");
- else
+ } else {
_qed_get_vport_stats(cdev, cdev->reset_stats);
+ cdev->reset_stats->common.link_change_count = 0;
+ }
}
static enum gft_profile_type
@@ -1973,6 +1986,8 @@ qed_arfs_mode_to_hsi(enum qed_filter_config_mode mode)
return GFT_PROFILE_TYPE_4_TUPLE;
if (mode == QED_FILTER_CONFIG_MODE_IP_DEST)
return GFT_PROFILE_TYPE_IP_DST_ADDR;
+ if (mode == QED_FILTER_CONFIG_MODE_IP_SRC)
+ return GFT_PROFILE_TYPE_IP_SRC_ADDR;
return GFT_PROFILE_TYPE_L4_DST_PORT;
}
@@ -2013,16 +2028,6 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
u8 abs_vport_id = 0;
int rc = -EINVAL;
- rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
- if (rc)
- return rc;
-
- if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) {
- rc = qed_fw_l2_queue(p_hwfn, p_params->qid, &abs_rx_q_id);
- if (rc)
- return rc;
- }
-
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
init_data.cid = qed_spq_get_cid(p_hwfn);
@@ -2047,15 +2052,28 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_params->addr);
p_ramrod->pkt_hdr_length = cpu_to_le16(p_params->length);
- if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) {
- p_ramrod->rx_qid_valid = 1;
- p_ramrod->rx_qid = cpu_to_le16(abs_rx_q_id);
+ if (p_params->b_is_drop) {
+ p_ramrod->vport_id = cpu_to_le16(ETH_GFT_TRASHCAN_VPORT);
+ } else {
+ rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
+ if (rc)
+ return rc;
+
+ if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) {
+ rc = qed_fw_l2_queue(p_hwfn, p_params->qid,
+ &abs_rx_q_id);
+ if (rc)
+ return rc;
+
+ p_ramrod->rx_qid_valid = 1;
+ p_ramrod->rx_qid = cpu_to_le16(abs_rx_q_id);
+ }
+
+ p_ramrod->vport_id = cpu_to_le16((u16)abs_vport_id);
}
p_ramrod->flow_id_valid = 0;
p_ramrod->flow_id = 0;
-
- p_ramrod->vport_id = cpu_to_le16((u16)abs_vport_id);
p_ramrod->filter_action = p_params->b_is_add ? GFT_ADD_FILTER
: GFT_DELETE_FILTER;
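With b_is_drop set, the ramrod steers matching flows to ETH_GFT_TRASHCAN_VPORT instead of a real vport/queue, which is why the vport/queue translation is skipped on that path. A hypothetical parameter setup for a drop rule (the struct name and buffer fields are assumptions):

    struct qed_ntuple_filter_params params = {};

    params.addr = hdr_dma;		/* DMA address of the header buffer */
    params.length = hdr_len;
    params.b_is_add = true;
    params.b_is_drop = true;	/* vport_id/qid are ignored on this path */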
@@ -2848,6 +2866,24 @@ static int qed_fp_cqe_completion(struct qed_dev *dev,
cqe);
}
+static int qed_req_bulletin_update_mac(struct qed_dev *cdev, u8 *mac)
+{
+ int i, ret;
+
+ if (IS_PF(cdev))
+ return 0;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ ret = qed_vf_pf_bulletin_update_mac(p_hwfn, mac);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
#ifdef CONFIG_QED_SRIOV
extern const struct qed_iov_hv_ops qed_iov_ops_pass;
#endif
@@ -2885,6 +2921,7 @@ static const struct qed_eth_ops qed_eth_ops_pass = {
.ntuple_filter_config = &qed_ntuple_arfs_filter_config,
.configure_arfs_searcher = &qed_configure_arfs_searcher,
.get_coalesce = &qed_get_coalesce,
+ .req_bulletin_update_mac = &qed_req_bulletin_update_mac,
};
const struct qed_eth_ops *qed_get_eth_ops(void)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h
index c4030e949cce..806a8da257e9 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h
@@ -183,6 +183,7 @@ struct qed_filter_accept_flags {
#define QED_ACCEPT_MCAST_MATCHED 0x08
#define QED_ACCEPT_MCAST_UNMATCHED 0x10
#define QED_ACCEPT_BCAST 0x20
+#define QED_ACCEPT_ANY_VNI 0x40
};
struct qed_arfs_config_params {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 468c59d2e491..c97ebd681c47 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -960,12 +960,16 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
p_ramrod->drop_ttl0_flg = p_ll2_conn->input.rx_drop_ttl0_flg;
p_ramrod->inner_vlan_stripping_en =
p_ll2_conn->input.rx_vlan_removal_en;
+
+ if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) &&
+ p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE)
+ p_ramrod->report_outer_vlan = 1;
p_ramrod->queue_id = p_ll2_conn->queue_id;
p_ramrod->main_func_queue = p_ll2_conn->main_func_queue ? 1 : 0;
- if ((IS_MF_DEFAULT(p_hwfn) || IS_MF_SI(p_hwfn)) &&
- p_ramrod->main_func_queue && (conn_type != QED_LL2_TYPE_ROCE) &&
- (conn_type != QED_LL2_TYPE_IWARP)) {
+ if (test_bit(QED_MF_LL2_NON_UNICAST, &p_hwfn->cdev->mf_bits) &&
+ p_ramrod->main_func_queue && conn_type != QED_LL2_TYPE_ROCE &&
+ conn_type != QED_LL2_TYPE_IWARP) {
p_ramrod->mf_si_bcast_accept_all = 1;
p_ramrod->mf_si_mcast_accept_all = 1;
} else {
@@ -1534,11 +1538,12 @@ int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn);
if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) {
+ if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
+ qed_llh_add_protocol_filter(p_hwfn, p_ptt,
+ ETH_P_FCOE, 0,
+ QED_LLH_FILTER_ETHERTYPE);
qed_llh_add_protocol_filter(p_hwfn, p_ptt,
- 0x8906, 0,
- QED_LLH_FILTER_ETHERTYPE);
- qed_llh_add_protocol_filter(p_hwfn, p_ptt,
- 0x8914, 0,
+ ETH_P_FIP, 0,
QED_LLH_FILTER_ETHERTYPE);
}
@@ -1694,11 +1699,16 @@ qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
if (QED_IS_IWARP_PERSONALITY(p_hwfn) &&
- p_ll2->input.conn_type == QED_LL2_TYPE_OOO)
+ p_ll2->input.conn_type == QED_LL2_TYPE_OOO) {
start_bd->nw_vlan_or_lb_echo =
cpu_to_le16(IWARP_LL2_IN_ORDER_TX_QUEUE);
- else
+ } else {
start_bd->nw_vlan_or_lb_echo = cpu_to_le16(pkt->vlan);
+ if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) &&
+ p_ll2->input.conn_type == QED_LL2_TYPE_FCOE)
+ pkt->remove_stag = true;
+ }
+
SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W,
cpu_to_le16(pkt->l4_hdr_offset_w));
SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest);
@@ -1709,6 +1719,9 @@ qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
SET_FIELD(bd_data, CORE_TX_BD_DATA_IP_CSUM, !!(pkt->enable_ip_cksum));
SET_FIELD(bd_data, CORE_TX_BD_DATA_L4_CSUM, !!(pkt->enable_l4_cksum));
SET_FIELD(bd_data, CORE_TX_BD_DATA_IP_LEN, !!(pkt->calc_ip_len));
+ SET_FIELD(bd_data, CORE_TX_BD_DATA_DISABLE_STAG_INSERTION,
+ !!(pkt->remove_stag));
+
start_bd->bd_data.as_bitfield = cpu_to_le16(bd_data);
DMA_REGPAIR_LE(start_bd->addr, pkt->first_frag);
start_bd->nbytes = cpu_to_le16(pkt->first_frag_len);
@@ -1933,11 +1946,12 @@ int qed_ll2_terminate_connection(void *cxt, u8 connection_handle)
qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) {
+ if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
+ qed_llh_remove_protocol_filter(p_hwfn, p_ptt,
+ ETH_P_FCOE, 0,
+ QED_LLH_FILTER_ETHERTYPE);
qed_llh_remove_protocol_filter(p_hwfn, p_ptt,
- 0x8906, 0,
- QED_LLH_FILTER_ETHERTYPE);
- qed_llh_remove_protocol_filter(p_hwfn, p_ptt,
- 0x8914, 0,
+ ETH_P_FIP, 0,
QED_LLH_FILTER_ETHERTYPE);
}
@@ -2399,7 +2413,8 @@ fail:
return -EINVAL;
}
-static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb)
+static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
+ unsigned long xmit_flags)
{
struct qed_ll2_tx_pkt_info pkt;
const skb_frag_t *frag;
@@ -2444,6 +2459,9 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb)
pkt.first_frag = mapping;
pkt.first_frag_len = skb->len;
pkt.cookie = skb;
+ if (test_bit(QED_MF_UFP_SPECIFIC, &cdev->mf_bits) &&
+ test_bit(QED_LL2_XMIT_FLAGS_FIP_DISCOVERY, &xmit_flags))
+ pkt.remove_stag = true;
rc = qed_ll2_prepare_tx_packet(&cdev->hwfns[0], cdev->ll2->handle,
&pkt, 1);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 7870ae2a6f7e..b04d57ca5176 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -64,6 +64,7 @@
#define QED_ROCE_QPS (8192)
#define QED_ROCE_DPIS (8)
+#define QED_RDMA_SRQS QED_ROCE_QPS
static char version[] =
"QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";
@@ -264,7 +265,6 @@ int qed_fill_dev_info(struct qed_dev *cdev,
dev_info->pci_mem_end = cdev->pci_params.mem_end;
dev_info->pci_irq = cdev->pci_params.irq;
dev_info->rdma_supported = QED_IS_RDMA_PERSONALITY(p_hwfn);
- dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]);
dev_info->dev_type = cdev->type;
ether_addr_copy(dev_info->hw_mac, hw_info->hw_mac_addr);
@@ -273,7 +273,8 @@ int qed_fill_dev_info(struct qed_dev *cdev,
dev_info->fw_minor = FW_MINOR_VERSION;
dev_info->fw_rev = FW_REVISION_VERSION;
dev_info->fw_eng = FW_ENGINEERING_VERSION;
- dev_info->mf_mode = cdev->mf_mode;
+ dev_info->b_inter_pf_switch = test_bit(QED_MF_INTER_PF_SWITCH,
+ &cdev->mf_bits);
dev_info->tx_switching = true;
if (hw_info->b_wol_support == QED_WOL_SUPPORT_PME)
@@ -922,6 +923,7 @@ static void qed_update_pf_params(struct qed_dev *cdev,
if (IS_ENABLED(CONFIG_QED_RDMA)) {
params->rdma_pf_params.num_qps = QED_ROCE_QPS;
params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
+ params->rdma_pf_params.num_srqs = QED_RDMA_SRQS;
/* divide by 3 the MRs to avoid MF ILT overflow */
params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
}
@@ -946,6 +948,68 @@ static void qed_update_pf_params(struct qed_dev *cdev,
}
}
+static void qed_slowpath_wq_stop(struct qed_dev *cdev)
+{
+ int i;
+
+ if (IS_VF(cdev))
+ return;
+
+ for_each_hwfn(cdev, i) {
+ if (!cdev->hwfns[i].slowpath_wq)
+ continue;
+
+ flush_workqueue(cdev->hwfns[i].slowpath_wq);
+ destroy_workqueue(cdev->hwfns[i].slowpath_wq);
+ }
+}
+
+static void qed_slowpath_task(struct work_struct *work)
+{
+ struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
+ slowpath_task.work);
+ struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
+
+ if (!ptt) {
+ queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0);
+ return;
+ }
+
+ if (test_and_clear_bit(QED_SLOWPATH_MFW_TLV_REQ,
+ &hwfn->slowpath_task_flags))
+ qed_mfw_process_tlv_req(hwfn, ptt);
+
+ qed_ptt_release(hwfn, ptt);
+}
+
+static int qed_slowpath_wq_start(struct qed_dev *cdev)
+{
+ struct qed_hwfn *hwfn;
+ char name[NAME_SIZE];
+ int i;
+
+ if (IS_VF(cdev))
+ return 0;
+
+ for_each_hwfn(cdev, i) {
+ hwfn = &cdev->hwfns[i];
+
+ snprintf(name, NAME_SIZE, "slowpath-%02x:%02x.%02x",
+ cdev->pdev->bus->number,
+ PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
+
+ hwfn->slowpath_wq = alloc_workqueue(name, 0, 0);
+ if (!hwfn->slowpath_wq) {
+ DP_NOTICE(hwfn, "Cannot create slowpath workqueue\n");
+ return -ENOMEM;
+ }
+
+ INIT_DELAYED_WORK(&hwfn->slowpath_task, qed_slowpath_task);
+ }
+
+ return 0;
+}
+
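
For reference, the deferral pattern behind qed_slowpath_wq_start()/qed_slowpath_task() - set a request bit, kick a delayed work, let the worker consume the bit - reduced to a self-contained sketch with hypothetical names throughout:

	struct demo_ctx {
		struct workqueue_struct *wq;
		struct delayed_work task;
		unsigned long flags;
	#define DEMO_FLAG_REQ 0
	};

	static void demo_task(struct work_struct *work)
	{
		struct demo_ctx *ctx = container_of(work, struct demo_ctx,
						    task.work);

		if (test_and_clear_bit(DEMO_FLAG_REQ, &ctx->flags))
			;	/* service the request here */
	}

	static int demo_start(struct demo_ctx *ctx)
	{
		ctx->wq = alloc_workqueue("demo-wq", 0, 0);
		if (!ctx->wq)
			return -ENOMEM;
		INIT_DELAYED_WORK(&ctx->task, demo_task);
		return 0;
	}

	static void demo_request(struct demo_ctx *ctx)
	{
		set_bit(DEMO_FLAG_REQ, &ctx->flags);
		queue_delayed_work(ctx->wq, &ctx->task, 0);
	}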
static int qed_slowpath_start(struct qed_dev *cdev,
struct qed_slowpath_params *params)
{
@@ -961,6 +1025,9 @@ static int qed_slowpath_start(struct qed_dev *cdev,
if (qed_iov_wq_start(cdev))
goto err;
+ if (qed_slowpath_wq_start(cdev))
+ goto err;
+
if (IS_PF(cdev)) {
rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME,
&cdev->pdev->dev);
@@ -1095,6 +1162,8 @@ err:
qed_iov_wq_stop(cdev, false);
+ qed_slowpath_wq_stop(cdev);
+
return rc;
}
@@ -1103,6 +1172,8 @@ static int qed_slowpath_stop(struct qed_dev *cdev)
if (!cdev)
return -ENODEV;
+ qed_slowpath_wq_stop(cdev);
+
qed_ll2_dealloc_if(cdev);
if (IS_PF(cdev)) {
@@ -1894,15 +1965,8 @@ static int qed_nvm_get_image(struct qed_dev *cdev, enum qed_nvm_images type,
u8 *buf, u16 len)
{
struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
- struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
- int rc;
- if (!ptt)
- return -EAGAIN;
-
- rc = qed_mcp_get_nvm_image(hwfn, ptt, type, buf, len);
- qed_ptt_release(hwfn, ptt);
- return rc;
+ return qed_mcp_get_nvm_image(hwfn, type, buf, len);
}
static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
@@ -2095,3 +2159,89 @@ void qed_get_protocol_stats(struct qed_dev *cdev,
return;
}
}
+
+int qed_mfw_tlv_req(struct qed_hwfn *hwfn)
+{
+ DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV,
+ "Scheduling slowpath task [Flag: %d]\n",
+ QED_SLOWPATH_MFW_TLV_REQ);
+ smp_mb__before_atomic();
+ set_bit(QED_SLOWPATH_MFW_TLV_REQ, &hwfn->slowpath_task_flags);
+ smp_mb__after_atomic();
+ queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0);
+
+ return 0;
+}
+
+static void
+qed_fill_generic_tlv_data(struct qed_dev *cdev, struct qed_mfw_tlv_generic *tlv)
+{
+ struct qed_common_cb_ops *op = cdev->protocol_ops.common;
+ struct qed_eth_stats_common *p_common;
+ struct qed_generic_tlvs gen_tlvs;
+ struct qed_eth_stats stats;
+ int i;
+
+ memset(&gen_tlvs, 0, sizeof(gen_tlvs));
+ op->get_generic_tlv_data(cdev->ops_cookie, &gen_tlvs);
+
+ if (gen_tlvs.feat_flags & QED_TLV_IP_CSUM)
+ tlv->flags.ipv4_csum_offload = true;
+ if (gen_tlvs.feat_flags & QED_TLV_LSO)
+ tlv->flags.lso_supported = true;
+ tlv->flags.b_set = true;
+
+ for (i = 0; i < QED_TLV_MAC_COUNT; i++) {
+ if (is_valid_ether_addr(gen_tlvs.mac[i])) {
+ ether_addr_copy(tlv->mac[i], gen_tlvs.mac[i]);
+ tlv->mac_set[i] = true;
+ }
+ }
+
+ qed_get_vport_stats(cdev, &stats);
+ p_common = &stats.common;
+ tlv->rx_frames = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
+ p_common->rx_bcast_pkts;
+ tlv->rx_frames_set = true;
+ tlv->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes +
+ p_common->rx_bcast_bytes;
+ tlv->rx_bytes_set = true;
+ tlv->tx_frames = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
+ p_common->tx_bcast_pkts;
+ tlv->tx_frames_set = true;
+ tlv->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes +
+ p_common->tx_bcast_bytes;
+ tlv->tx_bytes_set = true;
+}
+
+int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn, enum qed_mfw_tlv_type type,
+ union qed_mfw_tlv_data *tlv_buf)
+{
+ struct qed_dev *cdev = hwfn->cdev;
+ struct qed_common_cb_ops *ops;
+
+ ops = cdev->protocol_ops.common;
+ if (!ops || !ops->get_protocol_tlv_data || !ops->get_generic_tlv_data) {
+ DP_NOTICE(hwfn, "Can't collect TLV management info\n");
+ return -EINVAL;
+ }
+
+ switch (type) {
+ case QED_MFW_TLV_GENERIC:
+ qed_fill_generic_tlv_data(hwfn->cdev, &tlv_buf->generic);
+ break;
+ case QED_MFW_TLV_ETH:
+ ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->eth);
+ break;
+ case QED_MFW_TLV_FCOE:
+ ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->fcoe);
+ break;
+ case QED_MFW_TLV_ISCSI:
+ ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->iscsi);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
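
The switch above relies on the qed client having registered both callbacks. A hedged sketch of what an L2 client's get_generic_tlv_data() might plausibly look like (the netdev cookie and the feature mapping are assumptions, not part of this patch):

	static void example_get_generic_tlv_data(void *dev,
						 struct qed_generic_tlvs *data)
	{
		struct net_device *ndev = dev;	/* assumed cookie */

		if (ndev->features & NETIF_F_IP_CSUM)
			data->feat_flags |= QED_TLV_IP_CSUM;
		if (ndev->features & NETIF_F_TSO)
			data->feat_flags |= QED_TLV_LSO;
		ether_addr_copy(data->mac[0], ndev->dev_addr);
	}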
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index ec0d425766a7..6f9927d1a501 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -40,6 +40,7 @@
#include <linux/string.h>
#include <linux/etherdevice.h>
#include "qed.h"
+#include "qed_cxt.h"
#include "qed_dcbx.h"
#include "qed_hsi.h"
#include "qed_hw.h"
@@ -1486,6 +1487,81 @@ static void qed_mcp_update_stag(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
&resp, &param);
}
+void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct public_func shmem_info;
+ u32 port_cfg, val;
+
+ if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
+ return;
+
+ memset(&p_hwfn->ufp_info, 0, sizeof(p_hwfn->ufp_info));
+ port_cfg = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, oem_cfg_port));
+ val = (port_cfg & OEM_CFG_CHANNEL_TYPE_MASK) >>
+ OEM_CFG_CHANNEL_TYPE_OFFSET;
+ if (val != OEM_CFG_CHANNEL_TYPE_STAGGED)
+ DP_NOTICE(p_hwfn, "Incorrect UFP Channel type %d\n", val);
+
+ val = (port_cfg & OEM_CFG_SCHED_TYPE_MASK) >> OEM_CFG_SCHED_TYPE_OFFSET;
+ if (val == OEM_CFG_SCHED_TYPE_ETS) {
+ p_hwfn->ufp_info.mode = QED_UFP_MODE_ETS;
+ } else if (val == OEM_CFG_SCHED_TYPE_VNIC_BW) {
+ p_hwfn->ufp_info.mode = QED_UFP_MODE_VNIC_BW;
+ } else {
+ p_hwfn->ufp_info.mode = QED_UFP_MODE_UNKNOWN;
+ DP_NOTICE(p_hwfn, "Unknown UFP scheduling mode %d\n", val);
+ }
+
+ qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
+ val = (shmem_info.oem_cfg_func & OEM_CFG_FUNC_TC_MASK) >>
+ OEM_CFG_FUNC_TC_OFFSET;
+ p_hwfn->ufp_info.tc = (u8)val;
+ val = (shmem_info.oem_cfg_func & OEM_CFG_FUNC_HOST_PRI_CTRL_MASK) >>
+ OEM_CFG_FUNC_HOST_PRI_CTRL_OFFSET;
+ if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC) {
+ p_hwfn->ufp_info.pri_type = QED_UFP_PRI_VNIC;
+ } else if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_OS) {
+ p_hwfn->ufp_info.pri_type = QED_UFP_PRI_OS;
+ } else {
+ p_hwfn->ufp_info.pri_type = QED_UFP_PRI_UNKNOWN;
+ DP_NOTICE(p_hwfn, "Unknown Host priority control %d\n", val);
+ }
+
+ DP_NOTICE(p_hwfn,
+ "UFP shmem config: mode = %d tc = %d pri_type = %d\n",
+ p_hwfn->ufp_info.mode,
+ p_hwfn->ufp_info.tc, p_hwfn->ufp_info.pri_type);
+}
+
+static int
+qed_mcp_handle_ufp_event(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ qed_mcp_read_ufp_config(p_hwfn, p_ptt);
+
+ if (p_hwfn->ufp_info.mode == QED_UFP_MODE_VNIC_BW) {
+ p_hwfn->qm_info.ooo_tc = p_hwfn->ufp_info.tc;
+ p_hwfn->hw_info.offload_tc = p_hwfn->ufp_info.tc;
+
+ qed_qm_reconf(p_hwfn, p_ptt);
+ } else if (p_hwfn->ufp_info.mode == QED_UFP_MODE_ETS) {
+ /* Merge UFP TC with the dcbx TC data */
+ qed_dcbx_mib_update_event(p_hwfn, p_ptt,
+ QED_DCBX_OPERATIONAL_MIB);
+ } else {
+ DP_ERR(p_hwfn, "Invalid sched type, discarding the UFP config\n");
+ return -EINVAL;
+ }
+
+ /* update storm FW with negotiation results */
+ qed_sp_pf_update_ufp(p_hwfn);
+
+ /* update stag pcp value */
+ qed_sp_pf_update_stag(p_hwfn);
+
+ return 0;
+}
+
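
All the OEM_CFG_* reads above follow the usual shmem mask/offset convention. A generic accessor (purely illustrative, not part of the patch) would collapse the open-coded mask-and-shift pairs:

	#define SHMEM_FIELD_GET(reg, field) \
		(((reg) & field##_MASK) >> field##_OFFSET)

	/* e.g. val = SHMEM_FIELD_GET(port_cfg, OEM_CFG_SCHED_TYPE);
	 * stands in for (port_cfg & OEM_CFG_SCHED_TYPE_MASK) >>
	 * OEM_CFG_SCHED_TYPE_OFFSET.
	 */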
int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
@@ -1529,6 +1605,9 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
qed_dcbx_mib_update_event(p_hwfn, p_ptt,
QED_DCBX_OPERATIONAL_MIB);
break;
+ case MFW_DRV_MSG_OEM_CFG_UPDATE:
+ qed_mcp_handle_ufp_event(p_hwfn, p_ptt);
+ break;
case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
qed_mcp_handle_transceiver_change(p_hwfn, p_ptt);
break;
@@ -1544,6 +1623,8 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
case MFW_DRV_MSG_S_TAG_UPDATE:
qed_mcp_update_stag(p_hwfn, p_ptt);
break;
+ case MFW_DRV_MSG_GET_TLV_REQ:
+ qed_mfw_tlv_req(p_hwfn);
break;
default:
DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
@@ -2529,9 +2610,8 @@ err0:
return rc;
}
-static int
+int
qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
enum qed_nvm_images image_id,
struct qed_nvm_image_att *p_image_att)
{
@@ -2546,6 +2626,15 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
case QED_NVM_IMAGE_FCOE_CFG:
type = NVM_TYPE_FCOE_CFG;
break;
+ case QED_NVM_IMAGE_NVM_CFG1:
+ type = NVM_TYPE_NVM_CFG1;
+ break;
+ case QED_NVM_IMAGE_DEFAULT_CFG:
+ type = NVM_TYPE_DEFAULT_CFG;
+ break;
+ case QED_NVM_IMAGE_NVM_META:
+ type = NVM_TYPE_META;
+ break;
default:
DP_NOTICE(p_hwfn, "Unknown request of image_id %08x\n",
image_id);
@@ -2569,7 +2658,6 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
}
int qed_mcp_get_nvm_image(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
enum qed_nvm_images image_id,
u8 *p_buffer, u32 buffer_len)
{
@@ -2578,7 +2666,7 @@ int qed_mcp_get_nvm_image(struct qed_hwfn *p_hwfn,
memset(p_buffer, 0, buffer_len);
- rc = qed_mcp_get_nvm_image_att(p_hwfn, p_ptt, image_id, &image_att);
+ rc = qed_mcp_get_nvm_image_att(p_hwfn, image_id, &image_att);
if (rc)
return rc;
@@ -2590,9 +2678,6 @@ int qed_mcp_get_nvm_image(struct qed_hwfn *p_hwfn,
return -EINVAL;
}
- /* Each NVM image is suffixed by CRC; Upper-layer has no need for it */
- image_att.length -= 4;
-
if (image_att.length > buffer_len) {
DP_VERBOSE(p_hwfn,
QED_MSG_STORAGE,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
index 8a5c988d0c3c..632a838f1fe3 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -213,6 +213,44 @@ enum qed_ov_wol {
QED_OV_WOL_ENABLED
};
+enum qed_mfw_tlv_type {
+ QED_MFW_TLV_GENERIC = 0x1, /* Core driver TLVs */
+ QED_MFW_TLV_ETH = 0x2, /* L2 driver TLVs */
+ QED_MFW_TLV_FCOE = 0x4, /* FCoE protocol TLVs */
+ QED_MFW_TLV_ISCSI = 0x8, /* iSCSI protocol TLVs */
+ QED_MFW_TLV_MAX = 0x16,
+};
+
+struct qed_mfw_tlv_generic {
+#define QED_MFW_TLV_FLAGS_SIZE 2
+ struct {
+ u8 ipv4_csum_offload;
+ u8 lso_supported;
+ bool b_set;
+ } flags;
+
+#define QED_MFW_TLV_MAC_COUNT 3
+ /* First entry for primary MAC, 2 secondary MACs possible */
+ u8 mac[QED_MFW_TLV_MAC_COUNT][6];
+ bool mac_set[QED_MFW_TLV_MAC_COUNT];
+
+ u64 rx_frames;
+ bool rx_frames_set;
+ u64 rx_bytes;
+ bool rx_bytes_set;
+ u64 tx_frames;
+ bool tx_frames_set;
+ u64 tx_bytes;
+ bool tx_bytes_set;
+};
+
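
The two feature flags end up packed into the 2-byte DRV_TLV_FEATURE_FLAGS value; mirroring qed_mfw_get_gen_tlv_value() in qed_mng_tlv.c further down, the packing is simply:

	/* bit 0 = ipv4_csum_offload, bit 1 = lso_supported */
	u8 val = (tlv->flags.ipv4_csum_offload ? BIT(0) : 0) |
		 (tlv->flags.lso_supported ? BIT(1) : 0);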
+union qed_mfw_tlv_data {
+ struct qed_mfw_tlv_generic generic;
+ struct qed_mfw_tlv_eth eth;
+ struct qed_mfw_tlv_fcoe fcoe;
+ struct qed_mfw_tlv_iscsi iscsi;
+};
+
/**
* @brief - returns the link params of the hw function
*
@@ -486,7 +524,20 @@ struct qed_nvm_image_att {
* @brief Allows reading a whole nvram image
*
* @param p_hwfn
- * @param p_ptt
+ * @param image_id - image to get attributes for
+ * @param p_image_att - image attributes structure into which to fill data
+ *
+ * @return int - 0 - operation was successful.
+ */
+int
+qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
+ enum qed_nvm_images image_id,
+ struct qed_nvm_image_att *p_image_att);
+
+/**
+ * @brief Allows reading a whole nvram image
+ *
+ * @param p_hwfn
* @param image_id - image requested for reading
* @param p_buffer - allocated buffer into which to fill data
* @param buffer_len - length of the allocated buffer.
@@ -494,7 +545,6 @@ struct qed_nvm_image_att {
* @return 0 iff p_buffer now contains the nvram image.
*/
int qed_mcp_get_nvm_image(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
enum qed_nvm_images image_id,
u8 *p_buffer, u32 buffer_len);
@@ -549,6 +599,17 @@ int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn,
struct bist_nvm_image_att *p_image_att,
u32 image_index);
+/**
+ * @brief - Processes the TLV request from the MFW, i.e., gets the required
+ * TLV info from the qed client and sends it to the MFW.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return 0 upon success.
+ */
+int qed_mfw_process_tlv_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
/* Using hwfn number (and not pf_num) is required since in CMT mode,
* same pf_num may be used by two different hwfn
* TODO - this shouldn't really be in .h file, but until all fields
@@ -609,6 +670,14 @@ struct qed_mcp_mb_params {
u32 mcp_param;
};
+struct qed_drv_tlv_hdr {
+ u8 tlv_type;
+ u8 tlv_length; /* In dwords - not including this header */
+ u8 tlv_reserved;
+#define QED_DRV_TLV_FLAGS_CHANGED 0x01
+ u8 tlv_flags;
+};
+
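
This header occupies one dword of the shared-memory stream, with tlv_length counting value dwords. A hedged sketch of walking such a stream (matching the offset arithmetic qed_mfw_update_tlvs() uses in qed_mng_tlv.c below):

	static void example_walk_tlvs(u8 *buf, u32 size)
	{
		u32 offset = 0;

		while (offset + sizeof(struct qed_drv_tlv_hdr) <= size) {
			struct qed_drv_tlv_hdr *hdr = (void *)&buf[offset];

			/* hdr->tlv_type / hdr->tlv_flags are valid here */
			offset += sizeof(*hdr) + sizeof(u32) * hdr->tlv_length;
		}
	}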
/**
* @brief Initialize the interface with the MCP
*
@@ -993,6 +1062,14 @@ int qed_mcp_get_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
+ * @brief Read ufp config from the shared memory.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+/**
* @brief Populate the nvm info shadow in the given hardware function
*
* @param p_hwfn
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c b/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c
new file mode 100644
index 000000000000..6c16158d8090
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c
@@ -0,0 +1,1337 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/bug.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+#include "qed.h"
+#include "qed_hw.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+
+#define TLV_TYPE(p) (p[0])
+#define TLV_LENGTH(p) (p[1])
+#define TLV_FLAGS(p) (p[3])
+
+#define QED_TLV_DATA_MAX (14)
+struct qed_tlv_parsed_buf {
+ /* To be filled with the address to set in Value field */
+ void *p_val;
+
+ /* To be used internally in case the value has to be modified */
+ u8 data[QED_TLV_DATA_MAX];
+};
+
+static int qed_mfw_get_tlv_group(u8 tlv_type, u8 *tlv_group)
+{
+ switch (tlv_type) {
+ case DRV_TLV_FEATURE_FLAGS:
+ case DRV_TLV_LOCAL_ADMIN_ADDR:
+ case DRV_TLV_ADDITIONAL_MAC_ADDR_1:
+ case DRV_TLV_ADDITIONAL_MAC_ADDR_2:
+ case DRV_TLV_OS_DRIVER_STATES:
+ case DRV_TLV_PXE_BOOT_PROGRESS:
+ case DRV_TLV_RX_FRAMES_RECEIVED:
+ case DRV_TLV_RX_BYTES_RECEIVED:
+ case DRV_TLV_TX_FRAMES_SENT:
+ case DRV_TLV_TX_BYTES_SENT:
+ case DRV_TLV_NPIV_ENABLED:
+ case DRV_TLV_PCIE_BUS_RX_UTILIZATION:
+ case DRV_TLV_PCIE_BUS_TX_UTILIZATION:
+ case DRV_TLV_DEVICE_CPU_CORES_UTILIZATION:
+ case DRV_TLV_LAST_VALID_DCC_TLV_RECEIVED:
+ case DRV_TLV_NCSI_RX_BYTES_RECEIVED:
+ case DRV_TLV_NCSI_TX_BYTES_SENT:
+ *tlv_group |= QED_MFW_TLV_GENERIC;
+ break;
+ case DRV_TLV_LSO_MAX_OFFLOAD_SIZE:
+ case DRV_TLV_LSO_MIN_SEGMENT_COUNT:
+ case DRV_TLV_PROMISCUOUS_MODE:
+ case DRV_TLV_TX_DESCRIPTORS_QUEUE_SIZE:
+ case DRV_TLV_RX_DESCRIPTORS_QUEUE_SIZE:
+ case DRV_TLV_NUM_OF_NET_QUEUE_VMQ_CFG:
+ case DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV4:
+ case DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV6:
+ case DRV_TLV_TX_DESCRIPTOR_QUEUE_AVG_DEPTH:
+ case DRV_TLV_RX_DESCRIPTORS_QUEUE_AVG_DEPTH:
+ case DRV_TLV_IOV_OFFLOAD:
+ case DRV_TLV_TX_QUEUES_EMPTY:
+ case DRV_TLV_RX_QUEUES_EMPTY:
+ case DRV_TLV_TX_QUEUES_FULL:
+ case DRV_TLV_RX_QUEUES_FULL:
+ *tlv_group |= QED_MFW_TLV_ETH;
+ break;
+ case DRV_TLV_SCSI_TO:
+ case DRV_TLV_R_T_TOV:
+ case DRV_TLV_R_A_TOV:
+ case DRV_TLV_E_D_TOV:
+ case DRV_TLV_CR_TOV:
+ case DRV_TLV_BOOT_TYPE:
+ case DRV_TLV_NPIV_STATE:
+ case DRV_TLV_NUM_OF_NPIV_IDS:
+ case DRV_TLV_SWITCH_NAME:
+ case DRV_TLV_SWITCH_PORT_NUM:
+ case DRV_TLV_SWITCH_PORT_ID:
+ case DRV_TLV_VENDOR_NAME:
+ case DRV_TLV_SWITCH_MODEL:
+ case DRV_TLV_SWITCH_FW_VER:
+ case DRV_TLV_QOS_PRIORITY_PER_802_1P:
+ case DRV_TLV_PORT_ALIAS:
+ case DRV_TLV_PORT_STATE:
+ case DRV_TLV_FIP_TX_DESCRIPTORS_QUEUE_SIZE:
+ case DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_SIZE:
+ case DRV_TLV_LINK_FAILURE_COUNT:
+ case DRV_TLV_FCOE_BOOT_PROGRESS:
+ case DRV_TLV_RX_BROADCAST_PACKETS:
+ case DRV_TLV_TX_BROADCAST_PACKETS:
+ case DRV_TLV_FCOE_TX_DESCRIPTOR_QUEUE_AVG_DEPTH:
+ case DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_AVG_DEPTH:
+ case DRV_TLV_FCOE_RX_FRAMES_RECEIVED:
+ case DRV_TLV_FCOE_RX_BYTES_RECEIVED:
+ case DRV_TLV_FCOE_TX_FRAMES_SENT:
+ case DRV_TLV_FCOE_TX_BYTES_SENT:
+ case DRV_TLV_CRC_ERROR_COUNT:
+ case DRV_TLV_CRC_ERROR_1_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_CRC_ERROR_1_TIMESTAMP:
+ case DRV_TLV_CRC_ERROR_2_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_CRC_ERROR_2_TIMESTAMP:
+ case DRV_TLV_CRC_ERROR_3_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_CRC_ERROR_3_TIMESTAMP:
+ case DRV_TLV_CRC_ERROR_4_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_CRC_ERROR_4_TIMESTAMP:
+ case DRV_TLV_CRC_ERROR_5_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_CRC_ERROR_5_TIMESTAMP:
+ case DRV_TLV_LOSS_OF_SYNC_ERROR_COUNT:
+ case DRV_TLV_LOSS_OF_SIGNAL_ERRORS:
+ case DRV_TLV_PRIMITIVE_SEQUENCE_PROTOCOL_ERROR_COUNT:
+ case DRV_TLV_DISPARITY_ERROR_COUNT:
+ case DRV_TLV_CODE_VIOLATION_ERROR_COUNT:
+ case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_1:
+ case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_2:
+ case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_3:
+ case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_4:
+ case DRV_TLV_LAST_FLOGI_TIMESTAMP:
+ case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_1:
+ case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_2:
+ case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_3:
+ case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_4:
+ case DRV_TLV_LAST_FLOGI_ACC_TIMESTAMP:
+ case DRV_TLV_LAST_FLOGI_RJT:
+ case DRV_TLV_LAST_FLOGI_RJT_TIMESTAMP:
+ case DRV_TLV_FDISCS_SENT_COUNT:
+ case DRV_TLV_FDISC_ACCS_RECEIVED:
+ case DRV_TLV_FDISC_RJTS_RECEIVED:
+ case DRV_TLV_PLOGI_SENT_COUNT:
+ case DRV_TLV_PLOGI_ACCS_RECEIVED:
+ case DRV_TLV_PLOGI_RJTS_RECEIVED:
+ case DRV_TLV_PLOGI_1_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_PLOGI_1_TIMESTAMP:
+ case DRV_TLV_PLOGI_2_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_PLOGI_2_TIMESTAMP:
+ case DRV_TLV_PLOGI_3_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_PLOGI_3_TIMESTAMP:
+ case DRV_TLV_PLOGI_4_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_PLOGI_4_TIMESTAMP:
+ case DRV_TLV_PLOGI_5_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_PLOGI_5_TIMESTAMP:
+ case DRV_TLV_PLOGI_1_ACC_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_PLOGI_1_ACC_TIMESTAMP:
+ case DRV_TLV_PLOGI_2_ACC_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_PLOGI_2_ACC_TIMESTAMP:
+ case DRV_TLV_PLOGI_3_ACC_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_PLOGI_3_ACC_TIMESTAMP:
+ case DRV_TLV_PLOGI_4_ACC_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_PLOGI_4_ACC_TIMESTAMP:
+ case DRV_TLV_PLOGI_5_ACC_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_PLOGI_5_ACC_TIMESTAMP:
+ case DRV_TLV_LOGOS_ISSUED:
+ case DRV_TLV_LOGO_ACCS_RECEIVED:
+ case DRV_TLV_LOGO_RJTS_RECEIVED:
+ case DRV_TLV_LOGO_1_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_LOGO_1_TIMESTAMP:
+ case DRV_TLV_LOGO_2_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_LOGO_2_TIMESTAMP:
+ case DRV_TLV_LOGO_3_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_LOGO_3_TIMESTAMP:
+ case DRV_TLV_LOGO_4_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_LOGO_4_TIMESTAMP:
+ case DRV_TLV_LOGO_5_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_LOGO_5_TIMESTAMP:
+ case DRV_TLV_LOGOS_RECEIVED:
+ case DRV_TLV_ACCS_ISSUED:
+ case DRV_TLV_PRLIS_ISSUED:
+ case DRV_TLV_ACCS_RECEIVED:
+ case DRV_TLV_ABTS_SENT_COUNT:
+ case DRV_TLV_ABTS_ACCS_RECEIVED:
+ case DRV_TLV_ABTS_RJTS_RECEIVED:
+ case DRV_TLV_ABTS_1_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_ABTS_1_TIMESTAMP:
+ case DRV_TLV_ABTS_2_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_ABTS_2_TIMESTAMP:
+ case DRV_TLV_ABTS_3_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_ABTS_3_TIMESTAMP:
+ case DRV_TLV_ABTS_4_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_ABTS_4_TIMESTAMP:
+ case DRV_TLV_ABTS_5_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_ABTS_5_TIMESTAMP:
+ case DRV_TLV_RSCNS_RECEIVED:
+ case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_1:
+ case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_2:
+ case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_3:
+ case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_4:
+ case DRV_TLV_LUN_RESETS_ISSUED:
+ case DRV_TLV_ABORT_TASK_SETS_ISSUED:
+ case DRV_TLV_TPRLOS_SENT:
+ case DRV_TLV_NOS_SENT_COUNT:
+ case DRV_TLV_NOS_RECEIVED_COUNT:
+ case DRV_TLV_OLS_COUNT:
+ case DRV_TLV_LR_COUNT:
+ case DRV_TLV_LRR_COUNT:
+ case DRV_TLV_LIP_SENT_COUNT:
+ case DRV_TLV_LIP_RECEIVED_COUNT:
+ case DRV_TLV_EOFA_COUNT:
+ case DRV_TLV_EOFNI_COUNT:
+ case DRV_TLV_SCSI_STATUS_CHECK_CONDITION_COUNT:
+ case DRV_TLV_SCSI_STATUS_CONDITION_MET_COUNT:
+ case DRV_TLV_SCSI_STATUS_BUSY_COUNT:
+ case DRV_TLV_SCSI_STATUS_INTERMEDIATE_COUNT:
+ case DRV_TLV_SCSI_STATUS_INTERMEDIATE_CONDITION_MET_COUNT:
+ case DRV_TLV_SCSI_STATUS_RESERVATION_CONFLICT_COUNT:
+ case DRV_TLV_SCSI_STATUS_TASK_SET_FULL_COUNT:
+ case DRV_TLV_SCSI_STATUS_ACA_ACTIVE_COUNT:
+ case DRV_TLV_SCSI_STATUS_TASK_ABORTED_COUNT:
+ case DRV_TLV_SCSI_CHECK_CONDITION_1_RECEIVED_SK_ASC_ASCQ:
+ case DRV_TLV_SCSI_CHECK_1_TIMESTAMP:
+ case DRV_TLV_SCSI_CHECK_CONDITION_2_RECEIVED_SK_ASC_ASCQ:
+ case DRV_TLV_SCSI_CHECK_2_TIMESTAMP:
+ case DRV_TLV_SCSI_CHECK_CONDITION_3_RECEIVED_SK_ASC_ASCQ:
+ case DRV_TLV_SCSI_CHECK_3_TIMESTAMP:
+ case DRV_TLV_SCSI_CHECK_CONDITION_4_RECEIVED_SK_ASC_ASCQ:
+ case DRV_TLV_SCSI_CHECK_4_TIMESTAMP:
+ case DRV_TLV_SCSI_CHECK_CONDITION_5_RECEIVED_SK_ASC_ASCQ:
+ case DRV_TLV_SCSI_CHECK_5_TIMESTAMP:
+ *tlv_group |= QED_MFW_TLV_FCOE;
+ break;
+ case DRV_TLV_TARGET_LLMNR_ENABLED:
+ case DRV_TLV_HEADER_DIGEST_FLAG_ENABLED:
+ case DRV_TLV_DATA_DIGEST_FLAG_ENABLED:
+ case DRV_TLV_AUTHENTICATION_METHOD:
+ case DRV_TLV_ISCSI_BOOT_TARGET_PORTAL:
+ case DRV_TLV_MAX_FRAME_SIZE:
+ case DRV_TLV_PDU_TX_DESCRIPTORS_QUEUE_SIZE:
+ case DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_SIZE:
+ case DRV_TLV_ISCSI_BOOT_PROGRESS:
+ case DRV_TLV_PDU_TX_DESCRIPTOR_QUEUE_AVG_DEPTH:
+ case DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_AVG_DEPTH:
+ case DRV_TLV_ISCSI_PDU_RX_FRAMES_RECEIVED:
+ case DRV_TLV_ISCSI_PDU_RX_BYTES_RECEIVED:
+ case DRV_TLV_ISCSI_PDU_TX_FRAMES_SENT:
+ case DRV_TLV_ISCSI_PDU_TX_BYTES_SENT:
+ *tlv_group |= QED_MFW_TLV_ISCSI;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Returns the size of the data buffer, or -1 if TLV data is not available. */
+static int
+qed_mfw_get_gen_tlv_value(struct qed_drv_tlv_hdr *p_tlv,
+ struct qed_mfw_tlv_generic *p_drv_buf,
+ struct qed_tlv_parsed_buf *p_buf)
+{
+ switch (p_tlv->tlv_type) {
+ case DRV_TLV_FEATURE_FLAGS:
+ if (p_drv_buf->flags.b_set) {
+ memset(p_buf->data, 0, sizeof(u8) * QED_TLV_DATA_MAX);
+ p_buf->data[0] = p_drv_buf->flags.ipv4_csum_offload ?
+ 1 : 0;
+ p_buf->data[0] |= (p_drv_buf->flags.lso_supported ?
+ 1 : 0) << 1;
+ p_buf->p_val = p_buf->data;
+ return QED_MFW_TLV_FLAGS_SIZE;
+ }
+ break;
+
+ case DRV_TLV_LOCAL_ADMIN_ADDR:
+ case DRV_TLV_ADDITIONAL_MAC_ADDR_1:
+ case DRV_TLV_ADDITIONAL_MAC_ADDR_2:
+ {
+ int idx = p_tlv->tlv_type - DRV_TLV_LOCAL_ADMIN_ADDR;
+
+ if (p_drv_buf->mac_set[idx]) {
+ p_buf->p_val = p_drv_buf->mac[idx];
+ return ETH_ALEN;
+ }
+ break;
+ }
+
+ case DRV_TLV_RX_FRAMES_RECEIVED:
+ if (p_drv_buf->rx_frames_set) {
+ p_buf->p_val = &p_drv_buf->rx_frames;
+ return sizeof(p_drv_buf->rx_frames);
+ }
+ break;
+ case DRV_TLV_RX_BYTES_RECEIVED:
+ if (p_drv_buf->rx_bytes_set) {
+ p_buf->p_val = &p_drv_buf->rx_bytes;
+ return sizeof(p_drv_buf->rx_bytes);
+ }
+ break;
+ case DRV_TLV_TX_FRAMES_SENT:
+ if (p_drv_buf->tx_frames_set) {
+ p_buf->p_val = &p_drv_buf->tx_frames;
+ return sizeof(p_drv_buf->tx_frames);
+ }
+ break;
+ case DRV_TLV_TX_BYTES_SENT:
+ if (p_drv_buf->tx_bytes_set) {
+ p_buf->p_val = &p_drv_buf->tx_bytes;
+ return sizeof(p_drv_buf->tx_bytes);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return -1;
+}
+
+static int
+qed_mfw_get_eth_tlv_value(struct qed_drv_tlv_hdr *p_tlv,
+ struct qed_mfw_tlv_eth *p_drv_buf,
+ struct qed_tlv_parsed_buf *p_buf)
+{
+ switch (p_tlv->tlv_type) {
+ case DRV_TLV_LSO_MAX_OFFLOAD_SIZE:
+ if (p_drv_buf->lso_maxoff_size_set) {
+ p_buf->p_val = &p_drv_buf->lso_maxoff_size;
+ return sizeof(p_drv_buf->lso_maxoff_size);
+ }
+ break;
+ case DRV_TLV_LSO_MIN_SEGMENT_COUNT:
+ if (p_drv_buf->lso_minseg_size_set) {
+ p_buf->p_val = &p_drv_buf->lso_minseg_size;
+ return sizeof(p_drv_buf->lso_minseg_size);
+ }
+ break;
+ case DRV_TLV_PROMISCUOUS_MODE:
+ if (p_drv_buf->prom_mode_set) {
+ p_buf->p_val = &p_drv_buf->prom_mode;
+ return sizeof(p_drv_buf->prom_mode);
+ }
+ break;
+ case DRV_TLV_TX_DESCRIPTORS_QUEUE_SIZE:
+ if (p_drv_buf->tx_descr_size_set) {
+ p_buf->p_val = &p_drv_buf->tx_descr_size;
+ return sizeof(p_drv_buf->tx_descr_size);
+ }
+ break;
+ case DRV_TLV_RX_DESCRIPTORS_QUEUE_SIZE:
+ if (p_drv_buf->rx_descr_size_set) {
+ p_buf->p_val = &p_drv_buf->rx_descr_size;
+ return sizeof(p_drv_buf->rx_descr_size);
+ }
+ break;
+ case DRV_TLV_NUM_OF_NET_QUEUE_VMQ_CFG:
+ if (p_drv_buf->netq_count_set) {
+ p_buf->p_val = &p_drv_buf->netq_count;
+ return sizeof(p_drv_buf->netq_count);
+ }
+ break;
+ case DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV4:
+ if (p_drv_buf->tcp4_offloads_set) {
+ p_buf->p_val = &p_drv_buf->tcp4_offloads;
+ return sizeof(p_drv_buf->tcp4_offloads);
+ }
+ break;
+ case DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV6:
+ if (p_drv_buf->tcp6_offloads_set) {
+ p_buf->p_val = &p_drv_buf->tcp6_offloads;
+ return sizeof(p_drv_buf->tcp6_offloads);
+ }
+ break;
+ case DRV_TLV_TX_DESCRIPTOR_QUEUE_AVG_DEPTH:
+ if (p_drv_buf->tx_descr_qdepth_set) {
+ p_buf->p_val = &p_drv_buf->tx_descr_qdepth;
+ return sizeof(p_drv_buf->tx_descr_qdepth);
+ }
+ break;
+ case DRV_TLV_RX_DESCRIPTORS_QUEUE_AVG_DEPTH:
+ if (p_drv_buf->rx_descr_qdepth_set) {
+ p_buf->p_val = &p_drv_buf->rx_descr_qdepth;
+ return sizeof(p_drv_buf->rx_descr_qdepth);
+ }
+ break;
+ case DRV_TLV_IOV_OFFLOAD:
+ if (p_drv_buf->iov_offload_set) {
+ p_buf->p_val = &p_drv_buf->iov_offload;
+ return sizeof(p_drv_buf->iov_offload);
+ }
+ break;
+ case DRV_TLV_TX_QUEUES_EMPTY:
+ if (p_drv_buf->txqs_empty_set) {
+ p_buf->p_val = &p_drv_buf->txqs_empty;
+ return sizeof(p_drv_buf->txqs_empty);
+ }
+ break;
+ case DRV_TLV_RX_QUEUES_EMPTY:
+ if (p_drv_buf->rxqs_empty_set) {
+ p_buf->p_val = &p_drv_buf->rxqs_empty;
+ return sizeof(p_drv_buf->rxqs_empty);
+ }
+ break;
+ case DRV_TLV_TX_QUEUES_FULL:
+ if (p_drv_buf->num_txqs_full_set) {
+ p_buf->p_val = &p_drv_buf->num_txqs_full;
+ return sizeof(p_drv_buf->num_txqs_full);
+ }
+ break;
+ case DRV_TLV_RX_QUEUES_FULL:
+ if (p_drv_buf->num_rxqs_full_set) {
+ p_buf->p_val = &p_drv_buf->num_rxqs_full;
+ return sizeof(p_drv_buf->num_rxqs_full);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return -1;
+}
+
+static int
+qed_mfw_get_tlv_time_value(struct qed_mfw_tlv_time *p_time,
+ struct qed_tlv_parsed_buf *p_buf)
+{
+ if (!p_time->b_set)
+ return -1;
+
+ /* Validate numbers */
+ if (p_time->month > 12)
+ p_time->month = 0;
+ if (p_time->day > 31)
+ p_time->day = 0;
+ if (p_time->hour > 23)
+ p_time->hour = 0;
+ if (p_time->min > 59)
+ p_time->min = 0;
+ if (p_time->msec > 999)
+ p_time->msec = 0;
+ if (p_time->usec > 999)
+ p_time->usec = 0;
+
+ memset(p_buf->data, 0, sizeof(u8) * QED_TLV_DATA_MAX);
+ snprintf(p_buf->data, QED_TLV_DATA_MAX, "%d%d%d%d%d%d",
+ p_time->month, p_time->day,
+ p_time->hour, p_time->min, p_time->msec, p_time->usec);
+
+ p_buf->p_val = p_buf->data;
+
+ return QED_MFW_TLV_TIME_SIZE;
+}
+
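
The divide-by-two index computations in the FCoE getter below assume the DRV_TLV_* enum lays each of the five history slots out as an FC_ID/TIMESTAMP pair of consecutive values, so the slot index is recovered as:

	/* e.g. CRC_ERROR_3_RECEIVED_SOURCE_FC_ID is four values past the
	 * _1_ entry, and 4 / 2 = 2 selects crc_err_src_fcid[2].
	 */
	static inline u8 tlv_slot(u8 tlv_type, u8 first_pair_type)
	{
		return (tlv_type - first_pair_type) / 2;
	}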
+static int
+qed_mfw_get_fcoe_tlv_value(struct qed_drv_tlv_hdr *p_tlv,
+ struct qed_mfw_tlv_fcoe *p_drv_buf,
+ struct qed_tlv_parsed_buf *p_buf)
+{
+ struct qed_mfw_tlv_time *p_time;
+ u8 idx;
+
+ switch (p_tlv->tlv_type) {
+ case DRV_TLV_SCSI_TO:
+ if (p_drv_buf->scsi_timeout_set) {
+ p_buf->p_val = &p_drv_buf->scsi_timeout;
+ return sizeof(p_drv_buf->scsi_timeout);
+ }
+ break;
+ case DRV_TLV_R_T_TOV:
+ if (p_drv_buf->rt_tov_set) {
+ p_buf->p_val = &p_drv_buf->rt_tov;
+ return sizeof(p_drv_buf->rt_tov);
+ }
+ break;
+ case DRV_TLV_R_A_TOV:
+ if (p_drv_buf->ra_tov_set) {
+ p_buf->p_val = &p_drv_buf->ra_tov;
+ return sizeof(p_drv_buf->ra_tov);
+ }
+ break;
+ case DRV_TLV_E_D_TOV:
+ if (p_drv_buf->ed_tov_set) {
+ p_buf->p_val = &p_drv_buf->ed_tov;
+ return sizeof(p_drv_buf->ed_tov);
+ }
+ break;
+ case DRV_TLV_CR_TOV:
+ if (p_drv_buf->cr_tov_set) {
+ p_buf->p_val = &p_drv_buf->cr_tov;
+ return sizeof(p_drv_buf->cr_tov);
+ }
+ break;
+ case DRV_TLV_BOOT_TYPE:
+ if (p_drv_buf->boot_type_set) {
+ p_buf->p_val = &p_drv_buf->boot_type;
+ return sizeof(p_drv_buf->boot_type);
+ }
+ break;
+ case DRV_TLV_NPIV_STATE:
+ if (p_drv_buf->npiv_state_set) {
+ p_buf->p_val = &p_drv_buf->npiv_state;
+ return sizeof(p_drv_buf->npiv_state);
+ }
+ break;
+ case DRV_TLV_NUM_OF_NPIV_IDS:
+ if (p_drv_buf->num_npiv_ids_set) {
+ p_buf->p_val = &p_drv_buf->num_npiv_ids;
+ return sizeof(p_drv_buf->num_npiv_ids);
+ }
+ break;
+ case DRV_TLV_SWITCH_NAME:
+ if (p_drv_buf->switch_name_set) {
+ p_buf->p_val = &p_drv_buf->switch_name;
+ return sizeof(p_drv_buf->switch_name);
+ }
+ break;
+ case DRV_TLV_SWITCH_PORT_NUM:
+ if (p_drv_buf->switch_portnum_set) {
+ p_buf->p_val = &p_drv_buf->switch_portnum;
+ return sizeof(p_drv_buf->switch_portnum);
+ }
+ break;
+ case DRV_TLV_SWITCH_PORT_ID:
+ if (p_drv_buf->switch_portid_set) {
+ p_buf->p_val = &p_drv_buf->switch_portid;
+ return sizeof(p_drv_buf->switch_portid);
+ }
+ break;
+ case DRV_TLV_VENDOR_NAME:
+ if (p_drv_buf->vendor_name_set) {
+ p_buf->p_val = &p_drv_buf->vendor_name;
+ return sizeof(p_drv_buf->vendor_name);
+ }
+ break;
+ case DRV_TLV_SWITCH_MODEL:
+ if (p_drv_buf->switch_model_set) {
+ p_buf->p_val = &p_drv_buf->switch_model;
+ return sizeof(p_drv_buf->switch_model);
+ }
+ break;
+ case DRV_TLV_SWITCH_FW_VER:
+ if (p_drv_buf->switch_fw_version_set) {
+ p_buf->p_val = &p_drv_buf->switch_fw_version;
+ return sizeof(p_drv_buf->switch_fw_version);
+ }
+ break;
+ case DRV_TLV_QOS_PRIORITY_PER_802_1P:
+ if (p_drv_buf->qos_pri_set) {
+ p_buf->p_val = &p_drv_buf->qos_pri;
+ return sizeof(p_drv_buf->qos_pri);
+ }
+ break;
+ case DRV_TLV_PORT_ALIAS:
+ if (p_drv_buf->port_alias_set) {
+ p_buf->p_val = &p_drv_buf->port_alias;
+ return sizeof(p_drv_buf->port_alias);
+ }
+ break;
+ case DRV_TLV_PORT_STATE:
+ if (p_drv_buf->port_state_set) {
+ p_buf->p_val = &p_drv_buf->port_state;
+ return sizeof(p_drv_buf->port_state);
+ }
+ break;
+ case DRV_TLV_FIP_TX_DESCRIPTORS_QUEUE_SIZE:
+ if (p_drv_buf->fip_tx_descr_size_set) {
+ p_buf->p_val = &p_drv_buf->fip_tx_descr_size;
+ return sizeof(p_drv_buf->fip_tx_descr_size);
+ }
+ break;
+ case DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_SIZE:
+ if (p_drv_buf->fip_rx_descr_size_set) {
+ p_buf->p_val = &p_drv_buf->fip_rx_descr_size;
+ return sizeof(p_drv_buf->fip_rx_descr_size);
+ }
+ break;
+ case DRV_TLV_LINK_FAILURE_COUNT:
+ if (p_drv_buf->link_failures_set) {
+ p_buf->p_val = &p_drv_buf->link_failures;
+ return sizeof(p_drv_buf->link_failures);
+ }
+ break;
+ case DRV_TLV_FCOE_BOOT_PROGRESS:
+ if (p_drv_buf->fcoe_boot_progress_set) {
+ p_buf->p_val = &p_drv_buf->fcoe_boot_progress;
+ return sizeof(p_drv_buf->fcoe_boot_progress);
+ }
+ break;
+ case DRV_TLV_RX_BROADCAST_PACKETS:
+ if (p_drv_buf->rx_bcast_set) {
+ p_buf->p_val = &p_drv_buf->rx_bcast;
+ return sizeof(p_drv_buf->rx_bcast);
+ }
+ break;
+ case DRV_TLV_TX_BROADCAST_PACKETS:
+ if (p_drv_buf->tx_bcast_set) {
+ p_buf->p_val = &p_drv_buf->tx_bcast;
+ return sizeof(p_drv_buf->tx_bcast);
+ }
+ break;
+ case DRV_TLV_FCOE_TX_DESCRIPTOR_QUEUE_AVG_DEPTH:
+ if (p_drv_buf->fcoe_txq_depth_set) {
+ p_buf->p_val = &p_drv_buf->fcoe_txq_depth;
+ return sizeof(p_drv_buf->fcoe_txq_depth);
+ }
+ break;
+ case DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_AVG_DEPTH:
+ if (p_drv_buf->fcoe_rxq_depth_set) {
+ p_buf->p_val = &p_drv_buf->fcoe_rxq_depth;
+ return sizeof(p_drv_buf->fcoe_rxq_depth);
+ }
+ break;
+ case DRV_TLV_FCOE_RX_FRAMES_RECEIVED:
+ if (p_drv_buf->fcoe_rx_frames_set) {
+ p_buf->p_val = &p_drv_buf->fcoe_rx_frames;
+ return sizeof(p_drv_buf->fcoe_rx_frames);
+ }
+ break;
+ case DRV_TLV_FCOE_RX_BYTES_RECEIVED:
+ if (p_drv_buf->fcoe_rx_bytes_set) {
+ p_buf->p_val = &p_drv_buf->fcoe_rx_bytes;
+ return sizeof(p_drv_buf->fcoe_rx_bytes);
+ }
+ break;
+ case DRV_TLV_FCOE_TX_FRAMES_SENT:
+ if (p_drv_buf->fcoe_tx_frames_set) {
+ p_buf->p_val = &p_drv_buf->fcoe_tx_frames;
+ return sizeof(p_drv_buf->fcoe_tx_frames);
+ }
+ break;
+ case DRV_TLV_FCOE_TX_BYTES_SENT:
+ if (p_drv_buf->fcoe_tx_bytes_set) {
+ p_buf->p_val = &p_drv_buf->fcoe_tx_bytes;
+ return sizeof(p_drv_buf->fcoe_tx_bytes);
+ }
+ break;
+ case DRV_TLV_CRC_ERROR_COUNT:
+ if (p_drv_buf->crc_count_set) {
+ p_buf->p_val = &p_drv_buf->crc_count;
+ return sizeof(p_drv_buf->crc_count);
+ }
+ break;
+ case DRV_TLV_CRC_ERROR_1_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_CRC_ERROR_2_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_CRC_ERROR_3_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_CRC_ERROR_4_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_CRC_ERROR_5_RECEIVED_SOURCE_FC_ID:
+ idx = (p_tlv->tlv_type -
+ DRV_TLV_CRC_ERROR_1_RECEIVED_SOURCE_FC_ID) / 2;
+
+ if (p_drv_buf->crc_err_src_fcid_set[idx]) {
+ p_buf->p_val = &p_drv_buf->crc_err_src_fcid[idx];
+ return sizeof(p_drv_buf->crc_err_src_fcid[idx]);
+ }
+ break;
+ case DRV_TLV_CRC_ERROR_1_TIMESTAMP:
+ case DRV_TLV_CRC_ERROR_2_TIMESTAMP:
+ case DRV_TLV_CRC_ERROR_3_TIMESTAMP:
+ case DRV_TLV_CRC_ERROR_4_TIMESTAMP:
+ case DRV_TLV_CRC_ERROR_5_TIMESTAMP:
+ idx = (p_tlv->tlv_type - DRV_TLV_CRC_ERROR_1_TIMESTAMP) / 2;
+
+ return qed_mfw_get_tlv_time_value(&p_drv_buf->crc_err[idx],
+ p_buf);
+ case DRV_TLV_LOSS_OF_SYNC_ERROR_COUNT:
+ if (p_drv_buf->losync_err_set) {
+ p_buf->p_val = &p_drv_buf->losync_err;
+ return sizeof(p_drv_buf->losync_err);
+ }
+ break;
+ case DRV_TLV_LOSS_OF_SIGNAL_ERRORS:
+ if (p_drv_buf->losig_err_set) {
+ p_buf->p_val = &p_drv_buf->losig_err;
+ return sizeof(p_drv_buf->losig_err);
+ }
+ break;
+ case DRV_TLV_PRIMITIVE_SEQUENCE_PROTOCOL_ERROR_COUNT:
+ if (p_drv_buf->primtive_err_set) {
+ p_buf->p_val = &p_drv_buf->primtive_err;
+ return sizeof(p_drv_buf->primtive_err);
+ }
+ break;
+ case DRV_TLV_DISPARITY_ERROR_COUNT:
+ if (p_drv_buf->disparity_err_set) {
+ p_buf->p_val = &p_drv_buf->disparity_err;
+ return sizeof(p_drv_buf->disparity_err);
+ }
+ break;
+ case DRV_TLV_CODE_VIOLATION_ERROR_COUNT:
+ if (p_drv_buf->code_violation_err_set) {
+ p_buf->p_val = &p_drv_buf->code_violation_err;
+ return sizeof(p_drv_buf->code_violation_err);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_1:
+ case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_2:
+ case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_3:
+ case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_4:
+ idx = p_tlv->tlv_type -
+ DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_1;
+ if (p_drv_buf->flogi_param_set[idx]) {
+ p_buf->p_val = &p_drv_buf->flogi_param[idx];
+ return sizeof(p_drv_buf->flogi_param[idx]);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_TIMESTAMP:
+ return qed_mfw_get_tlv_time_value(&p_drv_buf->flogi_tstamp,
+ p_buf);
+ case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_1:
+ case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_2:
+ case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_3:
+ case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_4:
+ idx = p_tlv->tlv_type -
+ DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_1;
+
+ if (p_drv_buf->flogi_acc_param_set[idx]) {
+ p_buf->p_val = &p_drv_buf->flogi_acc_param[idx];
+ return sizeof(p_drv_buf->flogi_acc_param[idx]);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_ACC_TIMESTAMP:
+ return qed_mfw_get_tlv_time_value(&p_drv_buf->flogi_acc_tstamp,
+ p_buf);
+ case DRV_TLV_LAST_FLOGI_RJT:
+ if (p_drv_buf->flogi_rjt_set) {
+ p_buf->p_val = &p_drv_buf->flogi_rjt;
+ return sizeof(p_drv_buf->flogi_rjt);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_RJT_TIMESTAMP:
+ return qed_mfw_get_tlv_time_value(&p_drv_buf->flogi_rjt_tstamp,
+ p_buf);
+ case DRV_TLV_FDISCS_SENT_COUNT:
+ if (p_drv_buf->fdiscs_set) {
+ p_buf->p_val = &p_drv_buf->fdiscs;
+ return sizeof(p_drv_buf->fdiscs);
+ }
+ break;
+ case DRV_TLV_FDISC_ACCS_RECEIVED:
+ if (p_drv_buf->fdisc_acc_set) {
+ p_buf->p_val = &p_drv_buf->fdisc_acc;
+ return sizeof(p_drv_buf->fdisc_acc);
+ }
+ break;
+ case DRV_TLV_FDISC_RJTS_RECEIVED:
+ if (p_drv_buf->fdisc_rjt_set) {
+ p_buf->p_val = &p_drv_buf->fdisc_rjt;
+ return sizeof(p_drv_buf->fdisc_rjt);
+ }
+ break;
+ case DRV_TLV_PLOGI_SENT_COUNT:
+ if (p_drv_buf->plogi_set) {
+ p_buf->p_val = &p_drv_buf->plogi;
+ return sizeof(p_drv_buf->plogi);
+ }
+ break;
+ case DRV_TLV_PLOGI_ACCS_RECEIVED:
+ if (p_drv_buf->plogi_acc_set) {
+ p_buf->p_val = &p_drv_buf->plogi_acc;
+ return sizeof(p_drv_buf->plogi_acc);
+ }
+ break;
+ case DRV_TLV_PLOGI_RJTS_RECEIVED:
+ if (p_drv_buf->plogi_rjt_set) {
+ p_buf->p_val = &p_drv_buf->plogi_rjt;
+ return sizeof(p_drv_buf->plogi_rjt);
+ }
+ break;
+ case DRV_TLV_PLOGI_1_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_PLOGI_2_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_PLOGI_3_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_PLOGI_4_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_PLOGI_5_SENT_DESTINATION_FC_ID:
+ idx = (p_tlv->tlv_type -
+ DRV_TLV_PLOGI_1_SENT_DESTINATION_FC_ID) / 2;
+
+ if (p_drv_buf->plogi_dst_fcid_set[idx]) {
+ p_buf->p_val = &p_drv_buf->plogi_dst_fcid[idx];
+ return sizeof(p_drv_buf->plogi_dst_fcid[idx]);
+ }
+ break;
+ case DRV_TLV_PLOGI_1_TIMESTAMP:
+ case DRV_TLV_PLOGI_2_TIMESTAMP:
+ case DRV_TLV_PLOGI_3_TIMESTAMP:
+ case DRV_TLV_PLOGI_4_TIMESTAMP:
+ case DRV_TLV_PLOGI_5_TIMESTAMP:
+ idx = (p_tlv->tlv_type - DRV_TLV_PLOGI_1_TIMESTAMP) / 2;
+
+ return qed_mfw_get_tlv_time_value(&p_drv_buf->plogi_tstamp[idx],
+ p_buf);
+ case DRV_TLV_PLOGI_1_ACC_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_PLOGI_2_ACC_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_PLOGI_3_ACC_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_PLOGI_4_ACC_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_PLOGI_5_ACC_RECEIVED_SOURCE_FC_ID:
+ idx = (p_tlv->tlv_type -
+ DRV_TLV_PLOGI_1_ACC_RECEIVED_SOURCE_FC_ID) / 2;
+
+ if (p_drv_buf->plogi_acc_src_fcid_set[idx]) {
+ p_buf->p_val = &p_drv_buf->plogi_acc_src_fcid[idx];
+ return sizeof(p_drv_buf->plogi_acc_src_fcid[idx]);
+ }
+ break;
+ case DRV_TLV_PLOGI_1_ACC_TIMESTAMP:
+ case DRV_TLV_PLOGI_2_ACC_TIMESTAMP:
+ case DRV_TLV_PLOGI_3_ACC_TIMESTAMP:
+ case DRV_TLV_PLOGI_4_ACC_TIMESTAMP:
+ case DRV_TLV_PLOGI_5_ACC_TIMESTAMP:
+ idx = (p_tlv->tlv_type - DRV_TLV_PLOGI_1_ACC_TIMESTAMP) / 2;
+ p_time = &p_drv_buf->plogi_acc_tstamp[idx];
+
+ return qed_mfw_get_tlv_time_value(p_time, p_buf);
+ case DRV_TLV_LOGOS_ISSUED:
+ if (p_drv_buf->tx_plogos_set) {
+ p_buf->p_val = &p_drv_buf->tx_plogos;
+ return sizeof(p_drv_buf->tx_plogos);
+ }
+ break;
+ case DRV_TLV_LOGO_ACCS_RECEIVED:
+ if (p_drv_buf->plogo_acc_set) {
+ p_buf->p_val = &p_drv_buf->plogo_acc;
+ return sizeof(p_drv_buf->plogo_acc);
+ }
+ break;
+ case DRV_TLV_LOGO_RJTS_RECEIVED:
+ if (p_drv_buf->plogo_rjt_set) {
+ p_buf->p_val = &p_drv_buf->plogo_rjt;
+ return sizeof(p_drv_buf->plogo_rjt);
+ }
+ break;
+ case DRV_TLV_LOGO_1_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_LOGO_2_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_LOGO_3_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_LOGO_4_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_LOGO_5_RECEIVED_SOURCE_FC_ID:
+ idx = (p_tlv->tlv_type - DRV_TLV_LOGO_1_RECEIVED_SOURCE_FC_ID) /
+ 2;
+
+ if (p_drv_buf->plogo_src_fcid_set[idx]) {
+ p_buf->p_val = &p_drv_buf->plogo_src_fcid[idx];
+ return sizeof(p_drv_buf->plogo_src_fcid[idx]);
+ }
+ break;
+ case DRV_TLV_LOGO_1_TIMESTAMP:
+ case DRV_TLV_LOGO_2_TIMESTAMP:
+ case DRV_TLV_LOGO_3_TIMESTAMP:
+ case DRV_TLV_LOGO_4_TIMESTAMP:
+ case DRV_TLV_LOGO_5_TIMESTAMP:
+ idx = (p_tlv->tlv_type - DRV_TLV_LOGO_1_TIMESTAMP) / 2;
+
+ return qed_mfw_get_tlv_time_value(&p_drv_buf->plogo_tstamp[idx],
+ p_buf);
+ case DRV_TLV_LOGOS_RECEIVED:
+ if (p_drv_buf->rx_logos_set) {
+ p_buf->p_val = &p_drv_buf->rx_logos;
+ return sizeof(p_drv_buf->rx_logos);
+ }
+ break;
+ case DRV_TLV_ACCS_ISSUED:
+ if (p_drv_buf->tx_accs_set) {
+ p_buf->p_val = &p_drv_buf->tx_accs;
+ return sizeof(p_drv_buf->tx_accs);
+ }
+ break;
+ case DRV_TLV_PRLIS_ISSUED:
+ if (p_drv_buf->tx_prlis_set) {
+ p_buf->p_val = &p_drv_buf->tx_prlis;
+ return sizeof(p_drv_buf->tx_prlis);
+ }
+ break;
+ case DRV_TLV_ACCS_RECEIVED:
+ if (p_drv_buf->rx_accs_set) {
+ p_buf->p_val = &p_drv_buf->rx_accs;
+ return sizeof(p_drv_buf->rx_accs);
+ }
+ break;
+ case DRV_TLV_ABTS_SENT_COUNT:
+ if (p_drv_buf->tx_abts_set) {
+ p_buf->p_val = &p_drv_buf->tx_abts;
+ return sizeof(p_drv_buf->tx_abts);
+ }
+ break;
+ case DRV_TLV_ABTS_ACCS_RECEIVED:
+ if (p_drv_buf->rx_abts_acc_set) {
+ p_buf->p_val = &p_drv_buf->rx_abts_acc;
+ return sizeof(p_drv_buf->rx_abts_acc);
+ }
+ break;
+ case DRV_TLV_ABTS_RJTS_RECEIVED:
+ if (p_drv_buf->rx_abts_rjt_set) {
+ p_buf->p_val = &p_drv_buf->rx_abts_rjt;
+ return sizeof(p_drv_buf->rx_abts_rjt);
+ }
+ break;
+ case DRV_TLV_ABTS_1_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_ABTS_2_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_ABTS_3_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_ABTS_4_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_ABTS_5_SENT_DESTINATION_FC_ID:
+ idx = (p_tlv->tlv_type -
+ DRV_TLV_ABTS_1_SENT_DESTINATION_FC_ID) / 2;
+
+ if (p_drv_buf->abts_dst_fcid_set[idx]) {
+ p_buf->p_val = &p_drv_buf->abts_dst_fcid[idx];
+ return sizeof(p_drv_buf->abts_dst_fcid[idx]);
+ }
+ break;
+ case DRV_TLV_ABTS_1_TIMESTAMP:
+ case DRV_TLV_ABTS_2_TIMESTAMP:
+ case DRV_TLV_ABTS_3_TIMESTAMP:
+ case DRV_TLV_ABTS_4_TIMESTAMP:
+ case DRV_TLV_ABTS_5_TIMESTAMP:
+ idx = (p_tlv->tlv_type - DRV_TLV_ABTS_1_TIMESTAMP) / 2;
+
+ return qed_mfw_get_tlv_time_value(&p_drv_buf->abts_tstamp[idx],
+ p_buf);
+ case DRV_TLV_RSCNS_RECEIVED:
+ if (p_drv_buf->rx_rscn_set) {
+ p_buf->p_val = &p_drv_buf->rx_rscn;
+ return sizeof(p_drv_buf->rx_rscn);
+ }
+ break;
+ case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_1:
+ case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_2:
+ case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_3:
+ case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_4:
+ idx = p_tlv->tlv_type - DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_1;
+
+ if (p_drv_buf->rx_rscn_nport_set[idx]) {
+ p_buf->p_val = &p_drv_buf->rx_rscn_nport[idx];
+ return sizeof(p_drv_buf->rx_rscn_nport[idx]);
+ }
+ break;
+ case DRV_TLV_LUN_RESETS_ISSUED:
+ if (p_drv_buf->tx_lun_rst_set) {
+ p_buf->p_val = &p_drv_buf->tx_lun_rst;
+ return sizeof(p_drv_buf->tx_lun_rst);
+ }
+ break;
+ case DRV_TLV_ABORT_TASK_SETS_ISSUED:
+ if (p_drv_buf->abort_task_sets_set) {
+ p_buf->p_val = &p_drv_buf->abort_task_sets;
+ return sizeof(p_drv_buf->abort_task_sets);
+ }
+ break;
+ case DRV_TLV_TPRLOS_SENT:
+ if (p_drv_buf->tx_tprlos_set) {
+ p_buf->p_val = &p_drv_buf->tx_tprlos;
+ return sizeof(p_drv_buf->tx_tprlos);
+ }
+ break;
+ case DRV_TLV_NOS_SENT_COUNT:
+ if (p_drv_buf->tx_nos_set) {
+ p_buf->p_val = &p_drv_buf->tx_nos;
+ return sizeof(p_drv_buf->tx_nos);
+ }
+ break;
+ case DRV_TLV_NOS_RECEIVED_COUNT:
+ if (p_drv_buf->rx_nos_set) {
+ p_buf->p_val = &p_drv_buf->rx_nos;
+ return sizeof(p_drv_buf->rx_nos);
+ }
+ break;
+ case DRV_TLV_OLS_COUNT:
+ if (p_drv_buf->ols_set) {
+ p_buf->p_val = &p_drv_buf->ols;
+ return sizeof(p_drv_buf->ols);
+ }
+ break;
+ case DRV_TLV_LR_COUNT:
+ if (p_drv_buf->lr_set) {
+ p_buf->p_val = &p_drv_buf->lr;
+ return sizeof(p_drv_buf->lr);
+ }
+ break;
+ case DRV_TLV_LRR_COUNT:
+ if (p_drv_buf->lrr_set) {
+ p_buf->p_val = &p_drv_buf->lrr;
+ return sizeof(p_drv_buf->lrr);
+ }
+ break;
+ case DRV_TLV_LIP_SENT_COUNT:
+ if (p_drv_buf->tx_lip_set) {
+ p_buf->p_val = &p_drv_buf->tx_lip;
+ return sizeof(p_drv_buf->tx_lip);
+ }
+ break;
+ case DRV_TLV_LIP_RECEIVED_COUNT:
+ if (p_drv_buf->rx_lip_set) {
+ p_buf->p_val = &p_drv_buf->rx_lip;
+ return sizeof(p_drv_buf->rx_lip);
+ }
+ break;
+ case DRV_TLV_EOFA_COUNT:
+ if (p_drv_buf->eofa_set) {
+ p_buf->p_val = &p_drv_buf->eofa;
+ return sizeof(p_drv_buf->eofa);
+ }
+ break;
+ case DRV_TLV_EOFNI_COUNT:
+ if (p_drv_buf->eofni_set) {
+ p_buf->p_val = &p_drv_buf->eofni;
+ return sizeof(p_drv_buf->eofni);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_CHECK_CONDITION_COUNT:
+ if (p_drv_buf->scsi_chks_set) {
+ p_buf->p_val = &p_drv_buf->scsi_chks;
+ return sizeof(p_drv_buf->scsi_chks);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_CONDITION_MET_COUNT:
+ if (p_drv_buf->scsi_cond_met_set) {
+ p_buf->p_val = &p_drv_buf->scsi_cond_met;
+ return sizeof(p_drv_buf->scsi_cond_met);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_BUSY_COUNT:
+ if (p_drv_buf->scsi_busy_set) {
+ p_buf->p_val = &p_drv_buf->scsi_busy;
+ return sizeof(p_drv_buf->scsi_busy);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_INTERMEDIATE_COUNT:
+ if (p_drv_buf->scsi_inter_set) {
+ p_buf->p_val = &p_drv_buf->scsi_inter;
+ return sizeof(p_drv_buf->scsi_inter);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_INTERMEDIATE_CONDITION_MET_COUNT:
+ if (p_drv_buf->scsi_inter_cond_met_set) {
+ p_buf->p_val = &p_drv_buf->scsi_inter_cond_met;
+ return sizeof(p_drv_buf->scsi_inter_cond_met);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_RESERVATION_CONFLICT_COUNT:
+ if (p_drv_buf->scsi_rsv_conflicts_set) {
+ p_buf->p_val = &p_drv_buf->scsi_rsv_conflicts;
+ return sizeof(p_drv_buf->scsi_rsv_conflicts);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_TASK_SET_FULL_COUNT:
+ if (p_drv_buf->scsi_tsk_full_set) {
+ p_buf->p_val = &p_drv_buf->scsi_tsk_full;
+ return sizeof(p_drv_buf->scsi_tsk_full);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_ACA_ACTIVE_COUNT:
+ if (p_drv_buf->scsi_aca_active_set) {
+ p_buf->p_val = &p_drv_buf->scsi_aca_active;
+ return sizeof(p_drv_buf->scsi_aca_active);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_TASK_ABORTED_COUNT:
+ if (p_drv_buf->scsi_tsk_abort_set) {
+ p_buf->p_val = &p_drv_buf->scsi_tsk_abort;
+ return sizeof(p_drv_buf->scsi_tsk_abort);
+ }
+ break;
+ case DRV_TLV_SCSI_CHECK_CONDITION_1_RECEIVED_SK_ASC_ASCQ:
+ case DRV_TLV_SCSI_CHECK_CONDITION_2_RECEIVED_SK_ASC_ASCQ:
+ case DRV_TLV_SCSI_CHECK_CONDITION_3_RECEIVED_SK_ASC_ASCQ:
+ case DRV_TLV_SCSI_CHECK_CONDITION_4_RECEIVED_SK_ASC_ASCQ:
+ case DRV_TLV_SCSI_CHECK_CONDITION_5_RECEIVED_SK_ASC_ASCQ:
+ idx = (p_tlv->tlv_type -
+ DRV_TLV_SCSI_CHECK_CONDITION_1_RECEIVED_SK_ASC_ASCQ) / 2;
+
+ if (p_drv_buf->scsi_rx_chk_set[idx]) {
+ p_buf->p_val = &p_drv_buf->scsi_rx_chk[idx];
+ return sizeof(p_drv_buf->scsi_rx_chk[idx]);
+ }
+ break;
+ case DRV_TLV_SCSI_CHECK_1_TIMESTAMP:
+ case DRV_TLV_SCSI_CHECK_2_TIMESTAMP:
+ case DRV_TLV_SCSI_CHECK_3_TIMESTAMP:
+ case DRV_TLV_SCSI_CHECK_4_TIMESTAMP:
+ case DRV_TLV_SCSI_CHECK_5_TIMESTAMP:
+ idx = (p_tlv->tlv_type - DRV_TLV_SCSI_CHECK_1_TIMESTAMP) / 2;
+ p_time = &p_drv_buf->scsi_chk_tstamp[idx];
+
+ return qed_mfw_get_tlv_time_value(p_time, p_buf);
+ default:
+ break;
+ }
+
+ return -1;
+}
+
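
Each scalar case in these getter functions repeats the same three lines. A local macro could fold them (illustrative only; the patch keeps the cases open-coded, presumably for grep-ability):

	#define QED_TLV_SCALAR(field)					\
		do {							\
			if (p_drv_buf->field##_set) {			\
				p_buf->p_val = &p_drv_buf->field;	\
				return sizeof(p_drv_buf->field);	\
			}						\
		} while (0)

	/* usage: case DRV_TLV_SCSI_TO: QED_TLV_SCALAR(scsi_timeout); break; */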
+static int
+qed_mfw_get_iscsi_tlv_value(struct qed_drv_tlv_hdr *p_tlv,
+ struct qed_mfw_tlv_iscsi *p_drv_buf,
+ struct qed_tlv_parsed_buf *p_buf)
+{
+ switch (p_tlv->tlv_type) {
+ case DRV_TLV_TARGET_LLMNR_ENABLED:
+ if (p_drv_buf->target_llmnr_set) {
+ p_buf->p_val = &p_drv_buf->target_llmnr;
+ return sizeof(p_drv_buf->target_llmnr);
+ }
+ break;
+ case DRV_TLV_HEADER_DIGEST_FLAG_ENABLED:
+ if (p_drv_buf->header_digest_set) {
+ p_buf->p_val = &p_drv_buf->header_digest;
+ return sizeof(p_drv_buf->header_digest);
+ }
+ break;
+ case DRV_TLV_DATA_DIGEST_FLAG_ENABLED:
+ if (p_drv_buf->data_digest_set) {
+ p_buf->p_val = &p_drv_buf->data_digest;
+ return sizeof(p_drv_buf->data_digest);
+ }
+ break;
+ case DRV_TLV_AUTHENTICATION_METHOD:
+ if (p_drv_buf->auth_method_set) {
+ p_buf->p_val = &p_drv_buf->auth_method;
+ return sizeof(p_drv_buf->auth_method);
+ }
+ break;
+ case DRV_TLV_ISCSI_BOOT_TARGET_PORTAL:
+ if (p_drv_buf->boot_taget_portal_set) {
+ p_buf->p_val = &p_drv_buf->boot_taget_portal;
+ return sizeof(p_drv_buf->boot_taget_portal);
+ }
+ break;
+ case DRV_TLV_MAX_FRAME_SIZE:
+ if (p_drv_buf->frame_size_set) {
+ p_buf->p_val = &p_drv_buf->frame_size;
+ return sizeof(p_drv_buf->frame_size);
+ }
+ break;
+ case DRV_TLV_PDU_TX_DESCRIPTORS_QUEUE_SIZE:
+ if (p_drv_buf->tx_desc_size_set) {
+ p_buf->p_val = &p_drv_buf->tx_desc_size;
+ return sizeof(p_drv_buf->tx_desc_size);
+ }
+ break;
+ case DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_SIZE:
+ if (p_drv_buf->rx_desc_size_set) {
+ p_buf->p_val = &p_drv_buf->rx_desc_size;
+ return sizeof(p_drv_buf->rx_desc_size);
+ }
+ break;
+ case DRV_TLV_ISCSI_BOOT_PROGRESS:
+ if (p_drv_buf->boot_progress_set) {
+ p_buf->p_val = &p_drv_buf->boot_progress;
+ return sizeof(p_drv_buf->boot_progress);
+ }
+ break;
+ case DRV_TLV_PDU_TX_DESCRIPTOR_QUEUE_AVG_DEPTH:
+ if (p_drv_buf->tx_desc_qdepth_set) {
+ p_buf->p_val = &p_drv_buf->tx_desc_qdepth;
+ return sizeof(p_drv_buf->tx_desc_qdepth);
+ }
+ break;
+ case DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_AVG_DEPTH:
+ if (p_drv_buf->rx_desc_qdepth_set) {
+ p_buf->p_val = &p_drv_buf->rx_desc_qdepth;
+ return sizeof(p_drv_buf->rx_desc_qdepth);
+ }
+ break;
+ case DRV_TLV_ISCSI_PDU_RX_FRAMES_RECEIVED:
+ if (p_drv_buf->rx_frames_set) {
+ p_buf->p_val = &p_drv_buf->rx_frames;
+ return sizeof(p_drv_buf->rx_frames);
+ }
+ break;
+ case DRV_TLV_ISCSI_PDU_RX_BYTES_RECEIVED:
+ if (p_drv_buf->rx_bytes_set) {
+ p_buf->p_val = &p_drv_buf->rx_bytes;
+ return sizeof(p_drv_buf->rx_bytes);
+ }
+ break;
+ case DRV_TLV_ISCSI_PDU_TX_FRAMES_SENT:
+ if (p_drv_buf->tx_frames_set) {
+ p_buf->p_val = &p_drv_buf->tx_frames;
+ return sizeof(p_drv_buf->tx_frames);
+ }
+ break;
+ case DRV_TLV_ISCSI_PDU_TX_BYTES_SENT:
+ if (p_drv_buf->tx_bytes_set) {
+ p_buf->p_val = &p_drv_buf->tx_bytes;
+ return sizeof(p_drv_buf->tx_bytes);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return -1;
+}
+
+static int qed_mfw_update_tlvs(struct qed_hwfn *p_hwfn,
+ u8 tlv_group, u8 *p_mfw_buf, u32 size)
+{
+ union qed_mfw_tlv_data *p_tlv_data;
+ struct qed_tlv_parsed_buf buffer;
+ struct qed_drv_tlv_hdr tlv;
+ int len = 0;
+ u32 offset;
+ u8 *p_tlv;
+
+ p_tlv_data = vzalloc(sizeof(*p_tlv_data));
+ if (!p_tlv_data)
+ return -ENOMEM;
+
+ if (qed_mfw_fill_tlv_data(p_hwfn, tlv_group, p_tlv_data)) {
+ vfree(p_tlv_data);
+ return -EINVAL;
+ }
+
+ memset(&tlv, 0, sizeof(tlv));
+ for (offset = 0; offset < size;
+ offset += sizeof(tlv) + sizeof(u32) * tlv.tlv_length) {
+ p_tlv = &p_mfw_buf[offset];
+ tlv.tlv_type = TLV_TYPE(p_tlv);
+ tlv.tlv_length = TLV_LENGTH(p_tlv);
+ tlv.tlv_flags = TLV_FLAGS(p_tlv);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "Type %d length = %d flags = 0x%x\n", tlv.tlv_type,
+ tlv.tlv_length, tlv.tlv_flags);
+
+ if (tlv_group == QED_MFW_TLV_GENERIC)
+ len = qed_mfw_get_gen_tlv_value(&tlv,
+ &p_tlv_data->generic,
+ &buffer);
+ else if (tlv_group == QED_MFW_TLV_ETH)
+ len = qed_mfw_get_eth_tlv_value(&tlv,
+ &p_tlv_data->eth,
+ &buffer);
+ else if (tlv_group == QED_MFW_TLV_FCOE)
+ len = qed_mfw_get_fcoe_tlv_value(&tlv,
+ &p_tlv_data->fcoe,
+ &buffer);
+ else
+ len = qed_mfw_get_iscsi_tlv_value(&tlv,
+ &p_tlv_data->iscsi,
+ &buffer);
+
+ if (len > 0) {
+ WARN(len > 4 * tlv.tlv_length,
+ "Incorrect MFW TLV length %d, it shouldn't be greater than %d\n",
+ len, 4 * tlv.tlv_length);
+ len = min_t(int, len, 4 * tlv.tlv_length);
+ tlv.tlv_flags |= QED_DRV_TLV_FLAGS_CHANGED;
+ TLV_FLAGS(p_tlv) = tlv.tlv_flags;
+ memcpy(p_mfw_buf + offset + sizeof(tlv),
+ buffer.p_val, len);
+ }
+ }
+
+ vfree(p_tlv_data);
+
+ return 0;
+}
+
+int qed_mfw_process_tlv_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 addr, size, offset, resp, param, val, global_offsize, global_addr;
+ u8 tlv_group = 0, id, *p_mfw_buf = NULL, *p_temp;
+ struct qed_drv_tlv_hdr tlv;
+ int rc;
+
+ addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+ PUBLIC_GLOBAL);
+ global_offsize = qed_rd(p_hwfn, p_ptt, addr);
+ global_addr = SECTION_ADDR(global_offsize, 0);
+ addr = global_addr + offsetof(struct public_global, data_ptr);
+ addr = qed_rd(p_hwfn, p_ptt, addr);
+ size = qed_rd(p_hwfn, p_ptt, global_addr +
+ offsetof(struct public_global, data_size));
+
+ if (!size) {
+ DP_NOTICE(p_hwfn, "Invalid TLV req size = %d\n", size);
+ goto drv_done;
+ }
+
+ p_mfw_buf = vzalloc(size);
+ if (!p_mfw_buf) {
+ DP_NOTICE(p_hwfn, "Failed allocate memory for p_mfw_buf\n");
+ goto drv_done;
+ }
+
+ /* Read the TLV request into a local buffer. The MFW represents the TLV
+ * in little-endian format, but the mcp returns it in big-endian format;
+ * hence the driver needs to convert the data to little endian first and
+ * then memcpy it, to preserve the MFW TLV format in the driver buffer.
+ */
+ for (offset = 0; offset < size; offset += sizeof(u32)) {
+ val = qed_rd(p_hwfn, p_ptt, addr + offset);
+ val = be32_to_cpu(val);
+ memcpy(&p_mfw_buf[offset], &val, sizeof(u32));
+ }
+
+ /* Parse the headers to enumerate the requested TLV groups */
+ for (offset = 0; offset < size;
+ offset += sizeof(tlv) + sizeof(u32) * tlv.tlv_length) {
+ p_temp = &p_mfw_buf[offset];
+ tlv.tlv_type = TLV_TYPE(p_temp);
+ tlv.tlv_length = TLV_LENGTH(p_temp);
+ if (qed_mfw_get_tlv_group(tlv.tlv_type, &tlv_group))
+ DP_VERBOSE(p_hwfn, NETIF_MSG_DRV,
+ "Un recognized TLV %d\n", tlv.tlv_type);
+ }
+
+ /* Sanitize the TLV groups according to personality */
+ if ((tlv_group & QED_MFW_TLV_ETH) && !QED_IS_L2_PERSONALITY(p_hwfn)) {
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "Skipping L2 TLVs for non-L2 function\n");
+ tlv_group &= ~QED_MFW_TLV_ETH;
+ }
+
+ if ((tlv_group & QED_MFW_TLV_FCOE) &&
+ p_hwfn->hw_info.personality != QED_PCI_FCOE) {
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "Skipping FCoE TLVs for non-FCoE function\n");
+ tlv_group &= ~QED_MFW_TLV_FCOE;
+ }
+
+ if ((tlv_group & QED_MFW_TLV_ISCSI) &&
+ p_hwfn->hw_info.personality != QED_PCI_ISCSI) {
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "Skipping iSCSI TLVs for non-iSCSI function\n");
+ tlv_group &= ~QED_MFW_TLV_ISCSI;
+ }
+
+ /* Update the TLV values in the local buffer */
+ for (id = QED_MFW_TLV_GENERIC; id < QED_MFW_TLV_MAX; id <<= 1) {
+ if (tlv_group & id)
+ if (qed_mfw_update_tlvs(p_hwfn, id, p_mfw_buf, size))
+ goto drv_done;
+ }
+
+ /* Write the TLV data back to shared memory. Each 4-byte stream is first
+ * copied into a u32 element to keep it in LSB format, and then converted
+ * to big endian as required by the mcp write.
+ */
+ for (offset = 0; offset < size; offset += sizeof(u32)) {
+ memcpy(&val, &p_mfw_buf[offset], sizeof(u32));
+ val = cpu_to_be32(val);
+ qed_wr(p_hwfn, p_ptt, addr + offset, val);
+ }
+
+drv_done:
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_TLV_DONE, 0, &resp,
+ &param);
+
+ vfree(p_mfw_buf);
+
+ return rc;
+}
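
The two endian-conversion loops in qed_mfw_process_tlv_req() reduce to a read/write helper pair; a hedged sketch with hypothetical names (qed_rd()/qed_wr() are the driver's own register accessors):

	static void tlv_shmem_read(struct qed_hwfn *hwfn, struct qed_ptt *ptt,
				   u32 addr, u8 *buf, u32 size)
	{
		u32 off, val;

		for (off = 0; off < size; off += sizeof(u32)) {
			val = be32_to_cpu(qed_rd(hwfn, ptt, addr + off));
			memcpy(&buf[off], &val, sizeof(u32));
		}
	}

	static void tlv_shmem_write(struct qed_hwfn *hwfn, struct qed_ptt *ptt,
				    u32 addr, u8 *buf, u32 size)
	{
		u32 off, val;

		for (off = 0; off < size; off += sizeof(u32)) {
			memcpy(&val, &buf[off], sizeof(u32));
			qed_wr(hwfn, ptt, addr + off, cpu_to_be32(val));
		}
	}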
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
index a411f9c702a1..101d677114f2 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -259,15 +259,29 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
goto free_cid_map;
}
+ /* Allocate bitmap for srqs */
+ p_rdma_info->num_srqs = qed_cxt_get_srq_count(p_hwfn);
+ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->srq_map,
+ p_rdma_info->num_srqs, "SRQ");
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to allocate srq bitmap, rc = %d\n", rc);
+ goto free_real_cid_map;
+ }
+
if (QED_IS_IWARP_PERSONALITY(p_hwfn))
rc = qed_iwarp_alloc(p_hwfn);
if (rc)
- goto free_cid_map;
+ goto free_srq_map;
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocation successful\n");
return 0;
+free_srq_map:
+ kfree(p_rdma_info->srq_map.bitmap);
+free_real_cid_map:
+ kfree(p_rdma_info->real_cid_map.bitmap);
free_cid_map:
kfree(p_rdma_info->cid_map.bitmap);
free_tid_map:
@@ -351,6 +365,8 @@ static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cq_map, 1);
qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->toggle_bits, 0);
qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tid_map, 1);
+ qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->srq_map, 1);
+ qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->real_cid_map, 1);
kfree(p_rdma_info->port);
kfree(p_rdma_info->dev);
@@ -431,6 +447,12 @@ static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn,
if (cdev->rdma_max_sge)
dev->max_sge = min_t(u32, cdev->rdma_max_sge, dev->max_sge);
+ dev->max_srq_sge = QED_RDMA_MAX_SGE_PER_SRQ_WQE;
+ if (p_hwfn->cdev->rdma_max_srq_sge) {
+ dev->max_srq_sge = min_t(u32,
+ p_hwfn->cdev->rdma_max_srq_sge,
+ dev->max_srq_sge);
+ }
dev->max_inline = ROCE_REQ_MAX_INLINE_DATA_SIZE;
dev->max_inline = (cdev->rdma_max_inline) ?
@@ -474,6 +496,8 @@ static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn,
dev->max_mr_mw_fmr_size = dev->max_mr_mw_fmr_pbl * PAGE_SIZE;
dev->max_pkey = QED_RDMA_MAX_P_KEY;
+ dev->max_srq = p_hwfn->p_rdma_info->num_srqs;
+ dev->max_srq_wr = QED_RDMA_MAX_SRQ_WQE_ELEM;
dev->max_qp_resp_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
(RDMA_RESP_RD_ATOMIC_ELM_SIZE * 2);
dev->max_qp_req_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
@@ -1484,11 +1508,8 @@ qed_rdma_register_tid(void *rdma_cxt,
case QED_RDMA_TID_FMR:
tid_type = RDMA_TID_FMR;
break;
- case QED_RDMA_TID_MW_TYPE1:
- tid_type = RDMA_TID_MW_TYPE1;
- break;
- case QED_RDMA_TID_MW_TYPE2A:
- tid_type = RDMA_TID_MW_TYPE2A;
+ case QED_RDMA_TID_MW:
+ tid_type = RDMA_TID_MW;
break;
default:
rc = -EINVAL;
@@ -1520,7 +1541,6 @@ qed_rdma_register_tid(void *rdma_cxt,
RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG, 1);
DMA_REGPAIR_LE(p_ramrod->dif_error_addr,
params->dif_error_addr);
- DMA_REGPAIR_LE(p_ramrod->dif_runt_addr, params->dif_runt_addr);
}
rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
@@ -1628,6 +1648,155 @@ static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev)
return QED_LEADING_HWFN(cdev);
}
+static int qed_rdma_modify_srq(void *rdma_cxt,
+ struct qed_rdma_modify_srq_in_params *in_params)
+{
+ struct rdma_srq_modify_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data = {};
+ struct qed_hwfn *p_hwfn = rdma_cxt;
+ struct qed_spq_entry *p_ent;
+ u16 opaque_fid;
+ int rc;
+
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ RDMA_RAMROD_MODIFY_SRQ,
+ p_hwfn->p_rdma_info->proto, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.rdma_modify_srq;
+ p_ramrod->srq_id.srq_idx = cpu_to_le16(in_params->srq_id);
+ opaque_fid = p_hwfn->hw_info.opaque_fid;
+ p_ramrod->srq_id.opaque_fid = cpu_to_le16(opaque_fid);
+ p_ramrod->wqe_limit = cpu_to_le32(in_params->wqe_limit);
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc)
+ return rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "modified SRQ id = %x\n",
+ in_params->srq_id);
+
+ return rc;
+}
+
+static int
+qed_rdma_destroy_srq(void *rdma_cxt,
+ struct qed_rdma_destroy_srq_in_params *in_params)
+{
+ struct rdma_srq_destroy_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data = {};
+ struct qed_hwfn *p_hwfn = rdma_cxt;
+ struct qed_spq_entry *p_ent;
+ struct qed_bmap *bmap;
+ u16 opaque_fid;
+ int rc;
+
+ opaque_fid = p_hwfn->hw_info.opaque_fid;
+
+ init_data.opaque_fid = opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ RDMA_RAMROD_DESTROY_SRQ,
+ p_hwfn->p_rdma_info->proto, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.rdma_destroy_srq;
+ p_ramrod->srq_id.srq_idx = cpu_to_le16(in_params->srq_id);
+ p_ramrod->srq_id.opaque_fid = cpu_to_le16(opaque_fid);
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc)
+ return rc;
+
+ bmap = &p_hwfn->p_rdma_info->srq_map;
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ qed_bmap_release_id(p_hwfn, bmap, in_params->srq_id);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "SRQ destroyed id = %x\n",
+ in_params->srq_id);
+
+ return rc;
+}
+
+static int
+qed_rdma_create_srq(void *rdma_cxt,
+ struct qed_rdma_create_srq_in_params *in_params,
+ struct qed_rdma_create_srq_out_params *out_params)
+{
+ struct rdma_srq_create_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data = {};
+ struct qed_hwfn *p_hwfn = rdma_cxt;
+ enum qed_cxt_elem_type elem_type;
+ struct qed_spq_entry *p_ent;
+ u16 opaque_fid, srq_id;
+ struct qed_bmap *bmap;
+ u32 returned_id;
+ int rc;
+
+ bmap = &p_hwfn->p_rdma_info->srq_map;
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ rc = qed_rdma_bmap_alloc_id(p_hwfn, bmap, &returned_id);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+
+ if (rc) {
+ DP_NOTICE(p_hwfn, "failed to allocate srq id\n");
+ return rc;
+ }
+
+ elem_type = QED_ELEM_SRQ;
+ rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, elem_type, returned_id);
+ if (rc)
+ goto err;
+ /* The returned id fits within 16 bits */
+ srq_id = (u16)returned_id;
+ opaque_fid = p_hwfn->hw_info.opaque_fid;
+
+ init_data.opaque_fid = opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ RDMA_RAMROD_CREATE_SRQ,
+ p_hwfn->p_rdma_info->proto, &init_data);
+ if (rc)
+ goto err;
+
+ p_ramrod = &p_ent->ramrod.rdma_create_srq;
+ DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, in_params->pbl_base_addr);
+ p_ramrod->pages_in_srq_pbl = cpu_to_le16(in_params->num_pages);
+ p_ramrod->pd_id = cpu_to_le16(in_params->pd_id);
+ p_ramrod->srq_id.srq_idx = cpu_to_le16(srq_id);
+ p_ramrod->srq_id.opaque_fid = cpu_to_le16(opaque_fid);
+ p_ramrod->page_size = cpu_to_le16(in_params->page_size);
+ DMA_REGPAIR_LE(p_ramrod->producers_addr, in_params->prod_pair_addr);
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc)
+ goto err;
+
+ out_params->srq_id = srq_id;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "SRQ created Id = %x\n", out_params->srq_id);
+
+ return rc;
+
+err:
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ qed_bmap_release_id(p_hwfn, bmap, returned_id);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+
+ return rc;
+}
+
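Together with the ops-table entries added further down, these three functions form the SRQ lifecycle exported to the RDMA upper layer. A hedged sketch of how a consumer might drive them (hypothetical caller; the field names follow the in/out parameter structs used above, and the values are purely illustrative):

    /* Hypothetical qed_rdma_ops consumer, illustrative values. */
    static int example_srq_lifecycle(const struct qed_rdma_ops *ops,
                                     void *rdma_cxt)
    {
            struct qed_rdma_create_srq_in_params create = {
                    .pbl_base_addr = 0,    /* DMA address of the PBL */
                    .prod_pair_addr = 0,   /* DMA address of the producers */
                    .num_pages = 1,
                    .pd_id = 0,
                    .page_size = 4096,
            };
            struct qed_rdma_create_srq_out_params out = {};
            struct qed_rdma_modify_srq_in_params mod = {};
            struct qed_rdma_destroy_srq_in_params del = {};
            int rc;

            rc = ops->rdma_create_srq(rdma_cxt, &create, &out);
            if (rc)
                    return rc;

            mod.srq_id = out.srq_id;
            mod.wqe_limit = 16;    /* arm the SRQ-limit async event */
            rc = ops->rdma_modify_srq(rdma_cxt, &mod);

            del.srq_id = out.srq_id;
            ops->rdma_destroy_srq(rdma_cxt, &del);
            return rc;
    }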
bool qed_rdma_allocated_qps(struct qed_hwfn *p_hwfn)
{
bool result;
@@ -1773,6 +1942,9 @@ static const struct qed_rdma_ops qed_rdma_ops_pass = {
.rdma_free_tid = &qed_rdma_free_tid,
.rdma_register_tid = &qed_rdma_register_tid,
.rdma_deregister_tid = &qed_rdma_deregister_tid,
+ .rdma_create_srq = &qed_rdma_create_srq,
+ .rdma_modify_srq = &qed_rdma_modify_srq,
+ .rdma_destroy_srq = &qed_rdma_destroy_srq,
.ll2_acquire_connection = &qed_ll2_acquire_connection,
.ll2_establish_connection = &qed_ll2_establish_connection,
.ll2_terminate_connection = &qed_ll2_terminate_connection,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.h b/drivers/net/ethernet/qlogic/qed/qed_rdma.h
index 18ec9cbd84f5..6f722ee8ee94 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.h
@@ -96,6 +96,8 @@ struct qed_rdma_info {
u8 num_cnqs;
u32 num_qps;
u32 num_mrs;
+ u32 num_srqs;
+ u16 srq_id_offset;
u16 queue_zone_base;
u16 max_queue_zones;
enum protocol_type proto;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index f7122059b6b5..d8ad2dcad8d5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -178,7 +178,7 @@
0x008c80UL
#define MCP_REG_SCRATCH \
0xe20000UL
-#define CNIG_REG_NW_PORT_MODE_BB_B0 \
+#define CNIG_REG_NW_PORT_MODE_BB \
0x218200UL
#define MISCS_REG_CHIP_NUM \
0x00976cUL
@@ -1621,6 +1621,7 @@
#define NIG_REG_TX_EDPM_CTRL_TX_EDPM_TC_EN_SHIFT 1
#define PRS_REG_SEARCH_GFT 0x1f11bcUL
+#define PRS_REG_SEARCH_NON_IP_AS_GFT 0x1f11c0UL
#define PRS_REG_CM_HDR_GFT 0x1f11c8UL
#define PRS_REG_GFT_CAM 0x1f1100UL
#define PRS_REG_GFT_PROFILE_MASK_RAM 0x1f1000UL
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index 6acfd43c1a4f..b5ce1581645f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -65,6 +65,8 @@ qed_roce_async_event(struct qed_hwfn *p_hwfn,
u8 fw_event_code,
u16 echo, union event_ring_data *data, u8 fw_return_code)
{
+ struct qed_rdma_events events = p_hwfn->p_rdma_info->events;
+
if (fw_event_code == ROCE_ASYNC_EVENT_DESTROY_QP_DONE) {
u16 icid =
(u16)le32_to_cpu(data->rdma_data.rdma_destroy_qp_data.cid);
@@ -75,11 +77,18 @@ qed_roce_async_event(struct qed_hwfn *p_hwfn,
*/
qed_roce_free_real_icid(p_hwfn, icid);
} else {
- struct qed_rdma_events *events = &p_hwfn->p_rdma_info->events;
+ if (fw_event_code == ROCE_ASYNC_EVENT_SRQ_EMPTY ||
+ fw_event_code == ROCE_ASYNC_EVENT_SRQ_LIMIT) {
+ u16 srq_id = (u16)data->rdma_data.async_handle.lo;
+
+ events.affiliated_event(events.context, fw_event_code,
+ &srq_id);
+ } else {
+ union rdma_eqe_data rdata = data->rdma_data;
- events->affiliated_event(p_hwfn->p_rdma_info->events.context,
- fw_event_code,
- (void *)&data->rdma_data.async_handle);
+ events.affiliated_event(events.context, fw_event_code,
+ (void *)&rdata.async_handle);
+ }
}
return 0;
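Note the demultiplexing above: for SRQ_EMPTY and SRQ_LIMIT the callback now receives a pointer to a u16 SRQ id taken from the low word of the handle, while every other event still gets the raw async_handle regpair. A hedged sketch of a matching consumer callback (hypothetical):

    /* Hypothetical qed_rdma_events consumer. */
    static void example_affiliated_event(void *context, u8 fw_event_code,
                                         void *fw_handle)
    {
            if (fw_event_code == ROCE_ASYNC_EVENT_SRQ_EMPTY ||
                fw_event_code == ROCE_ASYNC_EVENT_SRQ_LIMIT) {
                    u16 srq_id = *(u16 *)fw_handle;

                    /* look up the SRQ by id and notify its owner */
                    pr_debug("SRQ event %u on srq %u\n",
                             fw_event_code, srq_id);
            } else {
                    /* fw_handle points at the event's regpair handle */
            }
    }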
@@ -672,7 +681,6 @@ static int qed_roce_sp_modify_requester(struct qed_hwfn *p_hwfn,
static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
struct qed_rdma_qp *qp,
- u32 *num_invalidated_mw,
u32 *cq_prod)
{
struct roce_destroy_qp_resp_output_params *p_ramrod_res;
@@ -683,8 +691,6 @@ static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
int rc;
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
-
- *num_invalidated_mw = 0;
*cq_prod = qp->cq_prod;
if (!qp->resp_offloaded) {
@@ -733,7 +739,6 @@ static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
if (rc)
goto err;
- *num_invalidated_mw = le32_to_cpu(p_ramrod_res->num_invalidated_mw);
*cq_prod = le32_to_cpu(p_ramrod_res->cq_prod);
qp->cq_prod = *cq_prod;
@@ -755,8 +760,7 @@ err:
}
static int qed_roce_sp_destroy_qp_requester(struct qed_hwfn *p_hwfn,
- struct qed_rdma_qp *qp,
- u32 *num_bound_mw)
+ struct qed_rdma_qp *qp)
{
struct roce_destroy_qp_req_output_params *p_ramrod_res;
struct roce_destroy_qp_req_ramrod_data *p_ramrod;
@@ -798,7 +802,6 @@ static int qed_roce_sp_destroy_qp_requester(struct qed_hwfn *p_hwfn,
if (rc)
goto err;
- *num_bound_mw = le32_to_cpu(p_ramrod_res->num_bound_mw);
/* Free ORQ - only if ramrod succeeded, in case FW is still using it */
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
@@ -959,8 +962,6 @@ err_resp:
int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
- u32 num_invalidated_mw = 0;
- u32 num_bound_mw = 0;
u32 cq_prod;
int rc;
@@ -975,22 +976,14 @@ int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
if (qp->cur_state != QED_ROCE_QP_STATE_RESET) {
rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp,
- &num_invalidated_mw,
&cq_prod);
if (rc)
return rc;
/* Send destroy requester ramrod */
- rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp,
- &num_bound_mw);
+ rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp);
if (rc)
return rc;
-
- if (num_invalidated_mw != num_bound_mw) {
- DP_NOTICE(p_hwfn,
- "number of invalidate memory windows is different from bounded ones\n");
- return -EINVAL;
- }
}
return 0;
@@ -1001,7 +994,6 @@ int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
enum qed_roce_qp_state prev_state,
struct qed_rdma_modify_qp_in_params *params)
{
- u32 num_invalidated_mw = 0, num_bound_mw = 0;
int rc = 0;
/* Perform additional operations according to the current state and the
@@ -1081,7 +1073,6 @@ int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
/* Send destroy responder ramrod */
rc = qed_roce_sp_destroy_qp_responder(p_hwfn,
qp,
- &num_invalidated_mw,
&cq_prod);
if (rc)
@@ -1089,14 +1080,7 @@ int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
qp->cq_prod = cq_prod;
- rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp,
- &num_bound_mw);
-
- if (num_invalidated_mw != num_bound_mw) {
- DP_NOTICE(p_hwfn,
- "number of invalidate memory windows is different from bounded ones\n");
- return -EINVAL;
- }
+ rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp);
} else {
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "0\n");
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
index ab4ad8a1e2a5..e95431f6acd4 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -416,7 +416,6 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
* @param p_hwfn
* @param p_ptt
* @param p_tunn
- * @param mode
* @param allow_npar_tx_switch
*
* @return int
@@ -425,7 +424,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_tunnel_info *p_tunn,
- enum qed_mf_mode mode, bool allow_npar_tx_switch);
+ bool allow_npar_tx_switch);
/**
* @brief qed_sp_pf_update - PF Function Update Ramrod
@@ -463,6 +462,15 @@ int qed_sp_pf_update_stag(struct qed_hwfn *p_hwfn);
* @return int
*/
+/**
+ * @brief qed_sp_pf_update_ufp - PF ufp update Ramrod
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_sp_pf_update_ufp(struct qed_hwfn *p_hwfn);
+
int qed_sp_pf_stop(struct qed_hwfn *p_hwfn);
int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
index 5e927b6cac22..8de644b4721e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -306,7 +306,7 @@ qed_tunn_set_pf_start_params(struct qed_hwfn *p_hwfn,
int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_tunnel_info *p_tunn,
- enum qed_mf_mode mode, bool allow_npar_tx_switch)
+ bool allow_npar_tx_switch)
{
struct pf_start_ramrod_data *p_ramrod = NULL;
u16 sb = qed_int_get_sp_sb_id(p_hwfn);
@@ -314,7 +314,7 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
int rc = -EINVAL;
- u8 page_cnt;
+ u8 page_cnt, i;
/* update initial eq producer */
qed_eq_prod_update(p_hwfn,
@@ -339,21 +339,36 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
p_ramrod->dont_log_ramrods = 0;
p_ramrod->log_type_mask = cpu_to_le16(0xf);
- switch (mode) {
- case QED_MF_DEFAULT:
- case QED_MF_NPAR:
- p_ramrod->mf_mode = MF_NPAR;
- break;
- case QED_MF_OVLAN:
+ if (test_bit(QED_MF_OVLAN_CLSS, &p_hwfn->cdev->mf_bits))
p_ramrod->mf_mode = MF_OVLAN;
- break;
- default:
- DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n");
+ else
p_ramrod->mf_mode = MF_NPAR;
- }
p_ramrod->outer_tag_config.outer_tag.tci =
- cpu_to_le16(p_hwfn->hw_info.ovlan);
+ cpu_to_le16(p_hwfn->hw_info.ovlan);
+ if (test_bit(QED_MF_8021Q_TAGGING, &p_hwfn->cdev->mf_bits)) {
+ p_ramrod->outer_tag_config.outer_tag.tpid = ETH_P_8021Q;
+ } else if (test_bit(QED_MF_8021AD_TAGGING, &p_hwfn->cdev->mf_bits)) {
+ p_ramrod->outer_tag_config.outer_tag.tpid = ETH_P_8021AD;
+ p_ramrod->outer_tag_config.enable_stag_pri_change = 1;
+ }
+
+ p_ramrod->outer_tag_config.pri_map_valid = 1;
+ for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++)
+ p_ramrod->outer_tag_config.inner_to_outer_pri_map[i] = i;
+
+ /* enable_stag_pri_change should be set if the port is in BD mode
+ * or in UFP with Host Control mode.
+ */
+ if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits)) {
+ if (p_hwfn->ufp_info.pri_type == QED_UFP_PRI_OS)
+ p_ramrod->outer_tag_config.enable_stag_pri_change = 1;
+ else
+ p_ramrod->outer_tag_config.enable_stag_pri_change = 0;
+
+ p_ramrod->outer_tag_config.outer_tag.tci |=
+ cpu_to_le16(((u16)p_hwfn->ufp_info.tc << 13));
+ }
/* Place EQ address in RAMROD */
DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
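The `tc << 13` above relies on the 802.1Q TCI layout: PCP occupies bits 15:13, DEI bit 12, and the VLAN id bits 11:0, so OR-ing in the shifted UFP traffic class sets the priority field without disturbing the ovlan id already placed in the tci. A worked example (illustrative values):

    u16 vid = 100;                    /* VLAN id, bits 11:0     */
    u8  tc  = 5;                      /* UFP traffic class      */
    u16 tci = vid | ((u16)tc << 13);  /* PCP bits 15:13; 0xa064 */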
@@ -365,7 +380,7 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
qed_tunn_set_pf_start_params(p_hwfn, p_tunn, &p_ramrod->tunnel_config);
- if (IS_MF_SI(p_hwfn))
+ if (test_bit(QED_MF_INTER_PF_SWITCH, &p_hwfn->cdev->mf_bits))
p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch;
switch (p_hwfn->hw_info.personality) {
@@ -434,6 +449,39 @@ int qed_sp_pf_update(struct qed_hwfn *p_hwfn)
return qed_spq_post(p_hwfn, p_ent, NULL);
}
+int qed_sp_pf_update_ufp(struct qed_hwfn *p_hwfn)
+{
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc = -EOPNOTSUPP;
+
+ if (p_hwfn->ufp_info.pri_type == QED_UFP_PRI_UNKNOWN) {
+ DP_INFO(p_hwfn, "Invalid priority type %d\n",
+ p_hwfn->ufp_info.pri_type);
+ return -EINVAL;
+ }
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qed_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_CB;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
+ &init_data);
+ if (rc)
+ return rc;
+
+ p_ent->ramrod.pf_update.update_enable_stag_pri_change = true;
+ if (p_hwfn->ufp_info.pri_type == QED_UFP_PRI_OS)
+ p_ent->ramrod.pf_update.enable_stag_pri_change = 1;
+ else
+ p_ent->ramrod.pf_update.enable_stag_pri_change = 0;
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
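qed_sp_pf_update_ufp() is the runtime counterpart of the pf_start tagging setup above: when the management firmware later changes the UFP priority type, the PF can re-post just the S-tag priority-change flag without restarting. A hedged sketch of a caller (hypothetical event-handler fragment; assumes ufp_info was already refreshed from the MFW):

    /* Hypothetical UFP notification handler fragment. */
    static void example_handle_ufp_change(struct qed_hwfn *p_hwfn)
    {
            int rc;

            rc = qed_sp_pf_update_ufp(p_hwfn);
            if (rc)
                    DP_NOTICE(p_hwfn,
                              "Failed to post UFP update ramrod, rc = %d\n",
                              rc);
    }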
/* Set pf update ramrod command params */
int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index 5acb91b3564c..f01bf52bc381 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -48,7 +48,7 @@ static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
u8 opcode,
__le16 echo,
union event_ring_data *data, u8 fw_return_code);
-
+static int qed_iov_bulletin_set_mac(struct qed_hwfn *p_hwfn, u8 *mac, int vfid);
static u8 qed_vf_calculate_legacy(struct qed_vf_info *p_vf)
{
@@ -1790,7 +1790,8 @@ static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn,
if (!p_vf->vport_instance)
return -EINVAL;
- if (events & BIT(MAC_ADDR_FORCED)) {
+ if ((events & BIT(MAC_ADDR_FORCED)) ||
+ p_vf->p_vf_info.is_trusted_configured) {
/* Since there's no way [currently] of removing the MAC,
* we can always assume this means we need to force it.
*/
@@ -1809,8 +1810,12 @@ static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn,
"PF failed to configure MAC for VF\n");
return rc;
}
-
- p_vf->configured_features |= 1 << MAC_ADDR_FORCED;
+ if (p_vf->p_vf_info.is_trusted_configured)
+ p_vf->configured_features |=
+ BIT(VFPF_BULLETIN_MAC_ADDR);
+ else
+ p_vf->configured_features |=
+ BIT(MAC_ADDR_FORCED);
}
if (events & BIT(VLAN_ADDR_FORCED)) {
@@ -3170,6 +3175,10 @@ static int qed_iov_vf_update_mac_shadow(struct qed_hwfn *p_hwfn,
if (p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED))
return 0;
+ /* Don't keep a shadow copy since we don't intend to restore it. */
+ if (p_vf->p_vf_info.is_trusted_configured)
+ return 0;
+
/* First remove entries and then add new ones */
if (p_params->opcode == QED_FILTER_REMOVE) {
for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
@@ -3244,9 +3253,17 @@ static int qed_iov_chk_ucast(struct qed_hwfn *hwfn,
/* No real decision to make; Store the configured MAC */
if (params->type == QED_FILTER_MAC ||
- params->type == QED_FILTER_MAC_VLAN)
+ params->type == QED_FILTER_MAC_VLAN) {
ether_addr_copy(vf->mac, params->mac);
+ if (vf->is_trusted_configured) {
+ qed_iov_bulletin_set_mac(hwfn, vf->mac, vfid);
+
+ /* Update and post the bulletin again */
+ qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
+ }
+ }
+
return 0;
}
@@ -3803,6 +3820,40 @@ static void qed_iov_get_link(struct qed_hwfn *p_hwfn,
__qed_vf_get_link_caps(p_hwfn, p_caps, p_bulletin);
}
+static int
+qed_iov_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *p_vf)
+{
+ struct qed_bulletin_content *p_bulletin = p_vf->bulletin.p_virt;
+ struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
+ struct vfpf_bulletin_update_mac_tlv *p_req;
+ u8 status = PFVF_STATUS_SUCCESS;
+ int rc = 0;
+
+ if (!p_vf->p_vf_info.is_trusted_configured) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "Blocking bulletin update request from untrusted VF[%d]\n",
+ p_vf->abs_vf_id);
+ status = PFVF_STATUS_NOT_SUPPORTED;
+ rc = -EINVAL;
+ goto send_status;
+ }
+
+ p_req = &mbx->req_virt->bulletin_update_mac;
+ ether_addr_copy(p_bulletin->mac, p_req->mac);
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Updated bulletin of VF[%d] with requested MAC[%pM]\n",
+ p_vf->abs_vf_id, p_req->mac);
+
+send_status:
+ qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
+ CHANNEL_TLV_BULLETIN_UPDATE_MAC,
+ sizeof(struct pfvf_def_resp_tlv), status);
+ return rc;
+}
+
static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, int vfid)
{
@@ -3882,6 +3933,9 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
case CHANNEL_TLV_COALESCE_READ:
qed_iov_vf_pf_get_coalesce(p_hwfn, p_ptt, p_vf);
break;
+ case CHANNEL_TLV_BULLETIN_UPDATE_MAC:
+ qed_iov_vf_pf_bulletin_update_mac(p_hwfn, p_ptt, p_vf);
+ break;
}
} else if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) {
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
@@ -4081,16 +4135,60 @@ static void qed_iov_bulletin_set_forced_mac(struct qed_hwfn *p_hwfn,
return;
}
- feature = 1 << MAC_ADDR_FORCED;
+ if (vf_info->p_vf_info.is_trusted_configured) {
+ feature = BIT(VFPF_BULLETIN_MAC_ADDR);
+ /* Trust mode will disable Forced MAC */
+ vf_info->bulletin.p_virt->valid_bitmap &=
+ ~BIT(MAC_ADDR_FORCED);
+ } else {
+ feature = BIT(MAC_ADDR_FORCED);
+ /* Forced MAC will disable MAC_ADDR */
+ vf_info->bulletin.p_virt->valid_bitmap &=
+ ~BIT(VFPF_BULLETIN_MAC_ADDR);
+ }
+
memcpy(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);
vf_info->bulletin.p_virt->valid_bitmap |= feature;
- /* Forced MAC will disable MAC_ADDR */
- vf_info->bulletin.p_virt->valid_bitmap &= ~BIT(VFPF_BULLETIN_MAC_ADDR);
qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
}
+static int qed_iov_bulletin_set_mac(struct qed_hwfn *p_hwfn, u8 *mac, int vfid)
+{
+ struct qed_vf_info *vf_info;
+ u64 feature;
+
+ vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!vf_info) {
+ DP_NOTICE(p_hwfn->cdev, "Can not set MAC, invalid vfid [%d]\n",
+ vfid);
+ return -EINVAL;
+ }
+
+ if (vf_info->b_malicious) {
+ DP_NOTICE(p_hwfn->cdev, "Can't set MAC to malicious VF [%d]\n",
+ vfid);
+ return -EINVAL;
+ }
+
+ if (vf_info->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED)) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Can not set MAC, Forced MAC is configured\n");
+ return -EINVAL;
+ }
+
+ feature = BIT(VFPF_BULLETIN_MAC_ADDR);
+ ether_addr_copy(vf_info->bulletin.p_virt->mac, mac);
+
+ vf_info->bulletin.p_virt->valid_bitmap |= feature;
+
+ if (vf_info->p_vf_info.is_trusted_configured)
+ qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
+
+ return 0;
+}
+
static void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn,
u16 pvid, int vfid)
{
@@ -4204,6 +4302,21 @@ out:
return rc;
}
+static u8 *qed_iov_bulletin_get_mac(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
+{
+ struct qed_vf_info *p_vf;
+
+ p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+ if (!p_vf || !p_vf->bulletin.p_virt)
+ return NULL;
+
+ if (!(p_vf->bulletin.p_virt->valid_bitmap &
+ BIT(VFPF_BULLETIN_MAC_ADDR)))
+ return NULL;
+
+ return p_vf->bulletin.p_virt->mac;
+}
+
static u8 *qed_iov_bulletin_get_forced_mac(struct qed_hwfn *p_hwfn,
u16 rel_vf_id)
{
@@ -4493,8 +4606,12 @@ static int qed_sriov_pf_set_mac(struct qed_dev *cdev, u8 *mac, int vfid)
if (!vf_info)
continue;
- /* Set the forced MAC, and schedule the IOV task */
- ether_addr_copy(vf_info->forced_mac, mac);
+ /* Set the MAC, and schedule the IOV task */
+ if (vf_info->is_trusted_configured)
+ ether_addr_copy(vf_info->mac, mac);
+ else
+ ether_addr_copy(vf_info->forced_mac, mac);
+
qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG);
}
@@ -4802,6 +4919,33 @@ static void qed_handle_vf_msg(struct qed_hwfn *hwfn)
qed_ptt_release(hwfn, ptt);
}
+static bool qed_pf_validate_req_vf_mac(struct qed_hwfn *hwfn,
+ u8 *mac,
+ struct qed_public_vf_info *info)
+{
+ if (info->is_trusted_configured) {
+ if (is_valid_ether_addr(info->mac) &&
+ (!mac || !ether_addr_equal(mac, info->mac)))
+ return true;
+ } else {
+ if (is_valid_ether_addr(info->forced_mac) &&
+ (!mac || !ether_addr_equal(mac, info->forced_mac)))
+ return true;
+ }
+
+ return false;
+}
+
+static void qed_set_bulletin_mac(struct qed_hwfn *hwfn,
+ struct qed_public_vf_info *info,
+ int vfid)
+{
+ if (info->is_trusted_configured)
+ qed_iov_bulletin_set_mac(hwfn, info->mac, vfid);
+ else
+ qed_iov_bulletin_set_forced_mac(hwfn, info->forced_mac, vfid);
+}
+
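These two helpers centralize the trusted/untrusted split for PF-initiated MACs. A summary of the flow as wired up in this patch (illustrative; typically reached via the netdev .ndo_set_vf_mac path):

    /* PF set-VF-MAC flow with trust mode:
     *
     *   qed_sriov_pf_set_mac()            trusted:   store in info->mac
     *                                     untrusted: store in info->forced_mac
     *     -> qed_schedule_iov(QED_IOV_WQ_SET_UNICAST_FILTER_FLAG)
     *     -> qed_handle_pf_set_vf_unicast()
     *        -> qed_pf_validate_req_vf_mac()   did the MAC actually change?
     *        -> qed_set_bulletin_mac()     trusted:   plain bulletin MAC
     *                                      untrusted: forced bulletin MAC
     */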
static void qed_handle_pf_set_vf_unicast(struct qed_hwfn *hwfn)
{
int i;
@@ -4816,18 +4960,20 @@ static void qed_handle_pf_set_vf_unicast(struct qed_hwfn *hwfn)
continue;
/* Update data on bulletin board */
- mac = qed_iov_bulletin_get_forced_mac(hwfn, i);
- if (is_valid_ether_addr(info->forced_mac) &&
- (!mac || !ether_addr_equal(mac, info->forced_mac))) {
+ if (info->is_trusted_configured)
+ mac = qed_iov_bulletin_get_mac(hwfn, i);
+ else
+ mac = qed_iov_bulletin_get_forced_mac(hwfn, i);
+
+ if (qed_pf_validate_req_vf_mac(hwfn, mac, info)) {
DP_VERBOSE(hwfn,
QED_MSG_IOV,
"Handling PF setting of VF MAC to VF 0x%02x [Abs 0x%02x]\n",
i,
hwfn->cdev->p_iov_info->first_vf_in_pf + i);
- /* Update bulletin board with forced MAC */
- qed_iov_bulletin_set_forced_mac(hwfn,
- info->forced_mac, i);
+ /* Update bulletin board with MAC */
+ qed_set_bulletin_mac(hwfn, info, i);
update = true;
}
@@ -4867,6 +5013,72 @@ static void qed_handle_bulletin_post(struct qed_hwfn *hwfn)
qed_ptt_release(hwfn, ptt);
}
+static void qed_update_mac_for_vf_trust_change(struct qed_hwfn *hwfn, int vf_id)
+{
+ struct qed_public_vf_info *vf_info;
+ struct qed_vf_info *vf;
+ u8 *force_mac;
+ int i;
+
+ vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true);
+ vf = qed_iov_get_vf_info(hwfn, vf_id, true);
+
+ if (!vf_info || !vf)
+ return;
+
+ /* Convert the forced MAC to a generic MAC when VF trust is turned on */
+ if (vf_info->is_trusted_configured &&
+ (vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED))) {
+ force_mac = qed_iov_bulletin_get_forced_mac(hwfn, vf_id);
+
+ if (force_mac) {
+ /* Clear existing shadow copy of MAC to have a clean
+ * slate.
+ */
+ for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
+ if (ether_addr_equal(vf->shadow_config.macs[i],
+ vf_info->mac)) {
+ memset(vf->shadow_config.macs[i], 0,
+ ETH_ALEN);
+ DP_VERBOSE(hwfn, QED_MSG_IOV,
+ "Shadow MAC %pM removed for VF 0x%02x, VF trust mode is ON\n",
+ vf_info->mac, vf_id);
+ break;
+ }
+ }
+
+ ether_addr_copy(vf_info->mac, force_mac);
+ memset(vf_info->forced_mac, 0, ETH_ALEN);
+ vf->bulletin.p_virt->valid_bitmap &=
+ ~BIT(MAC_ADDR_FORCED);
+ qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
+ }
+ }
+
+ /* Update shadow copy with VF MAC when trust mode is turned off */
+ if (!vf_info->is_trusted_configured) {
+ u8 empty_mac[ETH_ALEN];
+
+ memset(empty_mac, 0, ETH_ALEN);
+ for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
+ if (ether_addr_equal(vf->shadow_config.macs[i],
+ empty_mac)) {
+ ether_addr_copy(vf->shadow_config.macs[i],
+ vf_info->mac);
+ DP_VERBOSE(hwfn, QED_MSG_IOV,
+ "Shadow is updated with %pM for VF 0x%02x, VF trust mode is OFF\n",
+ vf_info->mac, vf_id);
+ break;
+ }
+ }
+ /* Clear the bulletin when trust mode is turned off, to have a
+ * clean slate for subsequent (normal) operations.
+ */
+ qed_iov_bulletin_set_mac(hwfn, empty_mac, vf_id);
+ qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
+ }
+}
+
static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn)
{
struct qed_sp_vport_update_params params;
@@ -4890,6 +5102,9 @@ static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn)
continue;
vf_info->is_trusted_configured = vf_info->is_trusted_request;
+ /* Handle forced MAC mode */
+ qed_update_mac_for_vf_trust_change(hwfn, i);
+
/* Validate that the VF has a configured vport */
vf = qed_iov_get_vf_info(hwfn, i, true);
if (!vf->vport_instance)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
index 91b5e9f02a62..2d7fcd6a0777 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
@@ -1375,6 +1375,35 @@ exit:
}
int
+qed_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn,
+ u8 *p_mac)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct vfpf_bulletin_update_mac_tlv *p_req;
+ struct pfvf_def_resp_tlv *p_resp;
+ int rc;
+
+ if (!p_mac)
+ return -EINVAL;
+
+ /* clear mailbox and prep header tlv */
+ p_req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_BULLETIN_UPDATE_MAC,
+ sizeof(*p_req));
+ ether_addr_copy(p_req->mac, p_mac);
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Requesting bulletin update for MAC[%pM]\n", p_mac);
+
+ /* add list termination tlv */
+ qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ p_resp = &p_iov->pf2vf_reply->default_resp;
+ rc = qed_send_msg2pf(p_hwfn, &p_resp->hdr.status, sizeof(*p_resp));
+ qed_vf_pf_req_end(p_hwfn, rc);
+ return rc;
+}
+
+int
qed_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn,
u16 rx_coal, u16 tx_coal, struct qed_queue_cid *p_cid)
{
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h
index 97d44dfb38ca..4f05d5eb3cf5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h
@@ -518,6 +518,12 @@ struct pfvf_read_coal_resp_tlv {
u8 padding[6];
};
+struct vfpf_bulletin_update_mac_tlv {
+ struct vfpf_first_tlv first_tlv;
+ u8 mac[ETH_ALEN];
+ u8 padding[2];
+};
+
union vfpf_tlvs {
struct vfpf_first_tlv first_tlv;
struct vfpf_acquire_tlv acquire;
@@ -532,6 +538,7 @@ union vfpf_tlvs {
struct vfpf_update_tunn_param_tlv tunn_param_update;
struct vfpf_update_coalesce update_coalesce;
struct vfpf_read_coal_req_tlv read_coal_req;
+ struct vfpf_bulletin_update_mac_tlv bulletin_update_mac;
struct tlv_buffer_size tlv_buf_size;
};
@@ -650,6 +657,7 @@ enum {
CHANNEL_TLV_COALESCE_UPDATE,
CHANNEL_TLV_QID,
CHANNEL_TLV_COALESCE_READ,
+ CHANNEL_TLV_BULLETIN_UPDATE_MAC,
CHANNEL_TLV_MAX,
/* Required for iterating over vport-update tlvs.
@@ -1042,6 +1050,13 @@ int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn,
struct qed_tunnel_info *p_tunn);
u32 qed_vf_hw_bar_size(struct qed_hwfn *p_hwfn, enum BAR_ID bar_id);
+/**
+ * @brief - Ask PF to update the MAC address in its bulletin board
+ *
+ * @param p_mac - mac address to be updated in bulletin board
+ */
+int qed_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn, u8 *p_mac);
+
#else
static inline void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
struct qed_mcp_link_params *params)
@@ -1228,6 +1243,12 @@ static inline int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn,
return -EINVAL;
}
+static inline int qed_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn,
+ u8 *p_mac)
+{
+ return -EINVAL;
+}
+
static inline u32
qed_vf_hw_bar_size(struct qed_hwfn *p_hwfn,
enum BAR_ID bar_id)
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index 9935978c5542..d7ed0d3dbf71 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -75,6 +75,7 @@ struct qede_stats_common {
u64 rx_bcast_pkts;
u64 mftag_filter_discards;
u64 mac_filter_discards;
+ u64 gft_filter_drop;
u64 tx_ucast_bytes;
u64 tx_mcast_bytes;
u64 tx_bcast_bytes;
@@ -87,6 +88,7 @@ struct qede_stats_common {
u64 coalesced_aborts_num;
u64 non_coalesced_pkts;
u64 coalesced_bytes;
+ u64 link_change_count;
/* port */
u64 rx_64_byte_packets;
@@ -290,15 +292,12 @@ struct qede_agg_info {
* aggregation.
*/
struct sw_rx_data buffer;
- dma_addr_t buffer_mapping;
-
struct sk_buff *skb;
/* We need some structs from the start cookie until termination */
u16 vlan_tag;
- u16 start_cqe_bd_len;
- u8 start_cqe_placement_offset;
+ bool tpa_start_fail;
u8 state;
u8 frag_id;
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index ecbf1ded7a39..f4a0f8ff8261 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -161,6 +161,7 @@ static const struct {
QEDE_STAT(no_buff_discards),
QEDE_PF_STAT(mftag_filter_discards),
QEDE_PF_STAT(mac_filter_discards),
+ QEDE_PF_STAT(gft_filter_drop),
QEDE_STAT(tx_err_drop_pkts),
QEDE_STAT(ttl0_discard),
QEDE_STAT(packet_too_big_discard),
@@ -170,6 +171,8 @@ static const struct {
QEDE_STAT(coalesced_aborts_num),
QEDE_STAT(non_coalesced_pkts),
QEDE_STAT(coalesced_bytes),
+
+ QEDE_STAT(link_change_count),
};
#define QEDE_NUM_STATS ARRAY_SIZE(qede_stats_arr)
@@ -1508,7 +1511,8 @@ static int qede_selftest_receive_traffic(struct qede_dev *edev)
len = le16_to_cpu(fp_cqe->len_on_first_bd);
data_ptr = (u8 *)(page_address(sw_rx_data->data) +
fp_cqe->placement_offset +
- sw_rx_data->page_offset);
+ sw_rx_data->page_offset +
+ rxq->rx_headroom);
if (ether_addr_equal(data_ptr, edev->ndev->dev_addr) &&
ether_addr_equal(data_ptr + ETH_ALEN,
edev->ndev->dev_addr)) {
diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
index 6687e04d1558..e9e088d9c815 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
@@ -38,6 +38,7 @@
#include <linux/qed/qed_if.h>
#include "qede.h"
+#define QEDE_FILTER_PRINT_MAX_LEN (64)
struct qede_arfs_tuple {
union {
__be32 src_ipv4;
@@ -51,6 +52,18 @@ struct qede_arfs_tuple {
__be16 dst_port;
__be16 eth_proto;
u8 ip_proto;
+
+ /* Describe filtering mode needed for this kind of filter */
+ enum qed_filter_config_mode mode;
+
+ /* Used to compare new/old filters. Return true if IPs match */
+ bool (*ip_comp)(struct qede_arfs_tuple *a, struct qede_arfs_tuple *b);
+
+ /* Given an address into ethhdr build a header from tuple info */
+ void (*build_hdr)(struct qede_arfs_tuple *t, void *header);
+
+ /* Stringify the tuple for a print into the provided buffer */
+ void (*stringify)(struct qede_arfs_tuple *t, void *buffer);
};
struct qede_arfs_fltr_node {
@@ -73,9 +86,11 @@ struct qede_arfs_fltr_node {
u16 sw_id;
u16 rxq_id;
u16 next_rxq_id;
+ u8 vfid;
bool filter_op;
bool used;
u8 fw_rc;
+ bool b_is_drop;
struct hlist_node node;
};
@@ -90,7 +105,9 @@ struct qede_arfs {
spinlock_t arfs_list_lock;
unsigned long *arfs_fltr_bmap;
int filter_count;
- bool enable;
+
+ /* Currently configured filtering mode */
+ enum qed_filter_config_mode mode;
};
static void qede_configure_arfs_fltr(struct qede_dev *edev,
@@ -109,12 +126,22 @@ static void qede_configure_arfs_fltr(struct qede_dev *edev,
params.length = n->buf_len;
params.qid = rxq_id;
params.b_is_add = add_fltr;
+ params.b_is_drop = n->b_is_drop;
+
+ if (n->vfid) {
+ params.b_is_vf = true;
+ params.vf_id = n->vfid - 1;
+ }
- DP_VERBOSE(edev, NETIF_MSG_RX_STATUS,
- "%s arfs filter flow_id=%d, sw_id=%d, src_port=%d, dst_port=%d, rxq=%d\n",
- add_fltr ? "Adding" : "Deleting",
- n->flow_id, n->sw_id, ntohs(n->tuple.src_port),
- ntohs(n->tuple.dst_port), rxq_id);
+ if (n->tuple.stringify) {
+ char tuple_buffer[QEDE_FILTER_PRINT_MAX_LEN];
+
+ n->tuple.stringify(&n->tuple, tuple_buffer);
+ DP_VERBOSE(edev, NETIF_MSG_RX_STATUS,
+ "%s sw_id[0x%x]: %s [vf %u queue %d]\n",
+ add_fltr ? "Adding" : "Deleting",
+ n->sw_id, tuple_buffer, n->vfid, rxq_id);
+ }
n->used = true;
n->filter_op = add_fltr;
@@ -145,14 +172,13 @@ qede_enqueue_fltr_and_config_searcher(struct qede_dev *edev,
INIT_HLIST_NODE(&fltr->node);
hlist_add_head(&fltr->node,
QEDE_ARFS_BUCKET_HEAD(edev, bucket_idx));
- edev->arfs->filter_count++;
-
- if (edev->arfs->filter_count == 1 && !edev->arfs->enable) {
- enum qed_filter_config_mode mode;
- mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
- edev->ops->configure_arfs_searcher(edev->cdev, mode);
- edev->arfs->enable = true;
+ edev->arfs->filter_count++;
+ if (edev->arfs->filter_count == 1 &&
+ edev->arfs->mode == QED_FILTER_CONFIG_MODE_DISABLE) {
+ edev->ops->configure_arfs_searcher(edev->cdev,
+ fltr->tuple.mode);
+ edev->arfs->mode = fltr->tuple.mode;
}
return 0;
@@ -167,14 +193,15 @@ qede_dequeue_fltr_and_config_searcher(struct qede_dev *edev,
fltr->buf_len, DMA_TO_DEVICE);
qede_free_arfs_filter(edev, fltr);
- edev->arfs->filter_count--;
- if (!edev->arfs->filter_count && edev->arfs->enable) {
+ edev->arfs->filter_count--;
+ if (!edev->arfs->filter_count &&
+ edev->arfs->mode != QED_FILTER_CONFIG_MODE_DISABLE) {
enum qed_filter_config_mode mode;
mode = QED_FILTER_CONFIG_MODE_DISABLE;
- edev->arfs->enable = false;
edev->ops->configure_arfs_searcher(edev->cdev, mode);
+ edev->arfs->mode = QED_FILTER_CONFIG_MODE_DISABLE;
}
}
@@ -264,25 +291,17 @@ void qede_process_arfs_filters(struct qede_dev *edev, bool free_fltr)
}
}
+#ifdef CONFIG_RFS_ACCEL
spin_lock_bh(&edev->arfs->arfs_list_lock);
- if (!edev->arfs->filter_count) {
- if (edev->arfs->enable) {
- enum qed_filter_config_mode mode;
-
- mode = QED_FILTER_CONFIG_MODE_DISABLE;
- edev->arfs->enable = false;
- edev->ops->configure_arfs_searcher(edev->cdev, mode);
- }
-#ifdef CONFIG_RFS_ACCEL
- } else {
+ if (edev->arfs->filter_count) {
set_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags);
schedule_delayed_work(&edev->sp_task,
QEDE_SP_TASK_POLL_DELAY);
-#endif
}
spin_unlock_bh(&edev->arfs->arfs_list_lock);
+#endif
}
/* This function waits until all aRFS filters get deleted and freed.
@@ -512,6 +531,7 @@ int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
eth->h_proto = skb->protocol;
n->tuple.eth_proto = skb->protocol;
n->tuple.ip_proto = ip_proto;
+ n->tuple.mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
memcpy(n->data + ETH_HLEN, skb->data, skb_headlen(skb));
rc = qede_enqueue_fltr_and_config_searcher(edev, n, tbl_idx);
@@ -550,8 +570,7 @@ void qede_force_mac(void *dev, u8 *mac, bool forced)
__qede_lock(edev);
- /* MAC hints take effect only if we haven't set one already */
- if (is_valid_ether_addr(edev->ndev->dev_addr) && !forced) {
+ if (!is_valid_ether_addr(mac)) {
__qede_unlock(edev);
return;
}
@@ -1161,6 +1180,10 @@ int qede_set_mac_addr(struct net_device *ndev, void *p)
if (edev->state != QEDE_STATE_OPEN) {
DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
"The device is currently down\n");
+ /* Ask the PF to explicitly update its copy in the bulletin board */
+ if (IS_VF(edev) && edev->ops->req_bulletin_update_mac)
+ edev->ops->req_bulletin_update_mac(edev->cdev,
+ ndev->dev_addr);
goto out;
}
@@ -1336,38 +1359,6 @@ qede_get_arfs_fltr_by_loc(struct hlist_head *head, u32 location)
return NULL;
}
-static bool
-qede_compare_user_flow_ips(struct qede_arfs_fltr_node *tpos,
- struct ethtool_rx_flow_spec *fsp,
- __be16 proto)
-{
- if (proto == htons(ETH_P_IP)) {
- struct ethtool_tcpip4_spec *ip;
-
- ip = &fsp->h_u.tcp_ip4_spec;
-
- if (tpos->tuple.src_ipv4 == ip->ip4src &&
- tpos->tuple.dst_ipv4 == ip->ip4dst)
- return true;
- else
- return false;
- } else {
- struct ethtool_tcpip6_spec *ip6;
- struct in6_addr *src;
-
- ip6 = &fsp->h_u.tcp_ip6_spec;
- src = &tpos->tuple.src_ipv6;
-
- if (!memcmp(src, &ip6->ip6src, sizeof(struct in6_addr)) &&
- !memcmp(&tpos->tuple.dst_ipv6, &ip6->ip6dst,
- sizeof(struct in6_addr)))
- return true;
- else
- return false;
- }
- return false;
-}
-
int qede_get_cls_rule_all(struct qede_dev *edev, struct ethtool_rxnfc *info,
u32 *rule_locs)
{
@@ -1452,102 +1443,444 @@ int qede_get_cls_rule_entry(struct qede_dev *edev, struct ethtool_rxnfc *cmd)
fsp->ring_cookie = fltr->rxq_id;
+ if (fltr->vfid) {
+ fsp->ring_cookie |= ((u64)fltr->vfid) <<
+ ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
+ }
+
+ if (fltr->b_is_drop)
+ fsp->ring_cookie = RX_CLS_FLOW_DISC;
unlock:
__qede_unlock(edev);
return rc;
}
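The ring_cookie therefore encodes three possible destinations: RX_CLS_FLOW_DISC for a drop rule, a VF in the upper field, or a plain PF RX queue. A worked decode (illustrative values; note the driver keeps vfid biased by one, so zero means PF):

    u64 cookie = (3ULL << ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF) | 5;

    /* ethtool_get_flow_spec_ring_vf(cookie) == 3, stored as fltr->vfid,
     * i.e. VF index 2 after the driver subtracts the bias;
     * ethtool_get_flow_spec_ring(cookie)    == 5, the RX queue id.
     */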
static int
-qede_validate_and_check_flow_exist(struct qede_dev *edev,
- struct ethtool_rx_flow_spec *fsp,
- int *min_hlen)
+qede_poll_arfs_filter_config(struct qede_dev *edev,
+ struct qede_arfs_fltr_node *fltr)
{
- __be16 src_port = 0x0, dst_port = 0x0;
- struct qede_arfs_fltr_node *fltr;
- struct hlist_node *temp;
- struct hlist_head *head;
- __be16 eth_proto;
- u8 ip_proto;
+ int count = QEDE_ARFS_POLL_COUNT;
- if (fsp->location >= QEDE_RFS_MAX_FLTR ||
- fsp->ring_cookie >= QEDE_RSS_COUNT(edev))
- return -EINVAL;
+ while (fltr->used && count) {
+ msleep(20);
+ count--;
+ }
+
+ if (count == 0 || fltr->fw_rc) {
+ DP_NOTICE(edev, "Timeout or failure in polling filter config\n");
+ qede_dequeue_fltr_and_config_searcher(edev, fltr);
+ return -EIO;
+ }
+
+ return fltr->fw_rc;
+}
+
+static int qede_flow_get_min_header_size(struct qede_arfs_tuple *t)
+{
+ int size = ETH_HLEN;
+
+ if (t->eth_proto == htons(ETH_P_IP))
+ size += sizeof(struct iphdr);
+ else
+ size += sizeof(struct ipv6hdr);
+
+ if (t->ip_proto == IPPROTO_TCP)
+ size += sizeof(struct tcphdr);
+ else
+ size += sizeof(struct udphdr);
+
+ return size;
+}
+
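The resulting lengths are small and fixed per flow type; worked examples:

    /* qede_flow_get_min_header_size() worked out per flow type:
     *   TCP/IPv4: ETH_HLEN(14) + iphdr(20)   + tcphdr(20) = 54 bytes
     *   UDP/IPv4: ETH_HLEN(14) + iphdr(20)   + udphdr(8)  = 42 bytes
     *   TCP/IPv6: ETH_HLEN(14) + ipv6hdr(40) + tcphdr(20) = 74 bytes
     *   UDP/IPv6: ETH_HLEN(14) + ipv6hdr(40) + udphdr(8)  = 62 bytes
     */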
+static bool qede_flow_spec_ipv4_cmp(struct qede_arfs_tuple *a,
+ struct qede_arfs_tuple *b)
+{
+ if (a->eth_proto != htons(ETH_P_IP) ||
+ b->eth_proto != htons(ETH_P_IP))
+ return false;
+
+ return (a->src_ipv4 == b->src_ipv4) &&
+ (a->dst_ipv4 == b->dst_ipv4);
+}
+
+static void qede_flow_build_ipv4_hdr(struct qede_arfs_tuple *t,
+ void *header)
+{
+ __be16 *ports = (__be16 *)(header + ETH_HLEN + sizeof(struct iphdr));
+ struct iphdr *ip = (struct iphdr *)(header + ETH_HLEN);
+ struct ethhdr *eth = (struct ethhdr *)header;
+
+ eth->h_proto = t->eth_proto;
+ ip->saddr = t->src_ipv4;
+ ip->daddr = t->dst_ipv4;
+ ip->version = 0x4;
+ ip->ihl = 0x5;
+ ip->protocol = t->ip_proto;
+ ip->tot_len = cpu_to_be16(qede_flow_get_min_header_size(t) - ETH_HLEN);
+
+ /* ports is weakly typed to suit both TCP and UDP ports */
+ ports[0] = t->src_port;
+ ports[1] = t->dst_port;
+}
+
+static void qede_flow_stringify_ipv4_hdr(struct qede_arfs_tuple *t,
+ void *buffer)
+{
+ const char *prefix = t->ip_proto == IPPROTO_TCP ? "TCP" : "UDP";
+
+ snprintf(buffer, QEDE_FILTER_PRINT_MAX_LEN,
+ "%s %pI4 (%04x) -> %pI4 (%04x)",
+ prefix, &t->src_ipv4, t->src_port,
+ &t->dst_ipv4, t->dst_port);
+}
+
+static bool qede_flow_spec_ipv6_cmp(struct qede_arfs_tuple *a,
+ struct qede_arfs_tuple *b)
+{
+ if (a->eth_proto != htons(ETH_P_IPV6) ||
+ b->eth_proto != htons(ETH_P_IPV6))
+ return false;
+
+ if (memcmp(&a->src_ipv6, &b->src_ipv6, sizeof(struct in6_addr)))
+ return false;
- if (fsp->flow_type == TCP_V4_FLOW) {
- *min_hlen += sizeof(struct iphdr) +
- sizeof(struct tcphdr);
- eth_proto = htons(ETH_P_IP);
- ip_proto = IPPROTO_TCP;
- } else if (fsp->flow_type == UDP_V4_FLOW) {
- *min_hlen += sizeof(struct iphdr) +
- sizeof(struct udphdr);
- eth_proto = htons(ETH_P_IP);
- ip_proto = IPPROTO_UDP;
- } else if (fsp->flow_type == TCP_V6_FLOW) {
- *min_hlen += sizeof(struct ipv6hdr) +
- sizeof(struct tcphdr);
- eth_proto = htons(ETH_P_IPV6);
- ip_proto = IPPROTO_TCP;
- } else if (fsp->flow_type == UDP_V6_FLOW) {
- *min_hlen += sizeof(struct ipv6hdr) +
- sizeof(struct udphdr);
- eth_proto = htons(ETH_P_IPV6);
- ip_proto = IPPROTO_UDP;
+ if (memcmp(&a->dst_ipv6, &b->dst_ipv6, sizeof(struct in6_addr)))
+ return false;
+
+ return true;
+}
+
+static void qede_flow_build_ipv6_hdr(struct qede_arfs_tuple *t,
+ void *header)
+{
+ __be16 *ports = (__be16 *)(header + ETH_HLEN + sizeof(struct ipv6hdr));
+ struct ipv6hdr *ip6 = (struct ipv6hdr *)(header + ETH_HLEN);
+ struct ethhdr *eth = (struct ethhdr *)header;
+
+ eth->h_proto = t->eth_proto;
+ memcpy(&ip6->saddr, &t->src_ipv6, sizeof(struct in6_addr));
+ memcpy(&ip6->daddr, &t->dst_ipv6, sizeof(struct in6_addr));
+ ip6->version = 0x6;
+
+ if (t->ip_proto == IPPROTO_TCP) {
+ ip6->nexthdr = NEXTHDR_TCP;
+ ip6->payload_len = cpu_to_be16(sizeof(struct tcphdr));
} else {
- DP_NOTICE(edev, "Unsupported flow type = 0x%x\n",
- fsp->flow_type);
- return -EPROTONOSUPPORT;
+ ip6->nexthdr = NEXTHDR_UDP;
+ ip6->payload_len = cpu_to_be16(sizeof(struct udphdr));
}
- if (eth_proto == htons(ETH_P_IP)) {
- src_port = fsp->h_u.tcp_ip4_spec.psrc;
- dst_port = fsp->h_u.tcp_ip4_spec.pdst;
+ /* ports is weakly typed to suit both TCP and UDP ports */
+ ports[0] = t->src_port;
+ ports[1] = t->dst_port;
+}
+
+/* Validate fields which are set and not accepted by the driver */
+static int qede_flow_spec_validate_unused(struct qede_dev *edev,
+ struct ethtool_rx_flow_spec *fs)
+{
+ if (fs->flow_type & FLOW_MAC_EXT) {
+ DP_INFO(edev, "Don't support MAC extensions\n");
+ return -EOPNOTSUPP;
+ }
+
+ if ((fs->flow_type & FLOW_EXT) &&
+ (fs->h_ext.vlan_etype || fs->h_ext.vlan_tci)) {
+ DP_INFO(edev, "Don't support vlan-based classification\n");
+ return -EOPNOTSUPP;
+ }
+
+ if ((fs->flow_type & FLOW_EXT) &&
+ (fs->h_ext.data[0] || fs->h_ext.data[1])) {
+ DP_INFO(edev, "Don't support user defined data\n");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int qede_flow_spec_to_tuple_ipv4_common(struct qede_dev *edev,
+ struct qede_arfs_tuple *t,
+ struct ethtool_rx_flow_spec *fs)
+{
+ if ((fs->h_u.tcp_ip4_spec.ip4src &
+ fs->m_u.tcp_ip4_spec.ip4src) != fs->h_u.tcp_ip4_spec.ip4src) {
+ DP_INFO(edev, "Don't support IP-masks\n");
+ return -EOPNOTSUPP;
+ }
+
+ if ((fs->h_u.tcp_ip4_spec.ip4dst &
+ fs->m_u.tcp_ip4_spec.ip4dst) != fs->h_u.tcp_ip4_spec.ip4dst) {
+ DP_INFO(edev, "Don't support IP-masks\n");
+ return -EOPNOTSUPP;
+ }
+
+ if ((fs->h_u.tcp_ip4_spec.psrc &
+ fs->m_u.tcp_ip4_spec.psrc) != fs->h_u.tcp_ip4_spec.psrc) {
+ DP_INFO(edev, "Don't support port-masks\n");
+ return -EOPNOTSUPP;
+ }
+
+ if ((fs->h_u.tcp_ip4_spec.pdst &
+ fs->m_u.tcp_ip4_spec.pdst) != fs->h_u.tcp_ip4_spec.pdst) {
+ DP_INFO(edev, "Don't support port-masks\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (fs->h_u.tcp_ip4_spec.tos) {
+ DP_INFO(edev, "Don't support tos\n");
+ return -EOPNOTSUPP;
+ }
+
+ t->eth_proto = htons(ETH_P_IP);
+ t->src_ipv4 = fs->h_u.tcp_ip4_spec.ip4src;
+ t->dst_ipv4 = fs->h_u.tcp_ip4_spec.ip4dst;
+ t->src_port = fs->h_u.tcp_ip4_spec.psrc;
+ t->dst_port = fs->h_u.tcp_ip4_spec.pdst;
+
+ /* The input must be either a full 4-tuple, a destination port
+ * only, or a source IP only
+ */
+ if (t->src_port && t->dst_port && t->src_ipv4 && t->dst_ipv4) {
+ t->mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
+ } else if (!t->src_port && t->dst_port &&
+ !t->src_ipv4 && !t->dst_ipv4) {
+ t->mode = QED_FILTER_CONFIG_MODE_L4_PORT;
+ } else if (!t->src_port && !t->dst_port &&
+ !t->dst_ipv4 && t->src_ipv4) {
+ t->mode = QED_FILTER_CONFIG_MODE_IP_SRC;
} else {
- src_port = fsp->h_u.tcp_ip6_spec.psrc;
- dst_port = fsp->h_u.tcp_ip6_spec.pdst;
+ DP_INFO(edev, "Invalid N-tuple\n");
+ return -EOPNOTSUPP;
}
- head = QEDE_ARFS_BUCKET_HEAD(edev, 0);
- hlist_for_each_entry_safe(fltr, temp, head, node) {
- if ((fltr->tuple.ip_proto == ip_proto &&
- fltr->tuple.eth_proto == eth_proto &&
- qede_compare_user_flow_ips(fltr, fsp, eth_proto) &&
- fltr->tuple.src_port == src_port &&
- fltr->tuple.dst_port == dst_port) ||
- fltr->sw_id == fsp->location)
- return -EEXIST;
+ t->ip_comp = qede_flow_spec_ipv4_cmp;
+ t->build_hdr = qede_flow_build_ipv4_hdr;
+ t->stringify = qede_flow_stringify_ipv4_hdr;
+
+ return 0;
+}
+
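The three accepted tuple shapes map one-to-one onto hardware filtering modes, and because the device runs a single mode at a time (enforced later in qede_flow_spec_validate()), the first rule installed pins the mode for all subsequent rules. Illustrative classifications:

    /* Example inputs and the mode each selects (illustrative):
     *   full src/dst IP + src/dst port -> QED_FILTER_CONFIG_MODE_5_TUPLE
     *   destination port only          -> QED_FILTER_CONFIG_MODE_L4_PORT
     *   source IP only                 -> QED_FILTER_CONFIG_MODE_IP_SRC
     * Any other combination is rejected as an invalid N-tuple.
     */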
+static int qede_flow_spec_to_tuple_tcpv4(struct qede_dev *edev,
+ struct qede_arfs_tuple *t,
+ struct ethtool_rx_flow_spec *fs)
+{
+ t->ip_proto = IPPROTO_TCP;
+
+ if (qede_flow_spec_to_tuple_ipv4_common(edev, t, fs))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int qede_flow_spec_to_tuple_udpv4(struct qede_dev *edev,
+ struct qede_arfs_tuple *t,
+ struct ethtool_rx_flow_spec *fs)
+{
+ t->ip_proto = IPPROTO_UDP;
+
+ if (qede_flow_spec_to_tuple_ipv4_common(edev, t, fs))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int qede_flow_spec_to_tuple_ipv6_common(struct qede_dev *edev,
+ struct qede_arfs_tuple *t,
+ struct ethtool_rx_flow_spec *fs)
+{
+ struct in6_addr zero_addr;
+ void *p;
+
+ p = &zero_addr;
+ memset(p, 0, sizeof(zero_addr));
+
+ if ((fs->h_u.tcp_ip6_spec.psrc &
+ fs->m_u.tcp_ip6_spec.psrc) != fs->h_u.tcp_ip6_spec.psrc) {
+ DP_INFO(edev, "Don't support port-masks\n");
+ return -EOPNOTSUPP;
+ }
+
+ if ((fs->h_u.tcp_ip6_spec.pdst &
+ fs->m_u.tcp_ip6_spec.pdst) != fs->h_u.tcp_ip6_spec.pdst) {
+ DP_INFO(edev, "Don't support port-masks\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (fs->h_u.tcp_ip6_spec.tclass) {
+ DP_INFO(edev, "Don't support tclass\n");
+ return -EOPNOTSUPP;
}
+ t->eth_proto = htons(ETH_P_IPV6);
+ memcpy(&t->src_ipv6, &fs->h_u.tcp_ip6_spec.ip6src,
+ sizeof(struct in6_addr));
+ memcpy(&t->dst_ipv6, &fs->h_u.tcp_ip6_spec.ip6dst,
+ sizeof(struct in6_addr));
+ t->src_port = fs->h_u.tcp_ip6_spec.psrc;
+ t->dst_port = fs->h_u.tcp_ip6_spec.pdst;
+
+ /* The input must be either a full 4-tuple, a destination port
+ * only, or a source IP only
+ */
+ if (t->src_port && t->dst_port &&
+ memcmp(&t->src_ipv6, p, sizeof(struct in6_addr)) &&
+ memcmp(&t->dst_ipv6, p, sizeof(struct in6_addr))) {
+ t->mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
+ } else if (!t->src_port && t->dst_port &&
+ !memcmp(&t->src_ipv6, p, sizeof(struct in6_addr)) &&
+ !memcmp(&t->dst_ipv6, p, sizeof(struct in6_addr))) {
+ t->mode = QED_FILTER_CONFIG_MODE_L4_PORT;
+ } else if (!t->src_port && !t->dst_port &&
+ !memcmp(&t->dst_ipv6, p, sizeof(struct in6_addr)) &&
+ memcmp(&t->src_ipv6, p, sizeof(struct in6_addr))) {
+ t->mode = QED_FILTER_CONFIG_MODE_IP_SRC;
+ } else {
+ DP_INFO(edev, "Invalid N-tuple\n");
+ return -EOPNOTSUPP;
+ }
+
+ t->ip_comp = qede_flow_spec_ipv6_cmp;
+ t->build_hdr = qede_flow_build_ipv6_hdr;
+
return 0;
}
-static int
-qede_poll_arfs_filter_config(struct qede_dev *edev,
- struct qede_arfs_fltr_node *fltr)
+static int qede_flow_spec_to_tuple_tcpv6(struct qede_dev *edev,
+ struct qede_arfs_tuple *t,
+ struct ethtool_rx_flow_spec *fs)
{
- int count = QEDE_ARFS_POLL_COUNT;
+ t->ip_proto = IPPROTO_TCP;
- while (fltr->used && count) {
- msleep(20);
- count--;
+ if (qede_flow_spec_to_tuple_ipv6_common(edev, t, fs))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int qede_flow_spec_to_tuple_udpv6(struct qede_dev *edev,
+ struct qede_arfs_tuple *t,
+ struct ethtool_rx_flow_spec *fs)
+{
+ t->ip_proto = IPPROTO_UDP;
+
+ if (qede_flow_spec_to_tuple_ipv6_common(edev, t, fs))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int qede_flow_spec_to_tuple(struct qede_dev *edev,
+ struct qede_arfs_tuple *t,
+ struct ethtool_rx_flow_spec *fs)
+{
+ memset(t, 0, sizeof(*t));
+
+ if (qede_flow_spec_validate_unused(edev, fs))
+ return -EOPNOTSUPP;
+
+ switch ((fs->flow_type & ~FLOW_EXT)) {
+ case TCP_V4_FLOW:
+ return qede_flow_spec_to_tuple_tcpv4(edev, t, fs);
+ case UDP_V4_FLOW:
+ return qede_flow_spec_to_tuple_udpv4(edev, t, fs);
+ case TCP_V6_FLOW:
+ return qede_flow_spec_to_tuple_tcpv6(edev, t, fs);
+ case UDP_V6_FLOW:
+ return qede_flow_spec_to_tuple_udpv6(edev, t, fs);
+ default:
+ DP_VERBOSE(edev, NETIF_MSG_IFUP,
+ "Can't support flow of type %08x\n", fs->flow_type);
+ return -EOPNOTSUPP;
}
- if (count == 0 || fltr->fw_rc) {
- qede_dequeue_fltr_and_config_searcher(edev, fltr);
- return -EIO;
+ return 0;
+}
+
+static int qede_flow_spec_validate(struct qede_dev *edev,
+ struct ethtool_rx_flow_spec *fs,
+ struct qede_arfs_tuple *t)
+{
+ if (fs->location >= QEDE_RFS_MAX_FLTR) {
+ DP_INFO(edev, "Location out-of-bounds\n");
+ return -EINVAL;
}
- return fltr->fw_rc;
+ /* Check location isn't already in use */
+ if (test_bit(fs->location, edev->arfs->arfs_fltr_bmap)) {
+ DP_INFO(edev, "Location already in use\n");
+ return -EINVAL;
+ }
+
+ /* Check if the filtering-mode could support the filter */
+ if (edev->arfs->filter_count &&
+ edev->arfs->mode != t->mode) {
+ DP_INFO(edev,
+ "flow_spec would require filtering mode %08x, but %08x is configured\n",
+ t->mode, edev->arfs->mode);
+ return -EINVAL;
+ }
+
+ /* If drop requested then no need to validate other data */
+ if (fs->ring_cookie == RX_CLS_FLOW_DISC)
+ return 0;
+
+ if (ethtool_get_flow_spec_ring_vf(fs->ring_cookie))
+ return 0;
+
+ if (fs->ring_cookie >= QEDE_RSS_COUNT(edev)) {
+ DP_INFO(edev, "Queue out-of-bounds\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Must be called while qede lock is held */
+static struct qede_arfs_fltr_node *
+qede_flow_find_fltr(struct qede_dev *edev, struct qede_arfs_tuple *t)
+{
+ struct qede_arfs_fltr_node *fltr;
+ struct hlist_node *temp;
+ struct hlist_head *head;
+
+ head = QEDE_ARFS_BUCKET_HEAD(edev, 0);
+
+ hlist_for_each_entry_safe(fltr, temp, head, node) {
+ if (fltr->tuple.ip_proto == t->ip_proto &&
+ fltr->tuple.src_port == t->src_port &&
+ fltr->tuple.dst_port == t->dst_port &&
+ t->ip_comp(&fltr->tuple, t))
+ return fltr;
+ }
+
+ return NULL;
+}
+
+static void qede_flow_set_destination(struct qede_dev *edev,
+ struct qede_arfs_fltr_node *n,
+ struct ethtool_rx_flow_spec *fs)
+{
+ if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
+ n->b_is_drop = true;
+ return;
+ }
+
+ n->vfid = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
+ n->rxq_id = ethtool_get_flow_spec_ring(fs->ring_cookie);
+ n->next_rxq_id = n->rxq_id;
+
+ if (n->vfid)
+ DP_VERBOSE(edev, QED_MSG_SP,
+ "Configuring N-tuple for VF 0x%02x\n", n->vfid - 1);
}
int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info)
{
struct ethtool_rx_flow_spec *fsp = &info->fs;
struct qede_arfs_fltr_node *n;
- int min_hlen = ETH_HLEN, rc;
- struct ethhdr *eth;
- struct iphdr *ip;
- __be16 *ports;
+ struct qede_arfs_tuple t;
+ int min_hlen, rc;
__qede_lock(edev);
@@ -1556,16 +1889,28 @@ int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info)
goto unlock;
}
- rc = qede_validate_and_check_flow_exist(edev, fsp, &min_hlen);
+ /* Translate the flow specification into something fitting our DB */
+ rc = qede_flow_spec_to_tuple(edev, &t, fsp);
if (rc)
goto unlock;
+ /* Make sure location is valid and filter isn't already set */
+ rc = qede_flow_spec_validate(edev, fsp, &t);
+ if (rc)
+ goto unlock;
+
+ if (qede_flow_find_fltr(edev, &t)) {
+ rc = -EINVAL;
+ goto unlock;
+ }
+
n = kzalloc(sizeof(*n), GFP_KERNEL);
if (!n) {
rc = -ENOMEM;
goto unlock;
}
+ min_hlen = qede_flow_get_min_header_size(&t);
n->data = kzalloc(min_hlen, GFP_KERNEL);
if (!n->data) {
kfree(n);
@@ -1576,68 +1921,13 @@ int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info)
n->sw_id = fsp->location;
set_bit(n->sw_id, edev->arfs->arfs_fltr_bmap);
n->buf_len = min_hlen;
- n->rxq_id = fsp->ring_cookie;
- n->next_rxq_id = n->rxq_id;
- eth = (struct ethhdr *)n->data;
- if (info->fs.flow_type == TCP_V4_FLOW ||
- info->fs.flow_type == UDP_V4_FLOW) {
- ports = (__be16 *)(n->data + ETH_HLEN +
- sizeof(struct iphdr));
- eth->h_proto = htons(ETH_P_IP);
- n->tuple.eth_proto = htons(ETH_P_IP);
- n->tuple.src_ipv4 = info->fs.h_u.tcp_ip4_spec.ip4src;
- n->tuple.dst_ipv4 = info->fs.h_u.tcp_ip4_spec.ip4dst;
- n->tuple.src_port = info->fs.h_u.tcp_ip4_spec.psrc;
- n->tuple.dst_port = info->fs.h_u.tcp_ip4_spec.pdst;
- ports[0] = n->tuple.src_port;
- ports[1] = n->tuple.dst_port;
- ip = (struct iphdr *)(n->data + ETH_HLEN);
- ip->saddr = info->fs.h_u.tcp_ip4_spec.ip4src;
- ip->daddr = info->fs.h_u.tcp_ip4_spec.ip4dst;
- ip->version = 0x4;
- ip->ihl = 0x5;
-
- if (info->fs.flow_type == TCP_V4_FLOW) {
- n->tuple.ip_proto = IPPROTO_TCP;
- ip->protocol = IPPROTO_TCP;
- } else {
- n->tuple.ip_proto = IPPROTO_UDP;
- ip->protocol = IPPROTO_UDP;
- }
- ip->tot_len = cpu_to_be16(min_hlen - ETH_HLEN);
- } else {
- struct ipv6hdr *ip6;
-
- ip6 = (struct ipv6hdr *)(n->data + ETH_HLEN);
- ports = (__be16 *)(n->data + ETH_HLEN +
- sizeof(struct ipv6hdr));
- eth->h_proto = htons(ETH_P_IPV6);
- n->tuple.eth_proto = htons(ETH_P_IPV6);
- memcpy(&n->tuple.src_ipv6, &info->fs.h_u.tcp_ip6_spec.ip6src,
- sizeof(struct in6_addr));
- memcpy(&n->tuple.dst_ipv6, &info->fs.h_u.tcp_ip6_spec.ip6dst,
- sizeof(struct in6_addr));
- n->tuple.src_port = info->fs.h_u.tcp_ip6_spec.psrc;
- n->tuple.dst_port = info->fs.h_u.tcp_ip6_spec.pdst;
- ports[0] = n->tuple.src_port;
- ports[1] = n->tuple.dst_port;
- memcpy(&ip6->saddr, &n->tuple.src_ipv6,
- sizeof(struct in6_addr));
- memcpy(&ip6->daddr, &n->tuple.dst_ipv6,
- sizeof(struct in6_addr));
- ip6->version = 0x6;
+ memcpy(&n->tuple, &t, sizeof(n->tuple));
- if (info->fs.flow_type == TCP_V6_FLOW) {
- n->tuple.ip_proto = IPPROTO_TCP;
- ip6->nexthdr = NEXTHDR_TCP;
- ip6->payload_len = cpu_to_be16(sizeof(struct tcphdr));
- } else {
- n->tuple.ip_proto = IPPROTO_UDP;
- ip6->nexthdr = NEXTHDR_UDP;
- ip6->payload_len = cpu_to_be16(sizeof(struct udphdr));
- }
- }
+ qede_flow_set_destination(edev, n, fsp);
+
+ /* Build a minimal header according to the flow */
+ n->tuple.build_hdr(&n->tuple, n->data);
rc = qede_enqueue_fltr_and_config_searcher(edev, n, 0);
if (rc)
@@ -1647,6 +1937,7 @@ int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info)
rc = qede_poll_arfs_filter_config(edev, n);
unlock:
__qede_unlock(edev);
+
return rc;
}
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index 14941303189d..6c702399b801 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -660,7 +660,8 @@ static int qede_fill_frag_skb(struct qede_dev *edev,
/* Add one frag and update the appropriate fields in the skb */
skb_fill_page_desc(skb, tpa_info->frag_id++,
- current_bd->data, current_bd->page_offset,
+ current_bd->data,
+ current_bd->page_offset + rxq->rx_headroom,
len_on_bd);
if (unlikely(qede_realloc_rx_buffer(rxq, current_bd))) {
@@ -671,8 +672,7 @@ static int qede_fill_frag_skb(struct qede_dev *edev,
goto out;
}
- qed_chain_consume(&rxq->rx_bd_ring);
- rxq->sw_rx_cons++;
+ qede_rx_bd_ring_consume(rxq);
skb->data_len += len_on_bd;
skb->truesize += rxq->rx_buf_seg_size;
@@ -721,64 +721,129 @@ static u8 qede_check_tunn_csum(u16 flag)
return QEDE_CSUM_UNNECESSARY | tcsum;
}
+static inline struct sk_buff *
+qede_build_skb(struct qede_rx_queue *rxq,
+ struct sw_rx_data *bd, u16 len, u16 pad)
+{
+ struct sk_buff *skb;
+ void *buf;
+
+ buf = page_address(bd->data) + bd->page_offset;
+ skb = build_skb(buf, rxq->rx_buf_seg_size);
+
+ skb_reserve(skb, pad);
+ skb_put(skb, len);
+
+ return skb;
+}
+
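qede_build_skb() hands the page fragment straight to build_skb(), which carves struct skb_shared_info out of the tail of the same buffer; this only works because rx_buf_seg_size is sized with that tailroom included. A hedged sketch of the assumed invariant (illustrative names, not the driver's exact computation):

    /* Each RX segment must cover headroom + largest frame and still leave
     * room for build_skb()'s trailing shared info.
     */
    unsigned int seg_size = SKB_DATA_ALIGN(rx_headroom + max_frame_size) +
                            SKB_DATA_ALIGN(sizeof(struct skb_shared_info));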
+static struct sk_buff *
+qede_tpa_rx_build_skb(struct qede_dev *edev,
+ struct qede_rx_queue *rxq,
+ struct sw_rx_data *bd, u16 len, u16 pad,
+ bool alloc_skb)
+{
+ struct sk_buff *skb;
+
+ skb = qede_build_skb(rxq, bd, len, pad);
+ bd->page_offset += rxq->rx_buf_seg_size;
+
+ if (bd->page_offset == PAGE_SIZE) {
+ if (unlikely(qede_alloc_rx_buffer(rxq, true))) {
+ DP_NOTICE(edev,
+ "Failed to allocate RX buffer for tpa start\n");
+ bd->page_offset -= rxq->rx_buf_seg_size;
+ page_ref_inc(bd->data);
+ dev_kfree_skb_any(skb);
+ return NULL;
+ }
+ } else {
+ page_ref_inc(bd->data);
+ qede_reuse_page(rxq, bd);
+ }
+
+ /* We've consumed the first BD and prepared an SKB */
+ qede_rx_bd_ring_consume(rxq);
+
+ return skb;
+}
+
+static struct sk_buff *
+qede_rx_build_skb(struct qede_dev *edev,
+ struct qede_rx_queue *rxq,
+ struct sw_rx_data *bd, u16 len, u16 pad)
+{
+ struct sk_buff *skb = NULL;
+
+ /* For smaller frames we still allocate an SKB and memcpy the
+ * data, so the page segment can be reused instead of being
+ * unmapped.
+ */
+ if ((len + pad <= edev->rx_copybreak)) {
+ unsigned int offset = bd->page_offset + pad;
+
+ skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
+ if (unlikely(!skb))
+ return NULL;
+
+ skb_reserve(skb, pad);
+ memcpy(skb_put(skb, len),
+ page_address(bd->data) + offset, len);
+ qede_reuse_page(rxq, bd);
+ goto out;
+ }
+
+ skb = qede_build_skb(rxq, bd, len, pad);
+
+ if (unlikely(qede_realloc_rx_buffer(rxq, bd))) {
+ /* Incr page ref count to reuse on allocation failure so
+ * that it doesn't get freed while freeing SKB [as its
+ * already mapped there].
+ */
+ page_ref_inc(bd->data);
+ dev_kfree_skb_any(skb);
+ return NULL;
+ }
+out:
+ /* We've consumed the first BD and prepared an SKB */
+ qede_rx_bd_ring_consume(rxq);
+
+ return skb;
+}
+
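
qede_rx_build_skb() keeps the classic copybreak split: frames no longer than edev->rx_copybreak are copied into a small freshly allocated SKB so the mapped page stays in the ring, while larger frames go zero-copy through build_skb(). A minimal sketch of the decision (threshold and sizes are caller-supplied here; names are illustrative):

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	static struct sk_buff *copybreak_or_build(struct net_device *ndev,
						  void *buf, u16 len, u16 pad,
						  unsigned int copybreak,
						  unsigned int frag_size)
	{
		struct sk_buff *skb;

		if (len + pad <= copybreak) {
			/* small frame: copy out, page stays posted */
			skb = netdev_alloc_skb(ndev, len + pad);
			if (unlikely(!skb))
				return NULL;
			skb_reserve(skb, pad);
			skb_put_data(skb, buf + pad, len);
			return skb;
		}

		/* large frame: hand the page itself to the stack */
		skb = build_skb(buf, frag_size);
		if (likely(skb)) {
			skb_reserve(skb, pad);
			skb_put(skb, len);
		}
		return skb;
	}
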
static void qede_tpa_start(struct qede_dev *edev,
struct qede_rx_queue *rxq,
struct eth_fast_path_rx_tpa_start_cqe *cqe)
{
struct qede_agg_info *tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
- struct eth_rx_bd *rx_bd_cons = qed_chain_consume(&rxq->rx_bd_ring);
- struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
- struct sw_rx_data *replace_buf = &tpa_info->buffer;
- dma_addr_t mapping = tpa_info->buffer_mapping;
struct sw_rx_data *sw_rx_data_cons;
- struct sw_rx_data *sw_rx_data_prod;
+ u16 pad;
sw_rx_data_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
- sw_rx_data_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
+ pad = cqe->placement_offset + rxq->rx_headroom;
- /* Use pre-allocated replacement buffer - we can't release the agg.
- * start until its over and we don't want to risk allocation failing
- * here, so re-allocate when aggregation will be over.
- */
- sw_rx_data_prod->mapping = replace_buf->mapping;
-
- sw_rx_data_prod->data = replace_buf->data;
- rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(mapping));
- rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(mapping));
- sw_rx_data_prod->page_offset = replace_buf->page_offset;
-
- rxq->sw_rx_prod++;
+ tpa_info->skb = qede_tpa_rx_build_skb(edev, rxq, sw_rx_data_cons,
+ le16_to_cpu(cqe->len_on_first_bd),
+ pad, false);
+ tpa_info->buffer.page_offset = sw_rx_data_cons->page_offset;
+ tpa_info->buffer.mapping = sw_rx_data_cons->mapping;
- /* move partial skb from cons to pool (don't unmap yet)
- * save mapping, incase we drop the packet later on.
- */
- tpa_info->buffer = *sw_rx_data_cons;
- mapping = HILO_U64(le32_to_cpu(rx_bd_cons->addr.hi),
- le32_to_cpu(rx_bd_cons->addr.lo));
-
- tpa_info->buffer_mapping = mapping;
- rxq->sw_rx_cons++;
-
- /* set tpa state to start only if we are able to allocate skb
- * for this aggregation, otherwise mark as error and aggregation will
- * be dropped
- */
- tpa_info->skb = netdev_alloc_skb(edev->ndev,
- le16_to_cpu(cqe->len_on_first_bd));
if (unlikely(!tpa_info->skb)) {
DP_NOTICE(edev, "Failed to allocate SKB for gro\n");
+
+	/* Consume from the ring but do not produce, since this
+	 * buffer might still be in use by the FW; it will be
+	 * re-used at TPA end.
+ */
+ tpa_info->tpa_start_fail = true;
+ qede_rx_bd_ring_consume(rxq);
tpa_info->state = QEDE_AGG_STATE_ERROR;
goto cons_buf;
}
- /* Start filling in the aggregation info */
- skb_put(tpa_info->skb, le16_to_cpu(cqe->len_on_first_bd));
tpa_info->frag_id = 0;
tpa_info->state = QEDE_AGG_STATE_START;
- /* Store some information from first CQE */
- tpa_info->start_cqe_placement_offset = cqe->placement_offset;
- tpa_info->start_cqe_bd_len = le16_to_cpu(cqe->len_on_first_bd);
if ((le16_to_cpu(cqe->pars_flags.flags) >>
PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT) &
PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK)
@@ -899,6 +964,10 @@ static int qede_tpa_end(struct qede_dev *edev,
tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
skb = tpa_info->skb;
+ if (tpa_info->buffer.page_offset == PAGE_SIZE)
+ dma_unmap_page(rxq->dev, tpa_info->buffer.mapping,
+ PAGE_SIZE, rxq->data_direction);
+
for (i = 0; cqe->len_list[i]; i++)
qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
le16_to_cpu(cqe->len_list[i]));
@@ -919,11 +988,6 @@ static int qede_tpa_end(struct qede_dev *edev,
"Strange - total packet len [cqe] is %4x but SKB has len %04x\n",
le16_to_cpu(cqe->total_packet_len), skb->len);
- memcpy(skb->data,
- page_address(tpa_info->buffer.data) +
- tpa_info->start_cqe_placement_offset +
- tpa_info->buffer.page_offset, tpa_info->start_cqe_bd_len);
-
/* Finalize the SKB */
skb->protocol = eth_type_trans(skb, edev->ndev);
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -940,6 +1004,12 @@ static int qede_tpa_end(struct qede_dev *edev,
return 1;
err:
tpa_info->state = QEDE_AGG_STATE_NONE;
+
+ if (tpa_info->tpa_start_fail) {
+ qede_reuse_page(rxq, &tpa_info->buffer);
+ tpa_info->tpa_start_fail = false;
+ }
+
dev_kfree_skb_any(tpa_info->skb);
tpa_info->skb = NULL;
return 0;
@@ -1058,65 +1128,6 @@ static bool qede_rx_xdp(struct qede_dev *edev,
return false;
}
-static struct sk_buff *qede_rx_allocate_skb(struct qede_dev *edev,
- struct qede_rx_queue *rxq,
- struct sw_rx_data *bd, u16 len,
- u16 pad)
-{
- unsigned int offset = bd->page_offset + pad;
- struct skb_frag_struct *frag;
- struct page *page = bd->data;
- unsigned int pull_len;
- struct sk_buff *skb;
- unsigned char *va;
-
- /* Allocate a new SKB with a sufficient large header len */
- skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
- if (unlikely(!skb))
- return NULL;
-
- /* Copy data into SKB - if it's small, we can simply copy it and
- * re-use the already allcoated & mapped memory.
- */
- if (len + pad <= edev->rx_copybreak) {
- skb_put_data(skb, page_address(page) + offset, len);
- qede_reuse_page(rxq, bd);
- goto out;
- }
-
- frag = &skb_shinfo(skb)->frags[0];
-
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- page, offset, len, rxq->rx_buf_seg_size);
-
- va = skb_frag_address(frag);
- pull_len = eth_get_headlen(va, QEDE_RX_HDR_SIZE);
-
- /* Align the pull_len to optimize memcpy */
- memcpy(skb->data, va, ALIGN(pull_len, sizeof(long)));
-
- /* Correct the skb & frag sizes offset after the pull */
- skb_frag_size_sub(frag, pull_len);
- frag->page_offset += pull_len;
- skb->data_len -= pull_len;
- skb->tail += pull_len;
-
- if (unlikely(qede_realloc_rx_buffer(rxq, bd))) {
- /* Incr page ref count to reuse on allocation failure so
- * that it doesn't get freed while freeing SKB [as its
- * already mapped there].
- */
- page_ref_inc(page);
- dev_kfree_skb_any(skb);
- return NULL;
- }
-
-out:
- /* We've consumed the first BD and prepared an SKB */
- qede_rx_bd_ring_consume(rxq);
- return skb;
-}
-
static int qede_rx_build_jumbo(struct qede_dev *edev,
struct qede_rx_queue *rxq,
struct sk_buff *skb,
@@ -1157,7 +1168,7 @@ static int qede_rx_build_jumbo(struct qede_dev *edev,
PAGE_SIZE, DMA_FROM_DEVICE);
skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
- bd->data, 0, cur_size);
+ bd->data, rxq->rx_headroom, cur_size);
skb->truesize += PAGE_SIZE;
skb->data_len += cur_size;
@@ -1256,7 +1267,7 @@ static int qede_rx_process_cqe(struct qede_dev *edev,
/* Basic validation passed; Need to prepare an SKB. This would also
* guarantee to finally consume the first BD upon success.
*/
- skb = qede_rx_allocate_skb(edev, rxq, bd, len, pad);
+ skb = qede_rx_build_skb(edev, rxq, bd, len, pad);
if (!skb) {
rxq->rx_alloc_errors++;
qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num);
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index f6655e251bbd..6a796040a32c 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -133,6 +133,9 @@ static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id);
static void qede_remove(struct pci_dev *pdev);
static void qede_shutdown(struct pci_dev *pdev);
static void qede_link_update(void *dev, struct qed_link_output *link);
+static void qede_get_eth_tlv_data(void *edev, void *data);
+static void qede_get_generic_tlv_data(void *edev,
+ struct qed_generic_tlvs *data);
/* The qede lock is used to protect driver state change and driver flows that
* are not reentrant.
@@ -199,7 +202,7 @@ static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param)
/* Enable/Disable Tx switching for PF */
if ((rc == num_vfs_param) && netif_running(edev->ndev) &&
- qed_info->mf_mode != QED_MF_NPAR && qed_info->tx_switching) {
+ !qed_info->b_inter_pf_switch && qed_info->tx_switching) {
vport_params->vport_id = 0;
vport_params->update_tx_switching_flg = 1;
vport_params->tx_switching_flg = num_vfs_param ? 1 : 0;
@@ -228,6 +231,8 @@ static struct qed_eth_cb_ops qede_ll_ops = {
.arfs_filter_op = qede_arfs_filter_op,
#endif
.link_update = qede_link_update,
+ .get_generic_tlv_data = qede_get_generic_tlv_data,
+ .get_protocol_tlv_data = qede_get_eth_tlv_data,
},
.force_mac = qede_force_mac,
.ports_update = qede_udp_ports_update,
@@ -342,6 +347,7 @@ void qede_fill_by_demand_stats(struct qede_dev *edev)
p_common->rx_bcast_pkts = stats.common.rx_bcast_pkts;
p_common->mftag_filter_discards = stats.common.mftag_filter_discards;
p_common->mac_filter_discards = stats.common.mac_filter_discards;
+ p_common->gft_filter_drop = stats.common.gft_filter_drop;
p_common->tx_ucast_bytes = stats.common.tx_ucast_bytes;
p_common->tx_mcast_bytes = stats.common.tx_mcast_bytes;
@@ -393,6 +399,7 @@ void qede_fill_by_demand_stats(struct qede_dev *edev)
p_common->brb_truncates = stats.common.brb_truncates;
p_common->brb_discards = stats.common.brb_discards;
p_common->tx_mac_ctrl_frames = stats.common.tx_mac_ctrl_frames;
+ p_common->link_change_count = stats.common.link_change_count;
if (QEDE_IS_BB(edev)) {
struct qede_stats_bb *p_bb = &edev->stats.bb;
@@ -1196,30 +1203,8 @@ static void qede_free_rx_buffers(struct qede_dev *edev,
}
}
-static void qede_free_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
-{
- int i;
-
- if (edev->gro_disable)
- return;
-
- for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
- struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
- struct sw_rx_data *replace_buf = &tpa_info->buffer;
-
- if (replace_buf->data) {
- dma_unmap_page(&edev->pdev->dev,
- replace_buf->mapping,
- PAGE_SIZE, DMA_FROM_DEVICE);
- __free_page(replace_buf->data);
- }
- }
-}
-
static void qede_free_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
- qede_free_sge_mem(edev, rxq);
-
/* Free rx buffers */
qede_free_rx_buffers(edev, rxq);
@@ -1231,45 +1216,15 @@ static void qede_free_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
edev->ops->common->chain_free(edev->cdev, &rxq->rx_comp_ring);
}
-static int qede_alloc_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
+static void qede_set_tpa_param(struct qede_rx_queue *rxq)
{
- dma_addr_t mapping;
int i;
- if (edev->gro_disable)
- return 0;
-
for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
- struct sw_rx_data *replace_buf = &tpa_info->buffer;
-
- replace_buf->data = alloc_pages(GFP_ATOMIC, 0);
- if (unlikely(!replace_buf->data)) {
- DP_NOTICE(edev,
- "Failed to allocate TPA skb pool [replacement buffer]\n");
- goto err;
- }
-
- mapping = dma_map_page(&edev->pdev->dev, replace_buf->data, 0,
- PAGE_SIZE, DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
- DP_NOTICE(edev,
- "Failed to map TPA replacement buffer\n");
- goto err;
- }
- replace_buf->mapping = mapping;
- tpa_info->buffer.page_offset = 0;
- tpa_info->buffer_mapping = mapping;
tpa_info->state = QEDE_AGG_STATE_NONE;
}
-
- return 0;
-err:
- qede_free_sge_mem(edev, rxq);
- edev->gro_disable = 1;
- edev->ndev->features &= ~NETIF_F_GRO_HW;
- return -ENOMEM;
}
/* This function allocates all memory needed per Rx queue */
@@ -1280,19 +1235,24 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
rxq->num_rx_buffers = edev->q_num_rx_buffers;
rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD + edev->ndev->mtu;
- rxq->rx_headroom = edev->xdp_prog ? XDP_PACKET_HEADROOM : 0;
+
+ rxq->rx_headroom = edev->xdp_prog ? XDP_PACKET_HEADROOM : NET_SKB_PAD;
+ size = rxq->rx_headroom +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
/* Make sure that the headroom and payload fit in a single page */
- if (rxq->rx_buf_size + rxq->rx_headroom > PAGE_SIZE)
- rxq->rx_buf_size = PAGE_SIZE - rxq->rx_headroom;
+ if (rxq->rx_buf_size + size > PAGE_SIZE)
+ rxq->rx_buf_size = PAGE_SIZE - size;
- /* Segment size to spilt a page in multiple equal parts,
+	/* Segment size to split a page into multiple equal parts,
* unless XDP is used in which case we'd use the entire page.
*/
- if (!edev->xdp_prog)
- rxq->rx_buf_seg_size = roundup_pow_of_two(rxq->rx_buf_size);
- else
+ if (!edev->xdp_prog) {
+ size = size + rxq->rx_buf_size;
+ rxq->rx_buf_seg_size = roundup_pow_of_two(size);
+ } else {
rxq->rx_buf_seg_size = PAGE_SIZE;
+ }
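
To put numbers on the new sizing: on a 4K-page x86_64 system with a 1500-byte MTU, the terms work out roughly as below, which is why build_skb()'s tail reservation must be budgeted into the segment (figures are approximate and arch-dependent):

	/*
	 *   headroom    = NET_SKB_PAD                            ~64
	 *   shinfo tail = SKB_DATA_ALIGN(sizeof(struct
	 *                                skb_shared_info))       ~320
	 *   rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD + MTU      ~1522
	 *
	 *   rx_buf_seg_size = roundup_pow_of_two(64 + 320 + 1522)
	 *                   = 2048
	 *
	 * i.e. two receive segments per 4K page, with the shared-info
	 * tail accounted for rather than spilling past the segment
	 * boundary.
	 */
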
/* Allocate the parallel driver ring for Rx buffers */
size = sizeof(*rxq->sw_rx_ring) * RX_RING_SIZE;
@@ -1336,7 +1296,8 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
}
}
- rc = qede_alloc_sge_mem(edev, rxq);
+ if (!edev->gro_disable)
+ qede_set_tpa_param(rxq);
err:
return rc;
}
@@ -1927,7 +1888,7 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
vport_update_params->update_vport_active_flg = 1;
vport_update_params->vport_active_flg = 1;
- if ((qed_info->mf_mode == QED_MF_NPAR || pci_num_vf(edev->pdev)) &&
+ if ((qed_info->b_inter_pf_switch || pci_num_vf(edev->pdev)) &&
qed_info->tx_switching) {
vport_update_params->update_tx_switching_flg = 1;
vport_update_params->tx_switching_flg = 1;
@@ -2177,3 +2138,99 @@ static void qede_link_update(void *dev, struct qed_link_output *link)
}
}
}
+
+static bool qede_is_txq_full(struct qede_dev *edev, struct qede_tx_queue *txq)
+{
+ struct netdev_queue *netdev_txq;
+
+ netdev_txq = netdev_get_tx_queue(edev->ndev, txq->index);
+ if (netif_xmit_stopped(netdev_txq))
+ return true;
+
+ return false;
+}
+
+static void qede_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data)
+{
+ struct qede_dev *edev = dev;
+ struct netdev_hw_addr *ha;
+ int i;
+
+ if (edev->ndev->features & NETIF_F_IP_CSUM)
+ data->feat_flags |= QED_TLV_IP_CSUM;
+ if (edev->ndev->features & NETIF_F_TSO)
+ data->feat_flags |= QED_TLV_LSO;
+
+ ether_addr_copy(data->mac[0], edev->ndev->dev_addr);
+ memset(data->mac[1], 0, ETH_ALEN);
+ memset(data->mac[2], 0, ETH_ALEN);
+ /* Copy the first two UC macs */
+ netif_addr_lock_bh(edev->ndev);
+ i = 1;
+ netdev_for_each_uc_addr(ha, edev->ndev) {
+ ether_addr_copy(data->mac[i++], ha->addr);
+ if (i == QED_TLV_MAC_COUNT)
+ break;
+ }
+
+ netif_addr_unlock_bh(edev->ndev);
+}
+
+static void qede_get_eth_tlv_data(void *dev, void *data)
+{
+ struct qed_mfw_tlv_eth *etlv = data;
+ struct qede_dev *edev = dev;
+ struct qede_fastpath *fp;
+ int i;
+
+	etlv->lso_maxoff_size = 0xFFFF;
+ etlv->lso_maxoff_size_set = true;
+ etlv->lso_minseg_size = (u16)ETH_TX_LSO_WINDOW_MIN_LEN;
+ etlv->lso_minseg_size_set = true;
+ etlv->prom_mode = !!(edev->ndev->flags & IFF_PROMISC);
+ etlv->prom_mode_set = true;
+ etlv->tx_descr_size = QEDE_TSS_COUNT(edev);
+ etlv->tx_descr_size_set = true;
+ etlv->rx_descr_size = QEDE_RSS_COUNT(edev);
+ etlv->rx_descr_size_set = true;
+ etlv->iov_offload = QED_MFW_TLV_IOV_OFFLOAD_VEB;
+ etlv->iov_offload_set = true;
+
+	/* Fill information regarding queues; this should be done under the
+	 * qede lock to guarantee they don't change beneath our feet.
+ */
+ etlv->txqs_empty = true;
+ etlv->rxqs_empty = true;
+ etlv->num_txqs_full = 0;
+ etlv->num_rxqs_full = 0;
+
+ __qede_lock(edev);
+ for_each_queue(i) {
+ fp = &edev->fp_array[i];
+ if (fp->type & QEDE_FASTPATH_TX) {
+ if (fp->txq->sw_tx_cons != fp->txq->sw_tx_prod)
+ etlv->txqs_empty = false;
+ if (qede_is_txq_full(edev, fp->txq))
+ etlv->num_txqs_full++;
+ }
+ if (fp->type & QEDE_FASTPATH_RX) {
+ if (qede_has_rx_work(fp->rxq))
+ etlv->rxqs_empty = false;
+
+			/* This one is a bit tricky; the firmware might stop
+			 * placing packets if the ring is not yet full.
+ * Give an approximation.
+ */
+ if (le16_to_cpu(*fp->rxq->hw_cons_ptr) -
+ qed_chain_get_cons_idx(&fp->rxq->rx_comp_ring) >
+ RX_RING_SIZE - 100)
+ etlv->num_rxqs_full++;
+ }
+ }
+ __qede_unlock(edev);
+
+ etlv->txqs_empty_set = true;
+ etlv->rxqs_empty_set = true;
+ etlv->num_txqs_full_set = true;
+ etlv->num_rxqs_full_set = true;
+}
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 8293c2028002..70de062b72a1 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -2211,7 +2211,7 @@ static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
while (prod != rx_ring->cnsmr_idx) {
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "cq_id = %d, prod = %d, cnsmr = %d.\n.",
+ "cq_id = %d, prod = %d, cnsmr = %d\n",
rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
@@ -2258,7 +2258,7 @@ static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
while (prod != rx_ring->cnsmr_idx) {
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "cq_id = %d, prod = %d, cnsmr = %d.\n.",
+ "cq_id = %d, prod = %d, cnsmr = %d\n",
rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
net_rsp = rx_ring->curr_entry;
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
index d5a32b7c7dc5..031f6e6ee9c1 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
@@ -683,10 +683,11 @@ static int emac_tx_q_desc_alloc(struct emac_adapter *adpt,
struct emac_tx_queue *tx_q)
{
struct emac_ring_header *ring_header = &adpt->ring_header;
+ int node = dev_to_node(adpt->netdev->dev.parent);
size_t size;
size = sizeof(struct emac_buffer) * tx_q->tpd.count;
- tx_q->tpd.tpbuff = kzalloc(size, GFP_KERNEL);
+ tx_q->tpd.tpbuff = kzalloc_node(size, GFP_KERNEL, node);
if (!tx_q->tpd.tpbuff)
return -ENOMEM;
@@ -723,11 +724,12 @@ static void emac_rx_q_bufs_free(struct emac_adapter *adpt)
static int emac_rx_descs_alloc(struct emac_adapter *adpt)
{
struct emac_ring_header *ring_header = &adpt->ring_header;
+ int node = dev_to_node(adpt->netdev->dev.parent);
struct emac_rx_queue *rx_q = &adpt->rx_q;
size_t size;
size = sizeof(struct emac_buffer) * rx_q->rfd.count;
- rx_q->rfd.rfbuff = kzalloc(size, GFP_KERNEL);
+ rx_q->rfd.rfbuff = kzalloc_node(size, GFP_KERNEL, node);
if (!rx_q->rfd.rfbuff)
return -ENOMEM;
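
Both emac buffer arrays now come from kzalloc_node() with the NUMA node of the device's parent, keeping descriptor bookkeeping local to the DMA engine. The pattern in isolation (a sketch):

	#include <linux/device.h>
	#include <linux/slab.h>

	static void *alloc_ring_local(struct device *dev, size_t size)
	{
		/* dev_to_node() may return NUMA_NO_NODE, in which case
		 * kzalloc_node() falls back to the current node
		 */
		return kzalloc_node(size, GFP_KERNEL, dev_to_node(dev));
	}
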
@@ -920,14 +922,13 @@ static void emac_mac_rx_descs_refill(struct emac_adapter *adpt,
static void emac_adjust_link(struct net_device *netdev)
{
struct emac_adapter *adpt = netdev_priv(netdev);
- struct emac_sgmii *sgmii = &adpt->phy;
struct phy_device *phydev = netdev->phydev;
if (phydev->link) {
emac_mac_start(adpt);
- sgmii->link_up(adpt);
+ emac_sgmii_link_change(adpt, true);
} else {
- sgmii->link_down(adpt);
+ emac_sgmii_link_change(adpt, false);
emac_mac_stop(adpt);
}
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
index e8ab512ee7e3..e78e5db39458 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
@@ -53,6 +53,46 @@
#define SERDES_START_WAIT_TIMES 100
+int emac_sgmii_init(struct emac_adapter *adpt)
+{
+ if (!(adpt->phy.sgmii_ops && adpt->phy.sgmii_ops->init))
+ return 0;
+
+ return adpt->phy.sgmii_ops->init(adpt);
+}
+
+int emac_sgmii_open(struct emac_adapter *adpt)
+{
+ if (!(adpt->phy.sgmii_ops && adpt->phy.sgmii_ops->open))
+ return 0;
+
+ return adpt->phy.sgmii_ops->open(adpt);
+}
+
+void emac_sgmii_close(struct emac_adapter *adpt)
+{
+ if (!(adpt->phy.sgmii_ops && adpt->phy.sgmii_ops->close))
+ return;
+
+ adpt->phy.sgmii_ops->close(adpt);
+}
+
+int emac_sgmii_link_change(struct emac_adapter *adpt, bool link_state)
+{
+ if (!(adpt->phy.sgmii_ops && adpt->phy.sgmii_ops->link_change))
+ return 0;
+
+ return adpt->phy.sgmii_ops->link_change(adpt, link_state);
+}
+
+void emac_sgmii_reset(struct emac_adapter *adpt)
+{
+ if (!(adpt->phy.sgmii_ops && adpt->phy.sgmii_ops->reset))
+ return;
+
+ adpt->phy.sgmii_ops->reset(adpt);
+}
+
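
These wrappers replace the old emac_sgmii_dummy() approach: rather than stuffing dummy function pointers into the structure on systems without an internal PHY, each call site goes through a wrapper that treats a missing ops table or method as a successful no-op. The shape of the pattern (a sketch with illustrative names):

	struct my_dev;

	struct my_ops {
		int (*open)(struct my_dev *dev);
	};

	struct my_dev {
		struct my_ops *ops;	/* may legitimately stay NULL */
	};

	/* NULL-safe dispatch: absent ops behave as a successful no-op */
	static int my_dev_open(struct my_dev *dev)
	{
		if (!(dev->ops && dev->ops->open))
			return 0;

		return dev->ops->open(dev);
	}
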
/* Initialize the SGMII link between the internal and external PHYs. */
static void emac_sgmii_link_init(struct emac_adapter *adpt)
{
@@ -163,21 +203,21 @@ static void emac_sgmii_reset_prepare(struct emac_adapter *adpt)
msleep(50);
}
-void emac_sgmii_reset(struct emac_adapter *adpt)
+static void emac_sgmii_common_reset(struct emac_adapter *adpt)
{
int ret;
emac_sgmii_reset_prepare(adpt);
emac_sgmii_link_init(adpt);
- ret = adpt->phy.initialize(adpt);
+ ret = emac_sgmii_init(adpt);
if (ret)
netdev_err(adpt->netdev,
"could not reinitialize internal PHY (error=%i)\n",
ret);
}
-static int emac_sgmii_open(struct emac_adapter *adpt)
+static int emac_sgmii_common_open(struct emac_adapter *adpt)
{
struct emac_sgmii *sgmii = &adpt->phy;
int ret;
@@ -201,43 +241,63 @@ static int emac_sgmii_open(struct emac_adapter *adpt)
return 0;
}
-static int emac_sgmii_close(struct emac_adapter *adpt)
+static void emac_sgmii_common_close(struct emac_adapter *adpt)
{
struct emac_sgmii *sgmii = &adpt->phy;
/* Make sure interrupts are disabled */
writel(0, sgmii->base + EMAC_SGMII_PHY_INTERRUPT_MASK);
free_irq(sgmii->irq, adpt);
-
- return 0;
}
/* The error interrupts are only valid after the link is up */
-static int emac_sgmii_link_up(struct emac_adapter *adpt)
+static int emac_sgmii_common_link_change(struct emac_adapter *adpt, bool linkup)
{
struct emac_sgmii *sgmii = &adpt->phy;
int ret;
- /* Clear and enable interrupts */
- ret = emac_sgmii_irq_clear(adpt, 0xff);
- if (ret)
- return ret;
+ if (linkup) {
+ /* Clear and enable interrupts */
+ ret = emac_sgmii_irq_clear(adpt, 0xff);
+ if (ret)
+ return ret;
- writel(SGMII_ISR_MASK, sgmii->base + EMAC_SGMII_PHY_INTERRUPT_MASK);
+ writel(SGMII_ISR_MASK,
+ sgmii->base + EMAC_SGMII_PHY_INTERRUPT_MASK);
+ } else {
+ /* Disable interrupts */
+ writel(0, sgmii->base + EMAC_SGMII_PHY_INTERRUPT_MASK);
+ synchronize_irq(sgmii->irq);
+ }
return 0;
}
-static int emac_sgmii_link_down(struct emac_adapter *adpt)
-{
- struct emac_sgmii *sgmii = &adpt->phy;
+static struct sgmii_ops fsm9900_ops = {
+ .init = emac_sgmii_init_fsm9900,
+ .open = emac_sgmii_common_open,
+ .close = emac_sgmii_common_close,
+ .link_change = emac_sgmii_common_link_change,
+ .reset = emac_sgmii_common_reset,
+};
- /* Disable interrupts */
- writel(0, sgmii->base + EMAC_SGMII_PHY_INTERRUPT_MASK);
- synchronize_irq(sgmii->irq);
+static struct sgmii_ops qdf2432_ops = {
+ .init = emac_sgmii_init_qdf2432,
+ .open = emac_sgmii_common_open,
+ .close = emac_sgmii_common_close,
+ .link_change = emac_sgmii_common_link_change,
+ .reset = emac_sgmii_common_reset,
+};
- return 0;
-}
+#ifdef CONFIG_ACPI
+static struct sgmii_ops qdf2400_ops = {
+ .init = emac_sgmii_init_qdf2400,
+ .open = emac_sgmii_common_open,
+ .close = emac_sgmii_common_close,
+ .link_change = emac_sgmii_common_link_change,
+ .reset = emac_sgmii_common_reset,
+};
+#endif
static int emac_sgmii_acpi_match(struct device *dev, void *data)
{
@@ -249,7 +309,7 @@ static int emac_sgmii_acpi_match(struct device *dev, void *data)
{}
};
const struct acpi_device_id *id = acpi_match_device(match_table, dev);
- emac_sgmii_function *initialize = data;
+ struct sgmii_ops **ops = data;
if (id) {
acpi_handle handle = ACPI_HANDLE(dev);
@@ -270,10 +330,10 @@ static int emac_sgmii_acpi_match(struct device *dev, void *data)
switch (hrv) {
case 1:
- *initialize = emac_sgmii_init_qdf2432;
+ *ops = &qdf2432_ops;
return 1;
case 2:
- *initialize = emac_sgmii_init_qdf2400;
+ *ops = &qdf2400_ops;
return 1;
}
}
@@ -285,23 +345,15 @@ static int emac_sgmii_acpi_match(struct device *dev, void *data)
static const struct of_device_id emac_sgmii_dt_match[] = {
{
.compatible = "qcom,fsm9900-emac-sgmii",
- .data = emac_sgmii_init_fsm9900,
+ .data = &fsm9900_ops,
},
{
.compatible = "qcom,qdf2432-emac-sgmii",
- .data = emac_sgmii_init_qdf2432,
+ .data = &qdf2432_ops,
},
{}
};
-/* Dummy function for systems without an internal PHY. This avoids having
- * to check for NULL pointers before calling the functions.
- */
-static int emac_sgmii_dummy(struct emac_adapter *adpt)
-{
- return 0;
-}
-
int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt)
{
struct platform_device *sgmii_pdev = NULL;
@@ -312,22 +364,11 @@ int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt)
if (has_acpi_companion(&pdev->dev)) {
struct device *dev;
- dev = device_find_child(&pdev->dev, &phy->initialize,
+ dev = device_find_child(&pdev->dev, &phy->sgmii_ops,
emac_sgmii_acpi_match);
if (!dev) {
dev_warn(&pdev->dev, "cannot find internal phy node\n");
- /* There is typically no internal PHY on emulation
- * systems, so if we can't find the node, assume
- * we are on an emulation system and stub-out
- * support for the internal PHY. These systems only
- * use ACPI.
- */
- phy->open = emac_sgmii_dummy;
- phy->close = emac_sgmii_dummy;
- phy->link_up = emac_sgmii_dummy;
- phy->link_down = emac_sgmii_dummy;
-
return 0;
}
@@ -355,14 +396,9 @@ int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt)
goto error_put_device;
}
- phy->initialize = (emac_sgmii_function)match->data;
+ phy->sgmii_ops = (struct sgmii_ops *)match->data;
}
- phy->open = emac_sgmii_open;
- phy->close = emac_sgmii_close;
- phy->link_up = emac_sgmii_link_up;
- phy->link_down = emac_sgmii_link_down;
-
/* Base address is the first address */
res = platform_get_resource(sgmii_pdev, IORESOURCE_MEM, 0);
if (!res) {
@@ -386,7 +422,7 @@ int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt)
}
}
- ret = phy->initialize(adpt);
+ ret = emac_sgmii_init(adpt);
if (ret)
goto error;
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.h b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.h
index e7c0c3b2baa4..31ba21eb61d2 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.h
+++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.h
@@ -16,36 +16,44 @@
struct emac_adapter;
struct platform_device;
-typedef int (*emac_sgmii_function)(struct emac_adapter *adpt);
+/** sgmii_ops - ops for the internal emac phy
+ * @init initialization function
+ * @open called when the driver is opened
+ * @close called when the driver is closed
+ * @link_change called when the link state changes
+ * @reset called to reset the internal phy
+ */
+struct sgmii_ops {
+ int (*init)(struct emac_adapter *adpt);
+ int (*open)(struct emac_adapter *adpt);
+ void (*close)(struct emac_adapter *adpt);
+ int (*link_change)(struct emac_adapter *adpt, bool link_state);
+ void (*reset)(struct emac_adapter *adpt);
+};
/** emac_sgmii - internal emac phy
* @base base address
* @digital per-lane digital block
* @irq the interrupt number
* @decode_error_count reference count of consecutive decode errors
- * @initialize initialization function
- * @open called when the driver is opened
- * @close called when the driver is closed
- * @link_up called when the link comes up
- * @link_down called when the link comes down
+ * @sgmii_ops ops for the internal phy
*/
struct emac_sgmii {
void __iomem *base;
void __iomem *digital;
unsigned int irq;
atomic_t decode_error_count;
- emac_sgmii_function initialize;
- emac_sgmii_function open;
- emac_sgmii_function close;
- emac_sgmii_function link_up;
- emac_sgmii_function link_down;
+ struct sgmii_ops *sgmii_ops;
};
int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt);
-void emac_sgmii_reset(struct emac_adapter *adpt);
int emac_sgmii_init_fsm9900(struct emac_adapter *adpt);
int emac_sgmii_init_qdf2432(struct emac_adapter *adpt);
int emac_sgmii_init_qdf2400(struct emac_adapter *adpt);
+int emac_sgmii_init(struct emac_adapter *adpt);
+int emac_sgmii_open(struct emac_adapter *adpt);
+void emac_sgmii_close(struct emac_adapter *adpt);
+int emac_sgmii_link_change(struct emac_adapter *adpt, bool link_state);
+void emac_sgmii_reset(struct emac_adapter *adpt);
#endif
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 13235baf4766..2a0cbc535a2e 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -253,7 +253,7 @@ static int emac_open(struct net_device *netdev)
return ret;
}
- ret = adpt->phy.open(adpt);
+ ret = emac_sgmii_open(adpt);
if (ret) {
emac_mac_rx_tx_rings_free_all(adpt);
free_irq(irq->irq, irq);
@@ -264,7 +264,7 @@ static int emac_open(struct net_device *netdev)
if (ret) {
emac_mac_rx_tx_rings_free_all(adpt);
free_irq(irq->irq, irq);
- adpt->phy.close(adpt);
+ emac_sgmii_close(adpt);
return ret;
}
@@ -278,7 +278,7 @@ static int emac_close(struct net_device *netdev)
mutex_lock(&adpt->reset_lock);
- adpt->phy.close(adpt);
+ emac_sgmii_close(adpt);
emac_mac_down(adpt);
emac_mac_rx_tx_rings_free_all(adpt);
@@ -761,11 +761,10 @@ static void emac_shutdown(struct platform_device *pdev)
{
struct net_device *netdev = dev_get_drvdata(&pdev->dev);
struct emac_adapter *adpt = netdev_priv(netdev);
- struct emac_sgmii *sgmii = &adpt->phy;
if (netdev->flags & IFF_UP) {
/* Closing the SGMII turns off its interrupts */
- sgmii->close(adpt);
+ emac_sgmii_close(adpt);
/* Resetting the MAC turns off all DMA and its interrupts */
emac_mac_reset(adpt);
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
index 0b5b5da80198..34ac45a774e7 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
@@ -54,11 +54,24 @@ struct rmnet_pcpu_stats {
struct u64_stats_sync syncp;
};
+struct rmnet_priv_stats {
+ u64 csum_ok;
+ u64 csum_valid_unset;
+ u64 csum_validation_failed;
+ u64 csum_err_bad_buffer;
+ u64 csum_err_invalid_ip_version;
+ u64 csum_err_invalid_transport;
+ u64 csum_fragmented_pkt;
+ u64 csum_skipped;
+ u64 csum_sw;
+};
+
struct rmnet_priv {
u8 mux_id;
struct net_device *real_dev;
struct rmnet_pcpu_stats __percpu *pcpu_stats;
struct gro_cells gro_cells;
+ struct rmnet_priv_stats stats;
};
struct rmnet_port *rmnet_get_port(struct net_device *real_dev);
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
index 6fcd586e9804..7fd86d40a337 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
@@ -148,7 +148,7 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
if (skb_headroom(skb) < required_headroom) {
if (pskb_expand_head(skb, required_headroom, 0, GFP_KERNEL))
- goto fail;
+ return -ENOMEM;
}
if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4)
@@ -156,17 +156,13 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
map_header = rmnet_map_add_map_header(skb, additional_header_len, 0);
if (!map_header)
- goto fail;
+ return -ENOMEM;
map_header->mux_id = mux_id;
skb->protocol = htons(ETH_P_MAP);
return 0;
-
-fail:
- kfree_skb(skb);
- return -ENOMEM;
}
static void
@@ -228,15 +224,18 @@ void rmnet_egress_handler(struct sk_buff *skb)
mux_id = priv->mux_id;
port = rmnet_get_port(skb->dev);
- if (!port) {
- kfree_skb(skb);
- return;
- }
+ if (!port)
+ goto drop;
if (rmnet_map_egress_handler(skb, port, mux_id, orig_dev))
- return;
+ goto drop;
rmnet_vnd_tx_fixup(skb, orig_dev);
dev_queue_xmit(skb);
+ return;
+
+drop:
+ this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
+ kfree_skb(skb);
}
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
index 78fdad0c6f76..3ee8ae9b6838 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
@@ -67,28 +67,20 @@ static void rmnet_map_send_ack(struct sk_buff *skb,
struct rmnet_port *port)
{
struct rmnet_map_control_command *cmd;
- int xmit_status;
-
- if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) {
- if (skb->len < sizeof(struct rmnet_map_header) +
- RMNET_MAP_GET_LENGTH(skb) +
- sizeof(struct rmnet_map_dl_csum_trailer)) {
- kfree_skb(skb);
- return;
- }
-
- skb_trim(skb, skb->len -
- sizeof(struct rmnet_map_dl_csum_trailer));
- }
+ struct net_device *dev = skb->dev;
+
+ if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4)
+ skb_trim(skb,
+ skb->len - sizeof(struct rmnet_map_dl_csum_trailer));
skb->protocol = htons(ETH_P_MAP);
cmd = RMNET_MAP_GET_CMD_START(skb);
cmd->cmd_type = type & 0x03;
- netif_tx_lock(skb->dev);
- xmit_status = skb->dev->netdev_ops->ndo_start_xmit(skb, skb->dev);
- netif_tx_unlock(skb->dev);
+ netif_tx_lock(dev);
+ dev->netdev_ops->ndo_start_xmit(skb, dev);
+ netif_tx_unlock(dev);
}
/* Process MAP command frame and send N/ACK message as appropriate. Message cmd
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
index a6ea09416f8d..57a9c314a665 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
@@ -48,7 +48,8 @@ static __sum16 *rmnet_map_get_csum_field(unsigned char protocol,
static int
rmnet_map_ipv4_dl_csum_trailer(struct sk_buff *skb,
- struct rmnet_map_dl_csum_trailer *csum_trailer)
+ struct rmnet_map_dl_csum_trailer *csum_trailer,
+ struct rmnet_priv *priv)
{
__sum16 *csum_field, csum_temp, pseudo_csum, hdr_csum, ip_payload_csum;
u16 csum_value, csum_value_final;
@@ -58,19 +59,25 @@ rmnet_map_ipv4_dl_csum_trailer(struct sk_buff *skb,
ip4h = (struct iphdr *)(skb->data);
if ((ntohs(ip4h->frag_off) & IP_MF) ||
- ((ntohs(ip4h->frag_off) & IP_OFFSET) > 0))
+ ((ntohs(ip4h->frag_off) & IP_OFFSET) > 0)) {
+ priv->stats.csum_fragmented_pkt++;
return -EOPNOTSUPP;
+ }
txporthdr = skb->data + ip4h->ihl * 4;
csum_field = rmnet_map_get_csum_field(ip4h->protocol, txporthdr);
- if (!csum_field)
+ if (!csum_field) {
+ priv->stats.csum_err_invalid_transport++;
return -EPROTONOSUPPORT;
+ }
/* RFC 768 - Skip IPv4 UDP packets where sender checksum field is 0 */
- if (*csum_field == 0 && ip4h->protocol == IPPROTO_UDP)
+ if (*csum_field == 0 && ip4h->protocol == IPPROTO_UDP) {
+ priv->stats.csum_skipped++;
return 0;
+ }
csum_value = ~ntohs(csum_trailer->csum_value);
hdr_csum = ~ip_fast_csum(ip4h, (int)ip4h->ihl);
@@ -102,16 +109,20 @@ rmnet_map_ipv4_dl_csum_trailer(struct sk_buff *skb,
}
}
- if (csum_value_final == ntohs((__force __be16)*csum_field))
+ if (csum_value_final == ntohs((__force __be16)*csum_field)) {
+ priv->stats.csum_ok++;
return 0;
- else
+ } else {
+ priv->stats.csum_validation_failed++;
return -EINVAL;
+ }
}
#if IS_ENABLED(CONFIG_IPV6)
static int
rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb,
- struct rmnet_map_dl_csum_trailer *csum_trailer)
+ struct rmnet_map_dl_csum_trailer *csum_trailer,
+ struct rmnet_priv *priv)
{
__sum16 *csum_field, ip6_payload_csum, pseudo_csum, csum_temp;
u16 csum_value, csum_value_final;
@@ -125,8 +136,10 @@ rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb,
txporthdr = skb->data + sizeof(struct ipv6hdr);
csum_field = rmnet_map_get_csum_field(ip6h->nexthdr, txporthdr);
- if (!csum_field)
+ if (!csum_field) {
+ priv->stats.csum_err_invalid_transport++;
return -EPROTONOSUPPORT;
+ }
csum_value = ~ntohs(csum_trailer->csum_value);
ip6_hdr_csum = (__force __be16)
@@ -164,10 +177,13 @@ rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb,
}
}
- if (csum_value_final == ntohs((__force __be16)*csum_field))
+ if (csum_value_final == ntohs((__force __be16)*csum_field)) {
+ priv->stats.csum_ok++;
return 0;
- else
+ } else {
+ priv->stats.csum_validation_failed++;
return -EINVAL;
+ }
}
#endif
@@ -339,24 +355,34 @@ struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
*/
int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len)
{
+ struct rmnet_priv *priv = netdev_priv(skb->dev);
struct rmnet_map_dl_csum_trailer *csum_trailer;
- if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM)))
+ if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM))) {
+ priv->stats.csum_sw++;
return -EOPNOTSUPP;
+ }
csum_trailer = (struct rmnet_map_dl_csum_trailer *)(skb->data + len);
- if (!csum_trailer->valid)
+ if (!csum_trailer->valid) {
+ priv->stats.csum_valid_unset++;
return -EINVAL;
+ }
- if (skb->protocol == htons(ETH_P_IP))
- return rmnet_map_ipv4_dl_csum_trailer(skb, csum_trailer);
- else if (skb->protocol == htons(ETH_P_IPV6))
+ if (skb->protocol == htons(ETH_P_IP)) {
+ return rmnet_map_ipv4_dl_csum_trailer(skb, csum_trailer, priv);
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
#if IS_ENABLED(CONFIG_IPV6)
- return rmnet_map_ipv6_dl_csum_trailer(skb, csum_trailer);
+ return rmnet_map_ipv6_dl_csum_trailer(skb, csum_trailer, priv);
#else
+ priv->stats.csum_err_invalid_ip_version++;
return -EPROTONOSUPPORT;
#endif
+ } else {
+ priv->stats.csum_err_invalid_ip_version++;
+ return -EPROTONOSUPPORT;
+ }
return 0;
}
@@ -367,6 +393,7 @@ int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len)
void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
struct net_device *orig_dev)
{
+ struct rmnet_priv *priv = netdev_priv(orig_dev);
struct rmnet_map_ul_csum_header *ul_header;
void *iphdr;
@@ -389,8 +416,11 @@ void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
rmnet_map_ipv6_ul_csum_header(iphdr, ul_header, skb);
return;
#else
+ priv->stats.csum_err_invalid_ip_version++;
goto sw_csum;
#endif
+ } else {
+ priv->stats.csum_err_invalid_ip_version++;
}
}
@@ -399,4 +429,6 @@ sw_csum:
ul_header->csum_insert_offset = 0;
ul_header->csum_enabled = 0;
ul_header->udp_ip4_ind = 0;
+
+ priv->stats.csum_sw++;
}
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
index 2ea16a088de8..b9a7548ec6a0 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
@@ -152,6 +152,56 @@ static const struct net_device_ops rmnet_vnd_ops = {
.ndo_get_stats64 = rmnet_get_stats64,
};
+static const char rmnet_gstrings_stats[][ETH_GSTRING_LEN] = {
+ "Checksum ok",
+ "Checksum valid bit not set",
+ "Checksum validation failed",
+ "Checksum error bad buffer",
+ "Checksum error bad ip version",
+ "Checksum error bad transport",
+ "Checksum skipped on ip fragment",
+ "Checksum skipped",
+ "Checksum computed in software",
+};
+
+static void rmnet_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
+{
+ switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(buf, &rmnet_gstrings_stats,
+ sizeof(rmnet_gstrings_stats));
+ break;
+ }
+}
+
+static int rmnet_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(rmnet_gstrings_stats);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void rmnet_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct rmnet_priv *priv = netdev_priv(dev);
+ struct rmnet_priv_stats *st = &priv->stats;
+
+ if (!data)
+ return;
+
+ memcpy(data, st, ARRAY_SIZE(rmnet_gstrings_stats) * sizeof(u64));
+}
+
+static const struct ethtool_ops rmnet_ethtool_ops = {
+ .get_ethtool_stats = rmnet_get_ethtool_stats,
+ .get_strings = rmnet_get_strings,
+ .get_sset_count = rmnet_get_sset_count,
+};
+
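
rmnet_get_ethtool_stats() memcpy's struct rmnet_priv_stats directly into the u64 array, so the string table and the struct must stay in lock-step: same count, same order, all-u64 fields. A defensive variant one could add (the BUILD_BUG_ON is a suggestion, not part of this patch):

	static void rmnet_get_ethtool_stats_checked(struct net_device *dev,
						    struct ethtool_stats *stats,
						    u64 *data)
	{
		struct rmnet_priv *priv = netdev_priv(dev);

		/* break the build if a field gains no matching string */
		BUILD_BUG_ON(sizeof(struct rmnet_priv_stats) !=
			     ARRAY_SIZE(rmnet_gstrings_stats) * sizeof(u64));

		memcpy(data, &priv->stats, sizeof(struct rmnet_priv_stats));
	}
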
/* Called by kernel whenever a new rmnet<n> device is created. Sets MTU,
* flags, ARP type, needed headroom, etc...
*/
@@ -170,6 +220,11 @@ void rmnet_vnd_setup(struct net_device *rmnet_dev)
rmnet_dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
rmnet_dev->needs_free_netdev = true;
+ rmnet_dev->ethtool_ops = &rmnet_ethtool_ops;
+
+	/* This perm addr will be used as the interface identifier by IPv6 */
+ rmnet_dev->addr_assign_type = NET_ADDR_RANDOM;
+ eth_random_addr(rmnet_dev->perm_addr);
}
/* Exposed API */
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index d118da5a10a2..ffd68a7bc9e1 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -1104,7 +1104,6 @@ static int rtl8139_init_one(struct pci_dev *pdev,
return 0;
err_out:
- netif_napi_del(&tp->napi);
__rtl8139_cleanup_dev (dev);
pci_disable_device (pdev);
return i;
@@ -1119,7 +1118,6 @@ static void rtl8139_remove_one(struct pci_dev *pdev)
assert (dev != NULL);
cancel_delayed_work_sync(&tp->thread);
- netif_napi_del(&tp->napi);
unregister_netdev (dev);
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index c7aac1fc99e8..75dfac0248f4 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -84,12 +84,11 @@
The RTL chips use a 64 element hash table based on the Ethernet CRC. */
static const int multicast_filter_limit = 32;
-#define MAX_READ_REQUEST_SHIFT 12
#define TX_DMA_BURST 7 /* Maximum PCI burst, '7' is unlimited */
#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */
#define R8169_REGS_SIZE 256
-#define R8169_NAPI_WEIGHT 64
+#define R8169_RX_BUF_SIZE (SZ_16K - 1)
#define NUM_TX_DESC 64 /* Number of Tx descriptor registers */
#define NUM_RX_DESC 256U /* Number of Rx descriptor registers */
#define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
@@ -172,12 +171,11 @@ enum rtl_tx_desc_version {
#define JUMBO_7K (7*1024 - ETH_HLEN - 2)
#define JUMBO_9K (9*1024 - ETH_HLEN - 2)
-#define _R(NAME,TD,FW,SZ,B) { \
+#define _R(NAME,TD,FW,SZ) { \
.name = NAME, \
.txd_version = TD, \
.fw_name = FW, \
.jumbo_max = SZ, \
- .jumbo_tx_csum = B \
}
static const struct {
@@ -185,135 +183,111 @@ static const struct {
enum rtl_tx_desc_version txd_version;
const char *fw_name;
u16 jumbo_max;
- bool jumbo_tx_csum;
} rtl_chip_infos[] = {
/* PCI devices. */
[RTL_GIGA_MAC_VER_01] =
- _R("RTL8169", RTL_TD_0, NULL, JUMBO_7K, true),
+ _R("RTL8169", RTL_TD_0, NULL, JUMBO_7K),
[RTL_GIGA_MAC_VER_02] =
- _R("RTL8169s", RTL_TD_0, NULL, JUMBO_7K, true),
+ _R("RTL8169s", RTL_TD_0, NULL, JUMBO_7K),
[RTL_GIGA_MAC_VER_03] =
- _R("RTL8110s", RTL_TD_0, NULL, JUMBO_7K, true),
+ _R("RTL8110s", RTL_TD_0, NULL, JUMBO_7K),
[RTL_GIGA_MAC_VER_04] =
- _R("RTL8169sb/8110sb", RTL_TD_0, NULL, JUMBO_7K, true),
+ _R("RTL8169sb/8110sb", RTL_TD_0, NULL, JUMBO_7K),
[RTL_GIGA_MAC_VER_05] =
- _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K, true),
+ _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K),
[RTL_GIGA_MAC_VER_06] =
- _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K, true),
+ _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K),
/* PCI-E devices. */
[RTL_GIGA_MAC_VER_07] =
- _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true),
+ _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K),
[RTL_GIGA_MAC_VER_08] =
- _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true),
+ _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K),
[RTL_GIGA_MAC_VER_09] =
- _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true),
+ _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K),
[RTL_GIGA_MAC_VER_10] =
- _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true),
+ _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K),
[RTL_GIGA_MAC_VER_11] =
- _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false),
+ _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K),
[RTL_GIGA_MAC_VER_12] =
- _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false),
+ _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K),
[RTL_GIGA_MAC_VER_13] =
- _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true),
+ _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K),
[RTL_GIGA_MAC_VER_14] =
- _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K, true),
+ _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K),
[RTL_GIGA_MAC_VER_15] =
- _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K, true),
+ _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K),
[RTL_GIGA_MAC_VER_16] =
- _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true),
+ _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K),
[RTL_GIGA_MAC_VER_17] =
- _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false),
+ _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K),
[RTL_GIGA_MAC_VER_18] =
- _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false),
+ _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K),
[RTL_GIGA_MAC_VER_19] =
- _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
+ _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K),
[RTL_GIGA_MAC_VER_20] =
- _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
+ _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K),
[RTL_GIGA_MAC_VER_21] =
- _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
+ _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K),
[RTL_GIGA_MAC_VER_22] =
- _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
+ _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K),
[RTL_GIGA_MAC_VER_23] =
- _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false),
+ _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K),
[RTL_GIGA_MAC_VER_24] =
- _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false),
+ _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K),
[RTL_GIGA_MAC_VER_25] =
- _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_1,
- JUMBO_9K, false),
+ _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_1, JUMBO_9K),
[RTL_GIGA_MAC_VER_26] =
- _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_2,
- JUMBO_9K, false),
+ _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_2, JUMBO_9K),
[RTL_GIGA_MAC_VER_27] =
- _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false),
+ _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K),
[RTL_GIGA_MAC_VER_28] =
- _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false),
+ _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K),
[RTL_GIGA_MAC_VER_29] =
- _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1,
- JUMBO_1K, true),
+ _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1, JUMBO_1K),
[RTL_GIGA_MAC_VER_30] =
- _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1,
- JUMBO_1K, true),
+ _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1, JUMBO_1K),
[RTL_GIGA_MAC_VER_31] =
- _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false),
+ _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K),
[RTL_GIGA_MAC_VER_32] =
- _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_1,
- JUMBO_9K, false),
+ _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_1, JUMBO_9K),
[RTL_GIGA_MAC_VER_33] =
- _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_2,
- JUMBO_9K, false),
+ _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_2, JUMBO_9K),
[RTL_GIGA_MAC_VER_34] =
- _R("RTL8168evl/8111evl",RTL_TD_1, FIRMWARE_8168E_3,
- JUMBO_9K, false),
+ _R("RTL8168evl/8111evl",RTL_TD_1, FIRMWARE_8168E_3, JUMBO_9K),
[RTL_GIGA_MAC_VER_35] =
- _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_1,
- JUMBO_9K, false),
+ _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_1, JUMBO_9K),
[RTL_GIGA_MAC_VER_36] =
- _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_2,
- JUMBO_9K, false),
+ _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_2, JUMBO_9K),
[RTL_GIGA_MAC_VER_37] =
- _R("RTL8402", RTL_TD_1, FIRMWARE_8402_1,
- JUMBO_1K, true),
+ _R("RTL8402", RTL_TD_1, FIRMWARE_8402_1, JUMBO_1K),
[RTL_GIGA_MAC_VER_38] =
- _R("RTL8411", RTL_TD_1, FIRMWARE_8411_1,
- JUMBO_9K, false),
+ _R("RTL8411", RTL_TD_1, FIRMWARE_8411_1, JUMBO_9K),
[RTL_GIGA_MAC_VER_39] =
- _R("RTL8106e", RTL_TD_1, FIRMWARE_8106E_1,
- JUMBO_1K, true),
+ _R("RTL8106e", RTL_TD_1, FIRMWARE_8106E_1, JUMBO_1K),
[RTL_GIGA_MAC_VER_40] =
- _R("RTL8168g/8111g", RTL_TD_1, FIRMWARE_8168G_2,
- JUMBO_9K, false),
+ _R("RTL8168g/8111g", RTL_TD_1, FIRMWARE_8168G_2, JUMBO_9K),
[RTL_GIGA_MAC_VER_41] =
- _R("RTL8168g/8111g", RTL_TD_1, NULL, JUMBO_9K, false),
+ _R("RTL8168g/8111g", RTL_TD_1, NULL, JUMBO_9K),
[RTL_GIGA_MAC_VER_42] =
- _R("RTL8168g/8111g", RTL_TD_1, FIRMWARE_8168G_3,
- JUMBO_9K, false),
+ _R("RTL8168g/8111g", RTL_TD_1, FIRMWARE_8168G_3, JUMBO_9K),
[RTL_GIGA_MAC_VER_43] =
- _R("RTL8106e", RTL_TD_1, FIRMWARE_8106E_2,
- JUMBO_1K, true),
+ _R("RTL8106e", RTL_TD_1, FIRMWARE_8106E_2, JUMBO_1K),
[RTL_GIGA_MAC_VER_44] =
- _R("RTL8411", RTL_TD_1, FIRMWARE_8411_2,
- JUMBO_9K, false),
+ _R("RTL8411", RTL_TD_1, FIRMWARE_8411_2, JUMBO_9K),
[RTL_GIGA_MAC_VER_45] =
- _R("RTL8168h/8111h", RTL_TD_1, FIRMWARE_8168H_1,
- JUMBO_9K, false),
+ _R("RTL8168h/8111h", RTL_TD_1, FIRMWARE_8168H_1, JUMBO_9K),
[RTL_GIGA_MAC_VER_46] =
- _R("RTL8168h/8111h", RTL_TD_1, FIRMWARE_8168H_2,
- JUMBO_9K, false),
+ _R("RTL8168h/8111h", RTL_TD_1, FIRMWARE_8168H_2, JUMBO_9K),
[RTL_GIGA_MAC_VER_47] =
- _R("RTL8107e", RTL_TD_1, FIRMWARE_8107E_1,
- JUMBO_1K, false),
+ _R("RTL8107e", RTL_TD_1, FIRMWARE_8107E_1, JUMBO_1K),
[RTL_GIGA_MAC_VER_48] =
- _R("RTL8107e", RTL_TD_1, FIRMWARE_8107E_2,
- JUMBO_1K, false),
+ _R("RTL8107e", RTL_TD_1, FIRMWARE_8107E_2, JUMBO_1K),
[RTL_GIGA_MAC_VER_49] =
- _R("RTL8168ep/8111ep", RTL_TD_1, NULL,
- JUMBO_9K, false),
+ _R("RTL8168ep/8111ep", RTL_TD_1, NULL, JUMBO_9K),
[RTL_GIGA_MAC_VER_50] =
- _R("RTL8168ep/8111ep", RTL_TD_1, NULL,
- JUMBO_9K, false),
+ _R("RTL8168ep/8111ep", RTL_TD_1, NULL, JUMBO_9K),
[RTL_GIGA_MAC_VER_51] =
- _R("RTL8168ep/8111ep", RTL_TD_1, NULL,
- JUMBO_9K, false),
+ _R("RTL8168ep/8111ep", RTL_TD_1, NULL, JUMBO_9K),
};
#undef _R
@@ -345,7 +319,6 @@ static const struct pci_device_id rtl8169_pci_tbl[] = {
MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
-static int rx_buf_sz = 16383;
static int use_dac = -1;
static struct {
u32 msg_enable;
@@ -437,13 +410,8 @@ enum rtl8168_8101_registers {
CSIAR = 0x68,
#define CSIAR_FLAG 0x80000000
#define CSIAR_WRITE_CMD 0x80000000
-#define CSIAR_BYTE_ENABLE 0x0f
-#define CSIAR_BYTE_ENABLE_SHIFT 12
-#define CSIAR_ADDR_MASK 0x0fff
-#define CSIAR_FUNC_CARD 0x00000000
-#define CSIAR_FUNC_SDIO 0x00010000
-#define CSIAR_FUNC_NIC 0x00020000
-#define CSIAR_FUNC_NIC2 0x00010000
+#define CSIAR_BYTE_ENABLE 0x0000f000
+#define CSIAR_ADDR_MASK 0x00000fff
PMCH = 0x6f,
EPHYAR = 0x80,
#define EPHYAR_FLAG 0x80000000
@@ -626,6 +594,7 @@ enum rtl_register_content {
RxChkSum = (1 << 5),
PCIDAC = (1 << 4),
PCIMulRW = (1 << 3),
+#define INTT_MASK GENMASK(1, 0)
INTT_0 = 0x0000, // 8168
INTT_1 = 0x0001, // 8168
INTT_2 = 0x0002, // 8168
@@ -716,6 +685,7 @@ enum rtl_rx_desc_bit {
};
#define RsvdMask 0x3fffc000
+#define CPCMD_QUIRK_MASK (Normal_mode | RxVlan | RxChkSum | INTT_MASK)
struct TxDesc {
__le32 opts1;
@@ -778,7 +748,6 @@ struct rtl8169_private {
struct net_device *dev;
struct napi_struct napi;
u32 msg_enable;
- u16 txd_version;
u16 mac_version;
u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */
u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. */
@@ -802,26 +771,16 @@ struct rtl8169_private {
int (*read)(struct rtl8169_private *, int);
} mdio_ops;
- struct pll_power_ops {
- void (*down)(struct rtl8169_private *);
- void (*up)(struct rtl8169_private *);
- } pll_power_ops;
-
struct jumbo_ops {
void (*enable)(struct rtl8169_private *);
void (*disable)(struct rtl8169_private *);
} jumbo_ops;
- struct csi_ops {
- void (*write)(struct rtl8169_private *, int, int);
- u32 (*read)(struct rtl8169_private *, int);
- } csi_ops;
-
int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
int (*get_link_ksettings)(struct net_device *,
struct ethtool_link_ksettings *);
void (*phy_reset_enable)(struct rtl8169_private *tp);
- void (*hw_start)(struct net_device *);
+ void (*hw_start)(struct rtl8169_private *tp);
unsigned int (*phy_reset_pending)(struct rtl8169_private *tp);
unsigned int (*link_ok)(struct rtl8169_private *tp);
int (*do_ioctl)(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd);
@@ -833,14 +792,11 @@ struct rtl8169_private {
struct work_struct work;
} wk;
- unsigned features;
-
struct mii_if_info mii;
dma_addr_t counters_phys_addr;
struct rtl8169_counters *counters;
struct rtl8169_tc_offsets tc_offset;
u32 saved_wolopts;
- u32 opts1_mask;
struct rtl_fw {
const struct firmware *fw;
@@ -1645,23 +1601,8 @@ static u32 __rtl8169_get_wol(struct rtl8169_private *tp)
if (options & LinkUp)
wolopts |= WAKE_PHY;
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_34:
- case RTL_GIGA_MAC_VER_35:
- case RTL_GIGA_MAC_VER_36:
- case RTL_GIGA_MAC_VER_37:
- case RTL_GIGA_MAC_VER_38:
- case RTL_GIGA_MAC_VER_40:
- case RTL_GIGA_MAC_VER_41:
- case RTL_GIGA_MAC_VER_42:
- case RTL_GIGA_MAC_VER_43:
- case RTL_GIGA_MAC_VER_44:
- case RTL_GIGA_MAC_VER_45:
- case RTL_GIGA_MAC_VER_46:
- case RTL_GIGA_MAC_VER_47:
- case RTL_GIGA_MAC_VER_48:
- case RTL_GIGA_MAC_VER_49:
- case RTL_GIGA_MAC_VER_50:
- case RTL_GIGA_MAC_VER_51:
+ case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
if (rtl_eri_read(tp, 0xdc, ERIAR_EXGMAC) & MagicPacket_v2)
wolopts |= WAKE_MAGIC;
break;
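
The per-version case lists collapse to GCC's case-range extension, used throughout the kernel: `case A ... case B:` matches every value between the bounds, so it is only safe while the enumerators stay contiguous (note the ranges here deliberately skip RTL_GIGA_MAC_VER_39 by jumping from VER_38 to VER_40). A tiny illustration:

	static bool has_magic_v2(unsigned int ver)
	{
		switch (ver) {
		case 34 ... 38:		/* 34, 35, 36, 37, 38 */
		case 40 ... 51:		/* 39 intentionally excluded */
			return true;
		default:
			return false;
		}
	}
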
@@ -1722,23 +1663,8 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_34:
- case RTL_GIGA_MAC_VER_35:
- case RTL_GIGA_MAC_VER_36:
- case RTL_GIGA_MAC_VER_37:
- case RTL_GIGA_MAC_VER_38:
- case RTL_GIGA_MAC_VER_40:
- case RTL_GIGA_MAC_VER_41:
- case RTL_GIGA_MAC_VER_42:
- case RTL_GIGA_MAC_VER_43:
- case RTL_GIGA_MAC_VER_44:
- case RTL_GIGA_MAC_VER_45:
- case RTL_GIGA_MAC_VER_46:
- case RTL_GIGA_MAC_VER_47:
- case RTL_GIGA_MAC_VER_48:
- case RTL_GIGA_MAC_VER_49:
- case RTL_GIGA_MAC_VER_50:
- case RTL_GIGA_MAC_VER_51:
+ case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
tmp = ARRAY_SIZE(cfg) - 1;
if (wolopts & WAKE_MAGIC)
rtl_w0w1_eri(tp,
@@ -1960,18 +1886,20 @@ static netdev_features_t rtl8169_fix_features(struct net_device *dev,
features &= ~NETIF_F_ALL_TSO;
if (dev->mtu > JUMBO_1K &&
- !rtl_chip_infos[tp->mac_version].jumbo_tx_csum)
+ tp->mac_version > RTL_GIGA_MAC_VER_06)
features &= ~NETIF_F_IP_CSUM;
return features;
}
-static void __rtl8169_set_features(struct net_device *dev,
- netdev_features_t features)
+static int rtl8169_set_features(struct net_device *dev,
+ netdev_features_t features)
{
struct rtl8169_private *tp = netdev_priv(dev);
u32 rx_config;
+ rtl_lock_work(tp);
+
rx_config = RTL_R32(tp, RxConfig);
if (features & NETIF_F_RXALL)
rx_config |= (AcceptErr | AcceptRunt);
@@ -1990,28 +1918,14 @@ static void __rtl8169_set_features(struct net_device *dev,
else
tp->cp_cmd &= ~RxVlan;
- tp->cp_cmd |= RTL_R16(tp, CPlusCmd) & ~(RxVlan | RxChkSum);
-
RTL_W16(tp, CPlusCmd, tp->cp_cmd);
RTL_R16(tp, CPlusCmd);
-}
-
-static int rtl8169_set_features(struct net_device *dev,
- netdev_features_t features)
-{
- struct rtl8169_private *tp = netdev_priv(dev);
-
- features &= NETIF_F_RXALL | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX;
- rtl_lock_work(tp);
- if (features ^ dev->features)
- __rtl8169_set_features(dev, features);
rtl_unlock_work(tp);
return 0;
}
-
static inline u32 rtl8169_tx_vlan_tag(struct sk_buff *skb)
{
return (skb_vlan_tag_present(skb)) ?
@@ -2155,9 +2069,8 @@ DECLARE_RTL_COND(rtl_counters_cond)
return RTL_R32(tp, CounterAddrLow) & (CounterReset | CounterDump);
}
-static bool rtl8169_do_counters(struct net_device *dev, u32 counter_cmd)
+static bool rtl8169_do_counters(struct rtl8169_private *tp, u32 counter_cmd)
{
- struct rtl8169_private *tp = netdev_priv(dev);
dma_addr_t paddr = tp->counters_phys_addr;
u32 cmd;
@@ -2170,10 +2083,8 @@ static bool rtl8169_do_counters(struct net_device *dev, u32 counter_cmd)
return rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000);
}
-static bool rtl8169_reset_counters(struct net_device *dev)
+static bool rtl8169_reset_counters(struct rtl8169_private *tp)
{
- struct rtl8169_private *tp = netdev_priv(dev);
-
/*
* Versions prior to RTL_GIGA_MAC_VER_19 don't support resetting the
* tally counters.
@@ -2181,13 +2092,11 @@ static bool rtl8169_reset_counters(struct net_device *dev)
if (tp->mac_version < RTL_GIGA_MAC_VER_19)
return true;
- return rtl8169_do_counters(dev, CounterReset);
+ return rtl8169_do_counters(tp, CounterReset);
}
-static bool rtl8169_update_counters(struct net_device *dev)
+static bool rtl8169_update_counters(struct rtl8169_private *tp)
{
- struct rtl8169_private *tp = netdev_priv(dev);
-
/*
* Some chips are unable to dump tally counters when the receiver
* is disabled.
@@ -2195,12 +2104,11 @@ static bool rtl8169_update_counters(struct net_device *dev)
if ((RTL_R8(tp, ChipCmd) & CmdRxEnb) == 0)
return true;
- return rtl8169_do_counters(dev, CounterDump);
+ return rtl8169_do_counters(tp, CounterDump);
}
-static bool rtl8169_init_counter_offsets(struct net_device *dev)
+static bool rtl8169_init_counter_offsets(struct rtl8169_private *tp)
{
- struct rtl8169_private *tp = netdev_priv(dev);
struct rtl8169_counters *counters = tp->counters;
bool ret = false;
@@ -2223,10 +2131,10 @@ static bool rtl8169_init_counter_offsets(struct net_device *dev)
return true;
/* If both, reset and update fail, propagate to caller. */
- if (rtl8169_reset_counters(dev))
+ if (rtl8169_reset_counters(tp))
ret = true;
- if (rtl8169_update_counters(dev))
+ if (rtl8169_update_counters(tp))
ret = true;
tp->tc_offset.tx_errors = counters->tx_errors;
@@ -2249,7 +2157,7 @@ static void rtl8169_get_ethtool_stats(struct net_device *dev,
pm_runtime_get_noresume(d);
if (pm_runtime_active(d))
- rtl8169_update_counters(dev);
+ rtl8169_update_counters(tp);
pm_runtime_put_noidle(d);
@@ -2391,7 +2299,7 @@ static int rtl_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
if (IS_ERR(ci))
return PTR_ERR(ci);
- scale = &ci->scalev[RTL_R16(tp, CPlusCmd) & 3];
+ scale = &ci->scalev[tp->cp_cmd & INTT_MASK];
/* read IntrMitigate and adjust according to scale */
for (w = RTL_R16(tp, IntrMitigate); w; w >>= RTL_COALESCE_SHIFT, p++) {
@@ -2490,7 +2398,7 @@ static int rtl_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
RTL_W16(tp, IntrMitigate, swab16(w));
- tp->cp_cmd = (tp->cp_cmd & ~3) | cp01;
+ tp->cp_cmd = (tp->cp_cmd & ~INTT_MASK) | cp01;
RTL_W16(tp, CPlusCmd, tp->cp_cmd);
RTL_R16(tp, CPlusCmd);
@@ -2520,7 +2428,7 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
};
static void rtl8169_get_mac_version(struct rtl8169_private *tp,
- struct net_device *dev, u8 default_version)
+ u8 default_version)
{
/*
* The driver currently handles the 8168Bf and the 8168Be identically
@@ -2560,12 +2468,10 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
/* 8168E family. */
{ 0x7c800000, 0x2c800000, RTL_GIGA_MAC_VER_34 },
- { 0x7cf00000, 0x2c200000, RTL_GIGA_MAC_VER_33 },
{ 0x7cf00000, 0x2c100000, RTL_GIGA_MAC_VER_32 },
{ 0x7c800000, 0x2c000000, RTL_GIGA_MAC_VER_33 },
/* 8168D family. */
- { 0x7cf00000, 0x28300000, RTL_GIGA_MAC_VER_26 },
{ 0x7cf00000, 0x28100000, RTL_GIGA_MAC_VER_25 },
{ 0x7c800000, 0x28000000, RTL_GIGA_MAC_VER_26 },
@@ -2575,32 +2481,24 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
{ 0x7cf00000, 0x28b00000, RTL_GIGA_MAC_VER_31 },
/* 8168C family. */
- { 0x7cf00000, 0x3cb00000, RTL_GIGA_MAC_VER_24 },
{ 0x7cf00000, 0x3c900000, RTL_GIGA_MAC_VER_23 },
{ 0x7cf00000, 0x3c800000, RTL_GIGA_MAC_VER_18 },
{ 0x7c800000, 0x3c800000, RTL_GIGA_MAC_VER_24 },
{ 0x7cf00000, 0x3c000000, RTL_GIGA_MAC_VER_19 },
{ 0x7cf00000, 0x3c200000, RTL_GIGA_MAC_VER_20 },
{ 0x7cf00000, 0x3c300000, RTL_GIGA_MAC_VER_21 },
- { 0x7cf00000, 0x3c400000, RTL_GIGA_MAC_VER_22 },
{ 0x7c800000, 0x3c000000, RTL_GIGA_MAC_VER_22 },
/* 8168B family. */
{ 0x7cf00000, 0x38000000, RTL_GIGA_MAC_VER_12 },
- { 0x7cf00000, 0x38500000, RTL_GIGA_MAC_VER_17 },
{ 0x7c800000, 0x38000000, RTL_GIGA_MAC_VER_17 },
{ 0x7c800000, 0x30000000, RTL_GIGA_MAC_VER_11 },
/* 8101 family. */
- { 0x7cf00000, 0x44900000, RTL_GIGA_MAC_VER_39 },
{ 0x7c800000, 0x44800000, RTL_GIGA_MAC_VER_39 },
{ 0x7c800000, 0x44000000, RTL_GIGA_MAC_VER_37 },
- { 0x7cf00000, 0x40b00000, RTL_GIGA_MAC_VER_30 },
- { 0x7cf00000, 0x40a00000, RTL_GIGA_MAC_VER_30 },
{ 0x7cf00000, 0x40900000, RTL_GIGA_MAC_VER_29 },
{ 0x7c800000, 0x40800000, RTL_GIGA_MAC_VER_30 },
- { 0x7cf00000, 0x34a00000, RTL_GIGA_MAC_VER_09 },
- { 0x7cf00000, 0x24a00000, RTL_GIGA_MAC_VER_09 },
{ 0x7cf00000, 0x34900000, RTL_GIGA_MAC_VER_08 },
{ 0x7cf00000, 0x24900000, RTL_GIGA_MAC_VER_08 },
{ 0x7cf00000, 0x34800000, RTL_GIGA_MAC_VER_07 },
@@ -2635,8 +2533,8 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
tp->mac_version = p->mac_version;
if (tp->mac_version == RTL_GIGA_MAC_NONE) {
- netif_notice(tp, probe, dev,
- "unknown MAC, using family default\n");
+ dev_notice(tp_to_dev(tp),
+ "unknown MAC, using family default\n");
tp->mac_version = default_version;
} else if (tp->mac_version == RTL_GIGA_MAC_VER_42) {
tp->mac_version = tp->mii.supports_gmii ?
@@ -4685,18 +4583,7 @@ static void rtl_init_mdio_ops(struct rtl8169_private *tp)
ops->write = r8168dp_2_mdio_write;
ops->read = r8168dp_2_mdio_read;
break;
- case RTL_GIGA_MAC_VER_40:
- case RTL_GIGA_MAC_VER_41:
- case RTL_GIGA_MAC_VER_42:
- case RTL_GIGA_MAC_VER_43:
- case RTL_GIGA_MAC_VER_44:
- case RTL_GIGA_MAC_VER_45:
- case RTL_GIGA_MAC_VER_46:
- case RTL_GIGA_MAC_VER_47:
- case RTL_GIGA_MAC_VER_48:
- case RTL_GIGA_MAC_VER_49:
- case RTL_GIGA_MAC_VER_50:
- case RTL_GIGA_MAC_VER_51:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
ops->write = r8168g_mdio_write;
ops->read = r8168g_mdio_read;
break;
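The long runs of consecutive case labels collapse to the GCC/Clang case-range extension (case LOW ... HIGH:), which the kernel's gnu C dialect permits; it is not standard C. A compilable illustration with made-up version numbers:

#include <stdio.h>

static const char *family(int ver)
{
	switch (ver) {
	case 40 ... 48:		/* range is inclusive on both ends */
		return "8168g";
	case 49 ... 51:
		return "8168ep";
	default:
		return "other";
	}
}

int main(void)
{
	printf("%s\n", family(45));	/* -> 8168g */
	return 0;
}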
@@ -4741,21 +4628,7 @@ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_32:
case RTL_GIGA_MAC_VER_33:
case RTL_GIGA_MAC_VER_34:
- case RTL_GIGA_MAC_VER_37:
- case RTL_GIGA_MAC_VER_38:
- case RTL_GIGA_MAC_VER_39:
- case RTL_GIGA_MAC_VER_40:
- case RTL_GIGA_MAC_VER_41:
- case RTL_GIGA_MAC_VER_42:
- case RTL_GIGA_MAC_VER_43:
- case RTL_GIGA_MAC_VER_44:
- case RTL_GIGA_MAC_VER_45:
- case RTL_GIGA_MAC_VER_46:
- case RTL_GIGA_MAC_VER_47:
- case RTL_GIGA_MAC_VER_48:
- case RTL_GIGA_MAC_VER_49:
- case RTL_GIGA_MAC_VER_50:
- case RTL_GIGA_MAC_VER_51:
+ case RTL_GIGA_MAC_VER_37 ... RTL_GIGA_MAC_VER_51:
RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) |
AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
break;
@@ -4775,79 +4648,13 @@ static bool rtl_wol_pll_power_down(struct rtl8169_private *tp)
return true;
}
-static void r810x_phy_power_down(struct rtl8169_private *tp)
-{
- rtl_writephy(tp, 0x1f, 0x0000);
- rtl_writephy(tp, MII_BMCR, BMCR_PDOWN);
-}
-
-static void r810x_phy_power_up(struct rtl8169_private *tp)
-{
- rtl_writephy(tp, 0x1f, 0x0000);
- rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE);
-}
-
-static void r810x_pll_power_down(struct rtl8169_private *tp)
-{
- if (rtl_wol_pll_power_down(tp))
- return;
-
- r810x_phy_power_down(tp);
-
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_07:
- case RTL_GIGA_MAC_VER_08:
- case RTL_GIGA_MAC_VER_09:
- case RTL_GIGA_MAC_VER_10:
- case RTL_GIGA_MAC_VER_13:
- case RTL_GIGA_MAC_VER_16:
- break;
- default:
- RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80);
- break;
- }
-}
-
-static void r810x_pll_power_up(struct rtl8169_private *tp)
-{
- r810x_phy_power_up(tp);
-
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_07:
- case RTL_GIGA_MAC_VER_08:
- case RTL_GIGA_MAC_VER_09:
- case RTL_GIGA_MAC_VER_10:
- case RTL_GIGA_MAC_VER_13:
- case RTL_GIGA_MAC_VER_16:
- break;
- case RTL_GIGA_MAC_VER_47:
- case RTL_GIGA_MAC_VER_48:
- RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0);
- break;
- default:
- RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0x80);
- break;
- }
-}
-
static void r8168_phy_power_up(struct rtl8169_private *tp)
{
rtl_writephy(tp, 0x1f, 0x0000);
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_11:
case RTL_GIGA_MAC_VER_12:
- case RTL_GIGA_MAC_VER_17:
- case RTL_GIGA_MAC_VER_18:
- case RTL_GIGA_MAC_VER_19:
- case RTL_GIGA_MAC_VER_20:
- case RTL_GIGA_MAC_VER_21:
- case RTL_GIGA_MAC_VER_22:
- case RTL_GIGA_MAC_VER_23:
- case RTL_GIGA_MAC_VER_24:
- case RTL_GIGA_MAC_VER_25:
- case RTL_GIGA_MAC_VER_26:
- case RTL_GIGA_MAC_VER_27:
- case RTL_GIGA_MAC_VER_28:
+ case RTL_GIGA_MAC_VER_17 ... RTL_GIGA_MAC_VER_28:
case RTL_GIGA_MAC_VER_31:
rtl_writephy(tp, 0x0e, 0x0000);
break;
@@ -4855,6 +4662,9 @@ static void r8168_phy_power_up(struct rtl8169_private *tp)
break;
}
rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE);
+
+ /* give MAC/PHY some time to resume */
+ msleep(20);
}
static void r8168_phy_power_down(struct rtl8169_private *tp)
@@ -4870,18 +4680,7 @@ static void r8168_phy_power_down(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_11:
case RTL_GIGA_MAC_VER_12:
- case RTL_GIGA_MAC_VER_17:
- case RTL_GIGA_MAC_VER_18:
- case RTL_GIGA_MAC_VER_19:
- case RTL_GIGA_MAC_VER_20:
- case RTL_GIGA_MAC_VER_21:
- case RTL_GIGA_MAC_VER_22:
- case RTL_GIGA_MAC_VER_23:
- case RTL_GIGA_MAC_VER_24:
- case RTL_GIGA_MAC_VER_25:
- case RTL_GIGA_MAC_VER_26:
- case RTL_GIGA_MAC_VER_27:
- case RTL_GIGA_MAC_VER_28:
+ case RTL_GIGA_MAC_VER_17 ... RTL_GIGA_MAC_VER_28:
case RTL_GIGA_MAC_VER_31:
rtl_writephy(tp, 0x0e, 0x0200);
default:
@@ -4895,12 +4694,6 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
if (r8168_check_dash(tp))
return;
- if ((tp->mac_version == RTL_GIGA_MAC_VER_23 ||
- tp->mac_version == RTL_GIGA_MAC_VER_24) &&
- (RTL_R16(tp, CPlusCmd) & ASF)) {
- return;
- }
-
if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
tp->mac_version == RTL_GIGA_MAC_VER_33)
rtl_ephy_write(tp, 0x19, 0xff64);
@@ -4911,16 +4704,15 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
r8168_phy_power_down(tp);
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_25:
- case RTL_GIGA_MAC_VER_26:
- case RTL_GIGA_MAC_VER_27:
- case RTL_GIGA_MAC_VER_28:
- case RTL_GIGA_MAC_VER_31:
- case RTL_GIGA_MAC_VER_32:
- case RTL_GIGA_MAC_VER_33:
+ case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_33:
+ case RTL_GIGA_MAC_VER_37:
+ case RTL_GIGA_MAC_VER_39:
+ case RTL_GIGA_MAC_VER_43:
case RTL_GIGA_MAC_VER_44:
case RTL_GIGA_MAC_VER_45:
case RTL_GIGA_MAC_VER_46:
+ case RTL_GIGA_MAC_VER_47:
+ case RTL_GIGA_MAC_VER_48:
case RTL_GIGA_MAC_VER_50:
case RTL_GIGA_MAC_VER_51:
RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80);
@@ -4938,18 +4730,17 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
static void r8168_pll_power_up(struct rtl8169_private *tp)
{
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_25:
- case RTL_GIGA_MAC_VER_26:
- case RTL_GIGA_MAC_VER_27:
- case RTL_GIGA_MAC_VER_28:
- case RTL_GIGA_MAC_VER_31:
- case RTL_GIGA_MAC_VER_32:
- case RTL_GIGA_MAC_VER_33:
+ case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_33:
+ case RTL_GIGA_MAC_VER_37:
+ case RTL_GIGA_MAC_VER_39:
+ case RTL_GIGA_MAC_VER_43:
RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0x80);
break;
case RTL_GIGA_MAC_VER_44:
case RTL_GIGA_MAC_VER_45:
case RTL_GIGA_MAC_VER_46:
+ case RTL_GIGA_MAC_VER_47:
+ case RTL_GIGA_MAC_VER_48:
case RTL_GIGA_MAC_VER_50:
case RTL_GIGA_MAC_VER_51:
RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0);
@@ -4966,130 +4757,41 @@ static void r8168_pll_power_up(struct rtl8169_private *tp)
r8168_phy_power_up(tp);
}
-static void rtl_generic_op(struct rtl8169_private *tp,
- void (*op)(struct rtl8169_private *))
-{
- if (op)
- op(tp);
-}
-
static void rtl_pll_power_down(struct rtl8169_private *tp)
{
- rtl_generic_op(tp, tp->pll_power_ops.down);
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_06:
+ case RTL_GIGA_MAC_VER_13 ... RTL_GIGA_MAC_VER_15:
+ break;
+ default:
+ r8168_pll_power_down(tp);
+ }
}
static void rtl_pll_power_up(struct rtl8169_private *tp)
{
- rtl_generic_op(tp, tp->pll_power_ops.up);
-
- /* give MAC/PHY some time to resume */
- msleep(20);
-}
-
-static void rtl_init_pll_power_ops(struct rtl8169_private *tp)
-{
- struct pll_power_ops *ops = &tp->pll_power_ops;
-
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_07:
- case RTL_GIGA_MAC_VER_08:
- case RTL_GIGA_MAC_VER_09:
- case RTL_GIGA_MAC_VER_10:
- case RTL_GIGA_MAC_VER_16:
- case RTL_GIGA_MAC_VER_29:
- case RTL_GIGA_MAC_VER_30:
- case RTL_GIGA_MAC_VER_37:
- case RTL_GIGA_MAC_VER_39:
- case RTL_GIGA_MAC_VER_43:
- case RTL_GIGA_MAC_VER_47:
- case RTL_GIGA_MAC_VER_48:
- ops->down = r810x_pll_power_down;
- ops->up = r810x_pll_power_up;
+ case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_06:
+ case RTL_GIGA_MAC_VER_13 ... RTL_GIGA_MAC_VER_15:
break;
-
- case RTL_GIGA_MAC_VER_11:
- case RTL_GIGA_MAC_VER_12:
- case RTL_GIGA_MAC_VER_17:
- case RTL_GIGA_MAC_VER_18:
- case RTL_GIGA_MAC_VER_19:
- case RTL_GIGA_MAC_VER_20:
- case RTL_GIGA_MAC_VER_21:
- case RTL_GIGA_MAC_VER_22:
- case RTL_GIGA_MAC_VER_23:
- case RTL_GIGA_MAC_VER_24:
- case RTL_GIGA_MAC_VER_25:
- case RTL_GIGA_MAC_VER_26:
- case RTL_GIGA_MAC_VER_27:
- case RTL_GIGA_MAC_VER_28:
- case RTL_GIGA_MAC_VER_31:
- case RTL_GIGA_MAC_VER_32:
- case RTL_GIGA_MAC_VER_33:
- case RTL_GIGA_MAC_VER_34:
- case RTL_GIGA_MAC_VER_35:
- case RTL_GIGA_MAC_VER_36:
- case RTL_GIGA_MAC_VER_38:
- case RTL_GIGA_MAC_VER_40:
- case RTL_GIGA_MAC_VER_41:
- case RTL_GIGA_MAC_VER_42:
- case RTL_GIGA_MAC_VER_44:
- case RTL_GIGA_MAC_VER_45:
- case RTL_GIGA_MAC_VER_46:
- case RTL_GIGA_MAC_VER_49:
- case RTL_GIGA_MAC_VER_50:
- case RTL_GIGA_MAC_VER_51:
- ops->down = r8168_pll_power_down;
- ops->up = r8168_pll_power_up;
- break;
-
default:
- ops->down = NULL;
- ops->up = NULL;
- break;
+ r8168_pll_power_up(tp);
}
}
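rtl_generic_op() and the pll_power_ops struct go away: instead of installing function pointers at probe time and guarding NULL entries, the power paths dispatch on mac_version at the call site. A hypothetical skeleton of that shape (names invented for illustration):

#include <stdio.h>

enum mac_ver { VER_01, VER_13, VER_25, VER_40 };

static void pll_power_down_8168(void)
{
	puts("pll down");
}

static void pll_power_down(enum mac_ver v)
{
	switch (v) {
	case VER_01:		/* families with no PLL power control:  */
	case VER_13:		/* previously a NULL ops->down pointer  */
		break;
	default:
		pll_power_down_8168();
	}
}

int main(void)
{
	pll_power_down(VER_25);		/* prints "pll down" */
	pll_power_down(VER_01);		/* no-op */
	return 0;
}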
static void rtl_init_rxcfg(struct rtl8169_private *tp)
{
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_01:
- case RTL_GIGA_MAC_VER_02:
- case RTL_GIGA_MAC_VER_03:
- case RTL_GIGA_MAC_VER_04:
- case RTL_GIGA_MAC_VER_05:
- case RTL_GIGA_MAC_VER_06:
- case RTL_GIGA_MAC_VER_10:
- case RTL_GIGA_MAC_VER_11:
- case RTL_GIGA_MAC_VER_12:
- case RTL_GIGA_MAC_VER_13:
- case RTL_GIGA_MAC_VER_14:
- case RTL_GIGA_MAC_VER_15:
- case RTL_GIGA_MAC_VER_16:
- case RTL_GIGA_MAC_VER_17:
+ case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_06:
+ case RTL_GIGA_MAC_VER_10 ... RTL_GIGA_MAC_VER_17:
RTL_W32(tp, RxConfig, RX_FIFO_THRESH | RX_DMA_BURST);
break;
- case RTL_GIGA_MAC_VER_18:
- case RTL_GIGA_MAC_VER_19:
- case RTL_GIGA_MAC_VER_20:
- case RTL_GIGA_MAC_VER_21:
- case RTL_GIGA_MAC_VER_22:
- case RTL_GIGA_MAC_VER_23:
- case RTL_GIGA_MAC_VER_24:
+ case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24:
case RTL_GIGA_MAC_VER_34:
case RTL_GIGA_MAC_VER_35:
RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
break;
- case RTL_GIGA_MAC_VER_40:
- case RTL_GIGA_MAC_VER_41:
- case RTL_GIGA_MAC_VER_42:
- case RTL_GIGA_MAC_VER_43:
- case RTL_GIGA_MAC_VER_44:
- case RTL_GIGA_MAC_VER_45:
- case RTL_GIGA_MAC_VER_46:
- case RTL_GIGA_MAC_VER_47:
- case RTL_GIGA_MAC_VER_48:
- case RTL_GIGA_MAC_VER_49:
- case RTL_GIGA_MAC_VER_50:
- case RTL_GIGA_MAC_VER_51:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
break;
default:
@@ -5105,16 +4807,20 @@ static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
static void rtl_hw_jumbo_enable(struct rtl8169_private *tp)
{
- RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
- rtl_generic_op(tp, tp->jumbo_ops.enable);
- RTL_W8(tp, Cfg9346, Cfg9346_Lock);
+ if (tp->jumbo_ops.enable) {
+ RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
+ tp->jumbo_ops.enable(tp);
+ RTL_W8(tp, Cfg9346, Cfg9346_Lock);
+ }
}
static void rtl_hw_jumbo_disable(struct rtl8169_private *tp)
{
- RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
- rtl_generic_op(tp, tp->jumbo_ops.disable);
- RTL_W8(tp, Cfg9346, Cfg9346_Lock);
+ if (tp->jumbo_ops.disable) {
+ RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
+ tp->jumbo_ops.disable(tp);
+ RTL_W8(tp, Cfg9346, Cfg9346_Lock);
+ }
}
static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp)
@@ -5128,7 +4834,7 @@ static void r8168c_hw_jumbo_disable(struct rtl8169_private *tp)
{
RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0);
RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~Jumbo_En1);
- rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT);
+ rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
}
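Every 0x5 << MAX_READ_REQUEST_SHIFT becomes the named constant from pci_regs.h; the PCIe Device Control Max_Read_Request_Size field occupies bits 14:12 and encoding 0x5 selects 4096 bytes, so the two spellings are bit-identical. A quick self-check:

#include <stdio.h>
#include <assert.h>

#define MAX_READ_REQUEST_SHIFT		12	/* old driver-local name */
#define PCI_EXP_DEVCTL_READRQ_4096B	0x5000	/* <uapi/linux/pci_regs.h> */

int main(void)
{
	assert((0x5 << MAX_READ_REQUEST_SHIFT) == PCI_EXP_DEVCTL_READRQ_4096B);
	printf("0x%04x\n", 0x5 << MAX_READ_REQUEST_SHIFT);
	return 0;
}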
static void r8168dp_hw_jumbo_enable(struct rtl8169_private *tp)
@@ -5154,7 +4860,7 @@ static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp)
RTL_W8(tp, MaxTxPacketSize, 0x0c);
RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0);
RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~0x01);
- rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT);
+ rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
}
static void r8168b_0_hw_jumbo_enable(struct rtl8169_private *tp)
@@ -5166,7 +4872,7 @@ static void r8168b_0_hw_jumbo_enable(struct rtl8169_private *tp)
static void r8168b_0_hw_jumbo_disable(struct rtl8169_private *tp)
{
rtl_tx_performance_tweak(tp,
- (0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
+ PCI_EXP_DEVCTL_READRQ_4096B | PCI_EXP_DEVCTL_NOSNOOP_EN);
}
static void r8168b_1_hw_jumbo_enable(struct rtl8169_private *tp)
@@ -5226,18 +4932,7 @@ static void rtl_init_jumbo_ops(struct rtl8169_private *tp)
* No action needed for jumbo frames with 8169.
* No jumbo for 810x at all.
*/
- case RTL_GIGA_MAC_VER_40:
- case RTL_GIGA_MAC_VER_41:
- case RTL_GIGA_MAC_VER_42:
- case RTL_GIGA_MAC_VER_43:
- case RTL_GIGA_MAC_VER_44:
- case RTL_GIGA_MAC_VER_45:
- case RTL_GIGA_MAC_VER_46:
- case RTL_GIGA_MAC_VER_47:
- case RTL_GIGA_MAC_VER_48:
- case RTL_GIGA_MAC_VER_49:
- case RTL_GIGA_MAC_VER_50:
- case RTL_GIGA_MAC_VER_51:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
default:
ops->disable = NULL;
ops->enable = NULL;
@@ -5323,32 +5018,21 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
rtl_rx_close(tp);
- if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
- tp->mac_version == RTL_GIGA_MAC_VER_28 ||
- tp->mac_version == RTL_GIGA_MAC_VER_31) {
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_27:
+ case RTL_GIGA_MAC_VER_28:
+ case RTL_GIGA_MAC_VER_31:
rtl_udelay_loop_wait_low(tp, &rtl_npq_cond, 20, 42*42);
- } else if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
- tp->mac_version == RTL_GIGA_MAC_VER_35 ||
- tp->mac_version == RTL_GIGA_MAC_VER_36 ||
- tp->mac_version == RTL_GIGA_MAC_VER_37 ||
- tp->mac_version == RTL_GIGA_MAC_VER_38 ||
- tp->mac_version == RTL_GIGA_MAC_VER_40 ||
- tp->mac_version == RTL_GIGA_MAC_VER_41 ||
- tp->mac_version == RTL_GIGA_MAC_VER_42 ||
- tp->mac_version == RTL_GIGA_MAC_VER_43 ||
- tp->mac_version == RTL_GIGA_MAC_VER_44 ||
- tp->mac_version == RTL_GIGA_MAC_VER_45 ||
- tp->mac_version == RTL_GIGA_MAC_VER_46 ||
- tp->mac_version == RTL_GIGA_MAC_VER_47 ||
- tp->mac_version == RTL_GIGA_MAC_VER_48 ||
- tp->mac_version == RTL_GIGA_MAC_VER_49 ||
- tp->mac_version == RTL_GIGA_MAC_VER_50 ||
- tp->mac_version == RTL_GIGA_MAC_VER_51) {
+ break;
+ case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq);
rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
- } else {
+ break;
+ default:
RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq);
udelay(100);
+ break;
}
rtl_hw_reset(tp);
@@ -5361,13 +5045,10 @@ static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp)
(InterFrameGap << TxInterFrameGapShift));
}
-static void rtl_hw_start(struct net_device *dev)
+static void rtl_set_rx_max_size(struct rtl8169_private *tp)
{
- struct rtl8169_private *tp = netdev_priv(dev);
-
- tp->hw_start(dev);
-
- rtl_irq_enable_all(tp);
+ /* Low hurts. Let's disable the filtering. */
+ RTL_W16(tp, RxMaxSize, R8169_RX_BUF_SIZE + 1);
}
static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp)
@@ -5383,21 +5064,6 @@ static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp)
RTL_W32(tp, RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_BIT_MASK(32));
}
-static u16 rtl_rw_cpluscmd(struct rtl8169_private *tp)
-{
- u16 cmd;
-
- cmd = RTL_R16(tp, CPlusCmd);
- RTL_W16(tp, CPlusCmd, cmd);
- return cmd;
-}
-
-static void rtl_set_rx_max_size(struct rtl8169_private *tp, unsigned int rx_buf_sz)
-{
- /* Low hurts. Let's disable the filtering. */
- RTL_W16(tp, RxMaxSize, rx_buf_sz + 1);
-}
-
static void rtl8169_set_magic_reg(struct rtl8169_private *tp, unsigned mac_version)
{
static const struct rtl_cfg2_info {
@@ -5475,36 +5141,34 @@ static void rtl_set_rx_mode(struct net_device *dev)
RTL_W32(tp, RxConfig, tmp);
}
-static void rtl_hw_start_8169(struct net_device *dev)
+static void rtl_hw_start(struct rtl8169_private *tp)
{
- struct rtl8169_private *tp = netdev_priv(dev);
- struct pci_dev *pdev = tp->pci_dev;
-
- if (tp->mac_version == RTL_GIGA_MAC_VER_05) {
- RTL_W16(tp, CPlusCmd, RTL_R16(tp, CPlusCmd) | PCIMulRW);
- pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);
- }
-
RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
- if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
- tp->mac_version == RTL_GIGA_MAC_VER_02 ||
- tp->mac_version == RTL_GIGA_MAC_VER_03 ||
- tp->mac_version == RTL_GIGA_MAC_VER_04)
- RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb);
- rtl_init_rxcfg(tp);
+ tp->hw_start(tp);
- RTL_W8(tp, EarlyTxThres, NoEarlyTx);
+ rtl_set_rx_max_size(tp);
+ rtl_set_rx_tx_desc_registers(tp);
+ rtl_set_rx_tx_config_registers(tp);
+ RTL_W8(tp, Cfg9346, Cfg9346_Lock);
+
+ /* Initially a 10 us delay. Turned it into a PCI commit. - FR */
+ RTL_R8(tp, IntrMask);
+ RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb);
+ rtl_set_rx_mode(tp->dev);
+ /* no early-rx interrupts */
+ RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000);
+ rtl_irq_enable_all(tp);
+}
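The consolidated rtl_hw_start() now owns the whole common bring-up order -- unlock, family hook, rx/tx setup, lock, enable interrupts -- so the per-family functions shrink to their chip-specific middle. A hypothetical skeleton of that template-method control flow:

#include <stdio.h>

struct nic {
	void (*hw_start)(struct nic *);	/* rtl_hw_start_8169/8168/8101 */
};

static void hw_start_8168(struct nic *n)
{
	(void)n;
	puts("  8168-specific registers");
}

static void hw_start(struct nic *n)
{
	puts("unlock config registers");
	n->hw_start(n);			/* family-specific middle part */
	puts("rx max size, desc registers, lock, enable irq");
}

int main(void)
{
	struct nic n = { .hw_start = hw_start_8168 };

	hw_start(&n);
	return 0;
}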
- rtl_set_rx_max_size(tp, rx_buf_sz);
+static void rtl_hw_start_8169(struct rtl8169_private *tp)
+{
+ if (tp->mac_version == RTL_GIGA_MAC_VER_05)
+ pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08);
- if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
- tp->mac_version == RTL_GIGA_MAC_VER_02 ||
- tp->mac_version == RTL_GIGA_MAC_VER_03 ||
- tp->mac_version == RTL_GIGA_MAC_VER_04)
- rtl_set_rx_tx_config_registers(tp);
+ RTL_W8(tp, EarlyTxThres, NoEarlyTx);
- tp->cp_cmd |= rtl_rw_cpluscmd(tp) | PCIMulRW;
+ tp->cp_cmd |= PCIMulRW;
if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
tp->mac_version == RTL_GIGA_MAC_VER_03) {
@@ -5523,56 +5187,7 @@ static void rtl_hw_start_8169(struct net_device *dev)
*/
RTL_W16(tp, IntrMitigate, 0x0000);
- rtl_set_rx_tx_desc_registers(tp);
-
- if (tp->mac_version != RTL_GIGA_MAC_VER_01 &&
- tp->mac_version != RTL_GIGA_MAC_VER_02 &&
- tp->mac_version != RTL_GIGA_MAC_VER_03 &&
- tp->mac_version != RTL_GIGA_MAC_VER_04) {
- RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb);
- rtl_set_rx_tx_config_registers(tp);
- }
-
- RTL_W8(tp, Cfg9346, Cfg9346_Lock);
-
- /* Initially a 10 us delay. Turned it into a PCI commit. - FR */
- RTL_R8(tp, IntrMask);
-
RTL_W32(tp, RxMissed, 0);
-
- rtl_set_rx_mode(dev);
-
- /* no early-rx interrupts */
- RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000);
-}
-
-static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value)
-{
- if (tp->csi_ops.write)
- tp->csi_ops.write(tp, addr, value);
-}
-
-static u32 rtl_csi_read(struct rtl8169_private *tp, int addr)
-{
- return tp->csi_ops.read ? tp->csi_ops.read(tp, addr) : ~0;
-}
-
-static void rtl_csi_access_enable(struct rtl8169_private *tp, u32 bits)
-{
- u32 csi;
-
- csi = rtl_csi_read(tp, 0x070c) & 0x00ffffff;
- rtl_csi_write(tp, 0x070c, csi | bits);
-}
-
-static void rtl_csi_access_enable_1(struct rtl8169_private *tp)
-{
- rtl_csi_access_enable(tp, 0x17000000);
-}
-
-static void rtl_csi_access_enable_2(struct rtl8169_private *tp)
-{
- rtl_csi_access_enable(tp, 0x27000000);
}
DECLARE_RTL_COND(rtl_csiar_cond)
@@ -5580,101 +5195,55 @@ DECLARE_RTL_COND(rtl_csiar_cond)
return RTL_R32(tp, CSIAR) & CSIAR_FLAG;
}
-static void r8169_csi_write(struct rtl8169_private *tp, int addr, int value)
-{
- RTL_W32(tp, CSIDR, value);
- RTL_W32(tp, CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
- CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
-
- rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
-}
-
-static u32 r8169_csi_read(struct rtl8169_private *tp, int addr)
+static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value)
{
- RTL_W32(tp, CSIAR, (addr & CSIAR_ADDR_MASK) |
- CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
-
- return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
- RTL_R32(tp, CSIDR) : ~0;
-}
+ u32 func = PCI_FUNC(tp->pci_dev->devfn);
-static void r8402_csi_write(struct rtl8169_private *tp, int addr, int value)
-{
RTL_W32(tp, CSIDR, value);
RTL_W32(tp, CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
- CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT |
- CSIAR_FUNC_NIC);
+ CSIAR_BYTE_ENABLE | func << 16);
rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
}
-static u32 r8402_csi_read(struct rtl8169_private *tp, int addr)
+static u32 rtl_csi_read(struct rtl8169_private *tp, int addr)
{
- RTL_W32(tp, CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC |
- CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
+ u32 func = PCI_FUNC(tp->pci_dev->devfn);
+
+ RTL_W32(tp, CSIAR, (addr & CSIAR_ADDR_MASK) | func << 16 |
+ CSIAR_BYTE_ENABLE);
return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
RTL_R32(tp, CSIDR) : ~0;
}
-static void r8411_csi_write(struct rtl8169_private *tp, int addr, int value)
+static void rtl_csi_access_enable(struct rtl8169_private *tp, u8 val)
{
- RTL_W32(tp, CSIDR, value);
- RTL_W32(tp, CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
- CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT |
- CSIAR_FUNC_NIC2);
+ struct pci_dev *pdev = tp->pci_dev;
+ u32 csi;
- rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
+ /* According to Realtek the value at config space address 0x070f
+ * controls the L0s/L1 entrance latency. We try standard ECAM access
+ * first and if it fails fall back to CSI.
+ */
+ if (pdev->cfg_size > 0x070f &&
+ pci_write_config_byte(pdev, 0x070f, val) == PCIBIOS_SUCCESSFUL)
+ return;
+
+ netdev_notice_once(tp->dev,
+ "No native access to PCI extended config space, falling back to CSI\n");
+ csi = rtl_csi_read(tp, 0x070c) & 0x00ffffff;
+ rtl_csi_write(tp, 0x070c, csi | val << 24);
}
-static u32 r8411_csi_read(struct rtl8169_private *tp, int addr)
+static void rtl_csi_access_enable_1(struct rtl8169_private *tp)
{
- RTL_W32(tp, CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC2 |
- CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
-
- return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
- RTL_R32(tp, CSIDR) : ~0;
+ rtl_csi_access_enable(tp, 0x17);
}
-static void rtl_init_csi_ops(struct rtl8169_private *tp)
+static void rtl_csi_access_enable_2(struct rtl8169_private *tp)
{
- struct csi_ops *ops = &tp->csi_ops;
-
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_01:
- case RTL_GIGA_MAC_VER_02:
- case RTL_GIGA_MAC_VER_03:
- case RTL_GIGA_MAC_VER_04:
- case RTL_GIGA_MAC_VER_05:
- case RTL_GIGA_MAC_VER_06:
- case RTL_GIGA_MAC_VER_10:
- case RTL_GIGA_MAC_VER_11:
- case RTL_GIGA_MAC_VER_12:
- case RTL_GIGA_MAC_VER_13:
- case RTL_GIGA_MAC_VER_14:
- case RTL_GIGA_MAC_VER_15:
- case RTL_GIGA_MAC_VER_16:
- case RTL_GIGA_MAC_VER_17:
- ops->write = NULL;
- ops->read = NULL;
- break;
-
- case RTL_GIGA_MAC_VER_37:
- case RTL_GIGA_MAC_VER_38:
- ops->write = r8402_csi_write;
- ops->read = r8402_csi_read;
- break;
-
- case RTL_GIGA_MAC_VER_44:
- ops->write = r8411_csi_write;
- ops->read = r8411_csi_read;
- break;
-
- default:
- ops->write = r8169_csi_write;
- ops->read = r8169_csi_read;
- break;
- }
+ rtl_csi_access_enable(tp, 0x27);
}
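The three per-chip CSI read/write variants collapse into one pair that derives the PCI function number from devfn, and rtl_csi_access_enable() now prefers a plain extended-config-space write of byte 0x070f, falling back to CSI only when ECAM access is unavailable. A hypothetical userspace skeleton of that try-then-fallback shape (all names invented):

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

static bool ecam_write_byte(uint16_t addr, uint8_t val)
{
	(void)addr; (void)val;
	return false;	/* pretend extended config space is inaccessible */
}

static void csi_write_byte(uint16_t addr, uint8_t val)
{
	printf("CSI fallback: [0x%04x] <- 0x%02x\n", addr, val);
}

static void csi_access_enable(uint8_t val)
{
	if (ecam_write_byte(0x070f, val))	/* fast path: direct write */
		return;
	csi_write_byte(0x070f, val);		/* indirect CSI mechanism */
}

int main(void)
{
	csi_access_enable(0x27);
	return 0;
}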
struct ephy_info {
@@ -5721,25 +5290,15 @@ static void rtl_pcie_state_l2l3_enable(struct rtl8169_private *tp, bool enable)
RTL_W8(tp, Config3, data);
}
-#define R8168_CPCMD_QUIRK_MASK (\
- EnableBist | \
- Mac_dbgo_oe | \
- Force_half_dup | \
- Force_rxflow_en | \
- Force_txflow_en | \
- Cxpl_dbg_sel | \
- ASF | \
- PktCntrDisable | \
- Mac_dbgo_sel)
-
static void rtl_hw_start_8168bb(struct rtl8169_private *tp)
{
RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
- RTL_W16(tp, CPlusCmd, RTL_R16(tp, CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
+ tp->cp_cmd &= CPCMD_QUIRK_MASK;
+ RTL_W16(tp, CPlusCmd, tp->cp_cmd);
if (tp->dev->mtu <= ETH_DATA_LEN) {
- rtl_tx_performance_tweak(tp, (0x5 << MAX_READ_REQUEST_SHIFT) |
+ rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B |
PCI_EXP_DEVCTL_NOSNOOP_EN);
}
}
@@ -5760,11 +5319,12 @@ static void __rtl_hw_start_8168cp(struct rtl8169_private *tp)
RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
if (tp->dev->mtu <= ETH_DATA_LEN)
- rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT);
+ rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
rtl_disable_clock_request(tp);
- RTL_W16(tp, CPlusCmd, RTL_R16(tp, CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
+ tp->cp_cmd &= CPCMD_QUIRK_MASK;
+ RTL_W16(tp, CPlusCmd, tp->cp_cmd);
}
static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp)
@@ -5791,9 +5351,10 @@ static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp)
RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
if (tp->dev->mtu <= ETH_DATA_LEN)
- rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT);
+ rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
- RTL_W16(tp, CPlusCmd, RTL_R16(tp, CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
+ tp->cp_cmd &= CPCMD_QUIRK_MASK;
+ RTL_W16(tp, CPlusCmd, tp->cp_cmd);
}
static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)
@@ -5808,9 +5369,10 @@ static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)
RTL_W8(tp, MaxTxPacketSize, TxPacketMax);
if (tp->dev->mtu <= ETH_DATA_LEN)
- rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT);
+ rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
- RTL_W16(tp, CPlusCmd, RTL_R16(tp, CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
+ tp->cp_cmd &= CPCMD_QUIRK_MASK;
+ RTL_W16(tp, CPlusCmd, tp->cp_cmd);
}
static void rtl_hw_start_8168c_1(struct rtl8169_private *tp)
@@ -5865,9 +5427,10 @@ static void rtl_hw_start_8168d(struct rtl8169_private *tp)
RTL_W8(tp, MaxTxPacketSize, TxPacketMax);
if (tp->dev->mtu <= ETH_DATA_LEN)
- rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT);
+ rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
- RTL_W16(tp, CPlusCmd, RTL_R16(tp, CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
+ tp->cp_cmd &= CPCMD_QUIRK_MASK;
+ RTL_W16(tp, CPlusCmd, tp->cp_cmd);
}
static void rtl_hw_start_8168dp(struct rtl8169_private *tp)
@@ -5875,7 +5438,7 @@ static void rtl_hw_start_8168dp(struct rtl8169_private *tp)
rtl_csi_access_enable_1(tp);
if (tp->dev->mtu <= ETH_DATA_LEN)
- rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT);
+ rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
RTL_W8(tp, MaxTxPacketSize, TxPacketMax);
@@ -5892,7 +5455,7 @@ static void rtl_hw_start_8168d_4(struct rtl8169_private *tp)
rtl_csi_access_enable_1(tp);
- rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT);
+ rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
RTL_W8(tp, MaxTxPacketSize, TxPacketMax);
@@ -5924,7 +5487,7 @@ static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)
rtl_ephy_init(tp, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1));
if (tp->dev->mtu <= ETH_DATA_LEN)
- rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT);
+ rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
RTL_W8(tp, MaxTxPacketSize, TxPacketMax);
@@ -5949,7 +5512,7 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
rtl_ephy_init(tp, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2));
if (tp->dev->mtu <= ETH_DATA_LEN)
- rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT);
+ rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
@@ -5979,7 +5542,7 @@ static void rtl_hw_start_8168f(struct rtl8169_private *tp)
{
rtl_csi_access_enable_2(tp);
- rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT);
+ rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
@@ -6050,7 +5613,7 @@ static void rtl_hw_start_8168g(struct rtl8169_private *tp)
rtl_csi_access_enable_1(tp);
- rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT);
+ rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
@@ -6150,7 +5713,7 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
rtl_csi_access_enable_1(tp);
- rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT);
+ rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
@@ -6232,7 +5795,7 @@ static void rtl_hw_start_8168ep(struct rtl8169_private *tp)
rtl_csi_access_enable_1(tp);
- rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT);
+ rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
@@ -6328,18 +5891,12 @@ static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp)
r8168_mac_ocp_write(tp, 0xe860, data);
}
-static void rtl_hw_start_8168(struct net_device *dev)
+static void rtl_hw_start_8168(struct rtl8169_private *tp)
{
- struct rtl8169_private *tp = netdev_priv(dev);
-
- RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
-
RTL_W8(tp, MaxTxPacketSize, TxPacketMax);
- rtl_set_rx_max_size(tp, rx_buf_sz);
-
- tp->cp_cmd |= RTL_R16(tp, CPlusCmd) | PktCntrDisable | INTT_1;
-
+ tp->cp_cmd &= ~INTT_MASK;
+ tp->cp_cmd |= PktCntrDisable | INTT_1;
RTL_W16(tp, CPlusCmd, tp->cp_cmd);
RTL_W16(tp, IntrMitigate, 0x5151);
@@ -6350,12 +5907,6 @@ static void rtl_hw_start_8168(struct net_device *dev)
tp->event_slow &= ~RxOverflow;
}
- rtl_set_rx_tx_desc_registers(tp);
-
- rtl_set_rx_tx_config_registers(tp);
-
- RTL_R8(tp, IntrMask);
-
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_11:
rtl_hw_start_8168bb(tp);
@@ -6456,30 +6007,11 @@ static void rtl_hw_start_8168(struct net_device *dev)
default:
printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n",
- dev->name, tp->mac_version);
+ tp->dev->name, tp->mac_version);
break;
}
-
- RTL_W8(tp, Cfg9346, Cfg9346_Lock);
-
- RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb);
-
- rtl_set_rx_mode(dev);
-
- RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000);
}
-#define R810X_CPCMD_QUIRK_MASK (\
- EnableBist | \
- Mac_dbgo_oe | \
- Force_half_dup | \
- Force_rxflow_en | \
- Force_txflow_en | \
- Cxpl_dbg_sel | \
- ASF | \
- PktCntrDisable | \
- Mac_dbgo_sel)
-
static void rtl_hw_start_8102e_1(struct rtl8169_private *tp)
{
static const struct ephy_info e_info_8102e_1[] = {
@@ -6498,7 +6030,7 @@ static void rtl_hw_start_8102e_1(struct rtl8169_private *tp)
RTL_W8(tp, DBG_REG, FIX_NAK_1);
- rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT);
+ rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
RTL_W8(tp, Config1,
LEDS1 | LEDS0 | Speed_down | MEMMAP | IOMAP | VPD | PMEnable);
@@ -6515,7 +6047,7 @@ static void rtl_hw_start_8102e_2(struct rtl8169_private *tp)
{
rtl_csi_access_enable_2(tp);
- rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT);
+ rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
RTL_W8(tp, Config1, MEMMAP | IOMAP | VPD | PMEnable);
RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
@@ -6578,7 +6110,7 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp)
rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402));
- rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT);
+ rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00000002, ERIAR_EXGMAC);
rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00000006, ERIAR_EXGMAC);
@@ -6603,32 +6135,21 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp)
rtl_pcie_state_l2l3_enable(tp, false);
}
-static void rtl_hw_start_8101(struct net_device *dev)
+static void rtl_hw_start_8101(struct rtl8169_private *tp)
{
- struct rtl8169_private *tp = netdev_priv(dev);
- struct pci_dev *pdev = tp->pci_dev;
-
if (tp->mac_version >= RTL_GIGA_MAC_VER_30)
tp->event_slow &= ~RxFIFOOver;
if (tp->mac_version == RTL_GIGA_MAC_VER_13 ||
tp->mac_version == RTL_GIGA_MAC_VER_16)
- pcie_capability_set_word(pdev, PCI_EXP_DEVCTL,
+ pcie_capability_set_word(tp->pci_dev, PCI_EXP_DEVCTL,
PCI_EXP_DEVCTL_NOSNOOP_EN);
- RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
-
RTL_W8(tp, MaxTxPacketSize, TxPacketMax);
- rtl_set_rx_max_size(tp, rx_buf_sz);
-
- tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK;
+ tp->cp_cmd &= CPCMD_QUIRK_MASK;
RTL_W16(tp, CPlusCmd, tp->cp_cmd);
- rtl_set_rx_tx_desc_registers(tp);
-
- rtl_set_rx_tx_config_registers(tp);
-
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_07:
rtl_hw_start_8102e_1(tp);
@@ -6665,17 +6186,7 @@ static void rtl_hw_start_8101(struct net_device *dev)
break;
}
- RTL_W8(tp, Cfg9346, Cfg9346_Lock);
-
RTL_W16(tp, IntrMitigate, 0x0000);
-
- RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb);
-
- rtl_set_rx_mode(dev);
-
- RTL_R8(tp, IntrMask);
-
- RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000);
}
static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
@@ -6702,29 +6213,22 @@ static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc)
static void rtl8169_free_rx_databuff(struct rtl8169_private *tp,
void **data_buff, struct RxDesc *desc)
{
- dma_unmap_single(tp_to_dev(tp), le64_to_cpu(desc->addr), rx_buf_sz,
- DMA_FROM_DEVICE);
+ dma_unmap_single(tp_to_dev(tp), le64_to_cpu(desc->addr),
+ R8169_RX_BUF_SIZE, DMA_FROM_DEVICE);
kfree(*data_buff);
*data_buff = NULL;
rtl8169_make_unusable_by_asic(desc);
}
-static inline void rtl8169_mark_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
+static inline void rtl8169_mark_to_asic(struct RxDesc *desc)
{
u32 eor = le32_to_cpu(desc->opts1) & RingEnd;
/* Force memory writes to complete before releasing descriptor */
dma_wmb();
- desc->opts1 = cpu_to_le32(DescOwn | eor | rx_buf_sz);
-}
-
-static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
- u32 rx_buf_sz)
-{
- desc->addr = cpu_to_le64(mapping);
- rtl8169_mark_to_asic(desc, rx_buf_sz);
+ desc->opts1 = cpu_to_le32(DescOwn | eor | R8169_RX_BUF_SIZE);
}
static inline void *rtl8169_align(void *data)
@@ -6738,21 +6242,20 @@ static struct sk_buff *rtl8169_alloc_rx_data(struct rtl8169_private *tp,
void *data;
dma_addr_t mapping;
struct device *d = tp_to_dev(tp);
- struct net_device *dev = tp->dev;
- int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
+ int node = dev_to_node(d);
- data = kmalloc_node(rx_buf_sz, GFP_KERNEL, node);
+ data = kmalloc_node(R8169_RX_BUF_SIZE, GFP_KERNEL, node);
if (!data)
return NULL;
if (rtl8169_align(data) != data) {
kfree(data);
- data = kmalloc_node(rx_buf_sz + 15, GFP_KERNEL, node);
+ data = kmalloc_node(R8169_RX_BUF_SIZE + 15, GFP_KERNEL, node);
if (!data)
return NULL;
}
- mapping = dma_map_single(d, rtl8169_align(data), rx_buf_sz,
+ mapping = dma_map_single(d, rtl8169_align(data), R8169_RX_BUF_SIZE,
DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(d, mapping))) {
if (net_ratelimit())
@@ -6760,7 +6263,8 @@ static struct sk_buff *rtl8169_alloc_rx_data(struct rtl8169_private *tp,
goto err_out;
}
- rtl8169_map_to_asic(desc, mapping, rx_buf_sz);
+ desc->addr = cpu_to_le64(mapping);
+ rtl8169_mark_to_asic(desc);
return data;
err_out:
@@ -6792,9 +6296,6 @@ static int rtl8169_rx_fill(struct rtl8169_private *tp)
for (i = 0; i < NUM_RX_DESC; i++) {
void *data;
- if (tp->Rx_databuff[i])
- continue;
-
data = rtl8169_alloc_rx_data(tp, tp->RxDescArray + i);
if (!data) {
rtl8169_make_unusable_by_asic(tp->RxDescArray + i);
@@ -6811,14 +6312,12 @@ err_out:
return -ENOMEM;
}
-static int rtl8169_init_ring(struct net_device *dev)
+static int rtl8169_init_ring(struct rtl8169_private *tp)
{
- struct rtl8169_private *tp = netdev_priv(dev);
-
rtl8169_init_ring_indexes(tp);
- memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info));
- memset(tp->Rx_databuff, 0x0, NUM_RX_DESC * sizeof(void *));
+ memset(tp->tx_skb, 0, sizeof(tp->tx_skb));
+ memset(tp->Rx_databuff, 0, sizeof(tp->Rx_databuff));
return rtl8169_rx_fill(tp);
}
@@ -6877,13 +6376,13 @@ static void rtl_reset_work(struct rtl8169_private *tp)
rtl8169_hw_reset(tp);
for (i = 0; i < NUM_RX_DESC; i++)
- rtl8169_mark_to_asic(tp->RxDescArray + i, rx_buf_sz);
+ rtl8169_mark_to_asic(tp->RxDescArray + i);
rtl8169_tx_clear(tp);
rtl8169_init_ring_indexes(tp);
napi_enable(&tp->napi);
- rtl_hw_start(dev);
+ rtl_hw_start(tp);
netif_wake_queue(dev);
rtl8169_check_link_status(dev, tp);
}
@@ -7015,18 +6514,6 @@ static int msdn_giant_send_check(struct sk_buff *skb)
return ret;
}
-static inline __be16 get_protocol(struct sk_buff *skb)
-{
- __be16 protocol;
-
- if (skb->protocol == htons(ETH_P_8021Q))
- protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
- else
- protocol = skb->protocol;
-
- return protocol;
-}
-
static bool rtl8169_tso_csum_v1(struct rtl8169_private *tp,
struct sk_buff *skb, u32 *opts)
{
@@ -7063,7 +6550,7 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
return false;
}
- switch (get_protocol(skb)) {
+ switch (vlan_get_protocol(skb)) {
case htons(ETH_P_IP):
opts[0] |= TD1_GTSENV4;
break;
@@ -7095,7 +6582,7 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
return false;
}
- switch (get_protocol(skb)) {
+ switch (vlan_get_protocol(skb)) {
case htons(ETH_P_IP):
opts[1] |= TD1_IPv4_CS;
ip_protocol = ip_hdr(skb)->protocol;
@@ -7365,7 +6852,7 @@ static struct sk_buff *rtl8169_try_rx_copy(void *data,
prefetch(data);
skb = napi_alloc_skb(&tp->napi, pkt_size);
if (skb)
- memcpy(skb->data, data, pkt_size);
+ skb_copy_to_linear_data(skb, data, pkt_size);
dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE);
return skb;
@@ -7383,7 +6870,7 @@ static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget
struct RxDesc *desc = tp->RxDescArray + entry;
u32 status;
- status = le32_to_cpu(desc->opts1) & tp->opts1_mask;
+ status = le32_to_cpu(desc->opts1);
if (status & DescOwn)
break;
@@ -7401,14 +6888,16 @@ static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget
dev->stats.rx_length_errors++;
if (status & RxCRC)
dev->stats.rx_crc_errors++;
- if (status & RxFOVF) {
+ /* RxFOVF is a reserved bit on later chip versions */
+ if (tp->mac_version == RTL_GIGA_MAC_VER_01 &&
+ status & RxFOVF) {
rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
dev->stats.rx_fifo_errors++;
- }
- if ((status & (RxRUNT | RxCRC)) &&
- !(status & (RxRWT | RxFOVF)) &&
- (dev->features & NETIF_F_RXALL))
+ } else if (status & (RxRUNT | RxCRC) &&
+ !(status & RxRWT) &&
+ dev->features & NETIF_F_RXALL) {
goto process_pkt;
+ }
} else {
struct sk_buff *skb;
dma_addr_t addr;
@@ -7457,7 +6946,7 @@ process_pkt:
}
release_descriptor:
desc->opts2 = 0;
- rtl8169_mark_to_asic(desc, rx_buf_sz);
+ rtl8169_mark_to_asic(desc);
}
count = cur_rx - tp->cur_rx;
@@ -7468,8 +6957,7 @@ release_descriptor:
static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
{
- struct net_device *dev = dev_instance;
- struct rtl8169_private *tp = netdev_priv(dev);
+ struct rtl8169_private *tp = dev_instance;
int handled = 0;
u16 status;
@@ -7480,7 +6968,7 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
handled = 1;
rtl_irq_disable(tp);
- napi_schedule(&tp->napi);
+ napi_schedule_irqoff(&tp->napi);
}
}
return IRQ_RETVAL(handled);
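Registering the IRQ with tp instead of the net_device means dev_instance already is the private struct, and napi_schedule_irqoff() is safe here because the handler runs with interrupts disabled. A small model of the cookie contract, with invented stand-ins for the interrupt core:

#include <stdio.h>

struct rtl8169_private { int id; };

typedef int (*irq_handler_t)(int irq, void *dev_instance);

static int rtl_interrupt(int irq, void *dev_instance)
{
	struct rtl8169_private *tp = dev_instance;	/* no netdev_priv() hop */

	printf("irq %d for tp %d\n", irq, tp->id);
	return 1;
}

static void deliver(irq_handler_t handler, void *cookie)
{
	handler(42, cookie);	/* stands in for the interrupt core */
}

int main(void)
{
	struct rtl8169_private tp = { .id = 7 };

	deliver(rtl_interrupt, &tp);	/* same pointer pci_request_irq() got */
	return 0;
}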
@@ -7631,7 +7119,7 @@ static int rtl8169_close(struct net_device *dev)
pm_runtime_get_sync(&pdev->dev);
/* Update counters before going down */
- rtl8169_update_counters(dev);
+ rtl8169_update_counters(tp);
rtl_lock_work(tp);
clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
@@ -7641,7 +7129,7 @@ static int rtl8169_close(struct net_device *dev)
cancel_work_sync(&tp->wk.work);
- pci_free_irq(pdev, 0, dev);
+ pci_free_irq(pdev, 0, tp);
dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
tp->RxPhyAddr);
@@ -7686,7 +7174,7 @@ static int rtl_open(struct net_device *dev)
if (!tp->RxDescArray)
goto err_free_tx_0;
- retval = rtl8169_init_ring(dev);
+ retval = rtl8169_init_ring(tp);
if (retval < 0)
goto err_free_rx_1;
@@ -7696,7 +7184,7 @@ static int rtl_open(struct net_device *dev)
rtl_request_firmware(tp);
- retval = pci_request_irq(pdev, 0, rtl8169_interrupt, NULL, dev,
+ retval = pci_request_irq(pdev, 0, rtl8169_interrupt, NULL, tp,
dev->name);
if (retval < 0)
goto err_release_fw_2;
@@ -7709,13 +7197,11 @@ static int rtl_open(struct net_device *dev)
rtl8169_init_phy(dev, tp);
- __rtl8169_set_features(dev, dev->features);
-
rtl_pll_power_up(tp);
- rtl_hw_start(dev);
+ rtl_hw_start(tp);
- if (!rtl8169_init_counter_offsets(dev))
+ if (!rtl8169_init_counter_offsets(tp))
netif_warn(tp, hw, dev, "counter reset/update failed\n");
netif_start_queue(dev);
@@ -7784,7 +7270,7 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
* from tally counters.
*/
if (pm_runtime_active(&pdev->dev))
- rtl8169_update_counters(dev);
+ rtl8169_update_counters(tp);
/*
* Subtract values fetched during initialization.
@@ -7880,7 +7366,7 @@ static int rtl8169_runtime_suspend(struct device *device)
/* Update counters before going runtime suspend */
rtl8169_rx_missed(dev);
- rtl8169_update_counters(dev);
+ rtl8169_update_counters(tp);
return 0;
}
@@ -8020,9 +7506,7 @@ static const struct net_device_ops rtl_netdev_ops = {
};
static const struct rtl_cfg_info {
- void (*hw_start)(struct net_device *);
- unsigned int region;
- unsigned int align;
+ void (*hw_start)(struct rtl8169_private *tp);
u16 event_slow;
unsigned int has_gmii:1;
const struct rtl_coalesce_info *coalesce_info;
@@ -8030,8 +7514,6 @@ static const struct rtl_cfg_info {
} rtl_cfg_infos [] = {
[RTL_CFG_0] = {
.hw_start = rtl_hw_start_8169,
- .region = 1,
- .align = 0,
.event_slow = SYSErr | LinkChg | RxOverflow | RxFIFOOver,
.has_gmii = 1,
.coalesce_info = rtl_coalesce_info_8169,
@@ -8039,8 +7521,6 @@ static const struct rtl_cfg_info {
},
[RTL_CFG_1] = {
.hw_start = rtl_hw_start_8168,
- .region = 2,
- .align = 8,
.event_slow = SYSErr | LinkChg | RxOverflow,
.has_gmii = 1,
.coalesce_info = rtl_coalesce_info_8168_8136,
@@ -8048,8 +7528,6 @@ static const struct rtl_cfg_info {
},
[RTL_CFG_2] = {
.hw_start = rtl_hw_start_8101,
- .region = 2,
- .align = 8,
.event_slow = SYSErr | LinkChg | RxOverflow | RxFIFOOver |
PCSTimeout,
.coalesce_info = rtl_coalesce_info_8168_8136,
@@ -8125,20 +7603,10 @@ static void rtl_hw_init_8168ep(struct rtl8169_private *tp)
static void rtl_hw_initialize(struct rtl8169_private *tp)
{
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_40:
- case RTL_GIGA_MAC_VER_41:
- case RTL_GIGA_MAC_VER_42:
- case RTL_GIGA_MAC_VER_43:
- case RTL_GIGA_MAC_VER_44:
- case RTL_GIGA_MAC_VER_45:
- case RTL_GIGA_MAC_VER_46:
- case RTL_GIGA_MAC_VER_47:
- case RTL_GIGA_MAC_VER_48:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_48:
rtl_hw_init_8168g(tp);
break;
- case RTL_GIGA_MAC_VER_49:
- case RTL_GIGA_MAC_VER_50:
- case RTL_GIGA_MAC_VER_51:
+ case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_51:
rtl_hw_init_8168ep(tp);
break;
default:
@@ -8149,11 +7617,10 @@ static void rtl_hw_initialize(struct rtl8169_private *tp)
static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
- const unsigned int region = cfg->region;
struct rtl8169_private *tp;
struct mii_if_info *mii;
struct net_device *dev;
- int chipset, i;
+ int chipset, region, i;
int rc;
if (netif_msg_drv(&debug)) {
@@ -8188,43 +7655,41 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* enable device (incl. PCI PM wakeup and hotplug setup) */
rc = pcim_enable_device(pdev);
if (rc < 0) {
- netif_err(tp, probe, dev, "enable failure\n");
+ dev_err(&pdev->dev, "enable failure\n");
return rc;
}
if (pcim_set_mwi(pdev) < 0)
- netif_info(tp, probe, dev, "Mem-Wr-Inval unavailable\n");
+ dev_info(&pdev->dev, "Mem-Wr-Inval unavailable\n");
- /* make sure PCI base addr 1 is MMIO */
- if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) {
- netif_err(tp, probe, dev,
- "region #%d not an MMIO resource, aborting\n",
- region);
+ /* use first MMIO region */
+ region = ffs(pci_select_bars(pdev, IORESOURCE_MEM)) - 1;
+ if (region < 0) {
+ dev_err(&pdev->dev, "no MMIO resource found\n");
return -ENODEV;
}
/* check for weird/broken PCI region reporting */
if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) {
- netif_err(tp, probe, dev,
- "Invalid PCI region size(s), aborting\n");
+ dev_err(&pdev->dev, "Invalid PCI region size(s), aborting\n");
return -ENODEV;
}
rc = pcim_iomap_regions(pdev, BIT(region), MODULENAME);
if (rc < 0) {
- netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n");
+ dev_err(&pdev->dev, "cannot remap MMIO, aborting\n");
return rc;
}
tp->mmio_addr = pcim_iomap_table(pdev)[region];
if (!pci_is_pcie(pdev))
- netif_info(tp, probe, dev, "not PCI Express\n");
+ dev_info(&pdev->dev, "not PCI Express\n");
/* Identify chip attached to board */
- rtl8169_get_mac_version(tp, dev, cfg->default_ver);
+ rtl8169_get_mac_version(tp, cfg->default_ver);
- tp->cp_cmd = 0;
+ tp->cp_cmd = RTL_R16(tp, CPlusCmd);
if ((sizeof(dma_addr_t) > 4) &&
(use_dac == 1 || (use_dac == -1 && pci_is_pcie(pdev) &&
@@ -8239,7 +7704,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
} else {
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc < 0) {
- netif_err(tp, probe, dev, "DMA configuration failed\n");
+ dev_err(&pdev->dev, "DMA configuration failed\n");
return rc;
}
}
@@ -8257,18 +7722,15 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
rtl_init_mdio_ops(tp);
- rtl_init_pll_power_ops(tp);
rtl_init_jumbo_ops(tp);
- rtl_init_csi_ops(tp);
rtl8169_print_mac_version(tp);
chipset = tp->mac_version;
- tp->txd_version = rtl_chip_infos[chipset].txd_version;
rc = rtl_alloc_irq(tp);
if (rc < 0) {
- netif_err(tp, probe, dev, "Can't allocate interrupt\n");
+ dev_err(&pdev->dev, "Can't allocate interrupt\n");
return rc;
}
@@ -8296,29 +7758,18 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
u64_stats_init(&tp->tx_stats.syncp);
/* Get MAC address */
- if (tp->mac_version == RTL_GIGA_MAC_VER_35 ||
- tp->mac_version == RTL_GIGA_MAC_VER_36 ||
- tp->mac_version == RTL_GIGA_MAC_VER_37 ||
- tp->mac_version == RTL_GIGA_MAC_VER_38 ||
- tp->mac_version == RTL_GIGA_MAC_VER_40 ||
- tp->mac_version == RTL_GIGA_MAC_VER_41 ||
- tp->mac_version == RTL_GIGA_MAC_VER_42 ||
- tp->mac_version == RTL_GIGA_MAC_VER_43 ||
- tp->mac_version == RTL_GIGA_MAC_VER_44 ||
- tp->mac_version == RTL_GIGA_MAC_VER_45 ||
- tp->mac_version == RTL_GIGA_MAC_VER_46 ||
- tp->mac_version == RTL_GIGA_MAC_VER_47 ||
- tp->mac_version == RTL_GIGA_MAC_VER_48 ||
- tp->mac_version == RTL_GIGA_MAC_VER_49 ||
- tp->mac_version == RTL_GIGA_MAC_VER_50 ||
- tp->mac_version == RTL_GIGA_MAC_VER_51) {
- u16 mac_addr[3];
-
+ switch (tp->mac_version) {
+ u8 mac_addr[ETH_ALEN] __aligned(4);
+ case RTL_GIGA_MAC_VER_35 ... RTL_GIGA_MAC_VER_38:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
*(u32 *)&mac_addr[0] = rtl_eri_read(tp, 0xe0, ERIAR_EXGMAC);
- *(u16 *)&mac_addr[2] = rtl_eri_read(tp, 0xe4, ERIAR_EXGMAC);
+ *(u16 *)&mac_addr[4] = rtl_eri_read(tp, 0xe4, ERIAR_EXGMAC);
- if (is_valid_ether_addr((u8 *)mac_addr))
- rtl_rar_set(tp, (u8 *)mac_addr);
+ if (is_valid_ether_addr(mac_addr))
+ rtl_rar_set(tp, mac_addr);
+ break;
+ default:
+ break;
}
for (i = 0; i < ETH_ALEN; i++)
dev->dev_addr[i] = RTL_R8(tp, MAC0 + i);
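The MAC-address block now reads into a 4-byte-aligned u8[ETH_ALEN]: the ERI register at 0xe0 supplies bytes 0-3 and 0xe4 bytes 4-5, exactly as the old u16[3] casts did. A sketch of the assembly step using memcpy in place of the driver's aligned casts; the printed byte order assumes a little-endian host:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	uint32_t eri_e0 = 0x44332211;	/* pretend rtl_eri_read(tp, 0xe0, ...) */
	uint32_t eri_e4 = 0x00006655;	/* pretend rtl_eri_read(tp, 0xe4, ...) */
	uint8_t mac[6];

	memcpy(&mac[0], &eri_e0, 4);	/* bytes 0-3 */
	memcpy(&mac[4], &eri_e4, 2);	/* bytes 4-5 */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}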
@@ -8326,7 +7777,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->ethtool_ops = &rtl8169_ethtool_ops;
dev->watchdog_timeo = RTL8169_TX_TIMEOUT;
- netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT);
+ netif_napi_add(dev, &tp->napi, rtl8169_poll, NAPI_POLL_WEIGHT);
/* don't enable SG, IP_CSUM and TSO by default - it might not work
* properly for all devices */
@@ -8349,13 +7800,17 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Disallow toggling */
dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
- if (tp->txd_version == RTL_TD_0)
+ switch (rtl_chip_infos[chipset].txd_version) {
+ case RTL_TD_0:
tp->tso_csum = rtl8169_tso_csum_v1;
- else if (tp->txd_version == RTL_TD_1) {
+ break;
+ case RTL_TD_1:
tp->tso_csum = rtl8169_tso_csum_v2;
dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
- } else
+ break;
+ default:
WARN_ON_ONCE(1);
+ }
dev->hw_features |= NETIF_F_RXALL;
dev->hw_features |= NETIF_F_RXFCS;
@@ -8368,9 +7823,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->event_slow = cfg->event_slow;
tp->coalesce_info = cfg->coalesce_info;
- tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ?
- ~(RxBOVF | RxFOVF) : ~0;
-
timer_setup(&tp->timer, rtl8169_phy_timer, 0);
tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
@@ -8387,15 +7839,15 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc < 0)
return rc;
- netif_info(tp, probe, dev, "%s at 0x%p, %pM, XID %08x IRQ %d\n",
- rtl_chip_infos[chipset].name, tp->mmio_addr, dev->dev_addr,
- (u32)(RTL_R32(tp, TxConfig) & 0x9cf0f8ff),
+ netif_info(tp, probe, dev, "%s, %pM, XID %08x, IRQ %d\n",
+ rtl_chip_infos[chipset].name, dev->dev_addr,
+ (u32)(RTL_R32(tp, TxConfig) & 0xfcf0f8ff),
pci_irq_vector(pdev, 0));
if (rtl_chip_infos[chipset].jumbo_max != JUMBO_1K) {
netif_info(tp, probe, dev, "jumbo features [frames: %d bytes, "
"tx checksumming: %s]\n",
rtl_chip_infos[chipset].jumbo_max,
- rtl_chip_infos[chipset].jumbo_tx_csum ? "ok" : "ko");
+ tp->mac_version <= RTL_GIGA_MAC_VER_06 ? "ok" : "ko");
}
if (r8168_check_dash(tp))
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index b6b90a6314e3..e9007b613f17 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -442,12 +442,33 @@ static void sh_eth_modify(struct net_device *ndev, int enum_index, u32 clear,
static void sh_eth_tsu_write(struct sh_eth_private *mdp, u32 data,
int enum_index)
{
- iowrite32(data, mdp->tsu_addr + mdp->reg_offset[enum_index]);
+ u16 offset = mdp->reg_offset[enum_index];
+
+ if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
+ return;
+
+ iowrite32(data, mdp->tsu_addr + offset);
}
static u32 sh_eth_tsu_read(struct sh_eth_private *mdp, int enum_index)
{
- return ioread32(mdp->tsu_addr + mdp->reg_offset[enum_index]);
+ u16 offset = mdp->reg_offset[enum_index];
+
+ if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
+ return ~0U;
+
+ return ioread32(mdp->tsu_addr + offset);
+}
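The TSU accessors now refuse registers a given SoC lacks: the per-chip offset table marks "not present" with an all-ones sentinel, and the helpers warn and bail rather than dereference a bogus address. A self-contained model of the guard (the sentinel matches SH_ETH_OFFSET_INVALID's (u16)~0; everything else is invented):

#include <stdio.h>
#include <stdint.h>

#define OFFSET_INVALID ((uint16_t)~0)	/* mirrors SH_ETH_OFFSET_INVALID */

static const uint16_t reg_offset[] = { 0x0000, OFFSET_INVALID, 0x0008 };

static uint32_t tsu_read(volatile uint32_t *base, int idx)
{
	uint16_t offset = reg_offset[idx];

	if (offset == OFFSET_INVALID) {	/* WARN_ON() in the kernel */
		fprintf(stderr, "register %d not present on this SoC\n", idx);
		return ~0u;
	}
	return base[offset / 4];
}

int main(void)
{
	static volatile uint32_t regs[4] = { 0x11, 0x22, 0xbeef, 0x44 };

	printf("0x%x\n", tsu_read(regs, 2));	/* valid: reads regs[2] */
	tsu_read(regs, 1);			/* sentinel: warns, returns ~0 */
	return 0;
}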
+
+static void sh_eth_soft_swap(char *src, int len)
+{
+#ifdef __LITTLE_ENDIAN
+ u32 *p = (u32 *)src;
+ u32 *maxp = p + DIV_ROUND_UP(len, sizeof(u32));
+
+ for (; p < maxp; p++)
+ *p = swab32(*p);
+#endif
}
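sh_eth_soft_swap(), moved here out of the header, byte-swaps each 32-bit word of the buffer in place on little-endian builds and compiles to nothing on big-endian ones. A portable userspace equivalent:

#include <stdio.h>
#include <stdint.h>

static uint32_t swab32(uint32_t x)
{
	return (x >> 24) | ((x >> 8) & 0x0000ff00u) |
	       ((x << 8) & 0x00ff0000u) | (x << 24);
}

static void soft_swap(char *src, int len)
{
	uint32_t *p = (uint32_t *)src;
	uint32_t *maxp = p + (len + 3) / 4;	/* DIV_ROUND_UP(len, 4) */

	for (; p < maxp; p++)
		*p = swab32(*p);
}

int main(void)
{
	uint32_t words[2];
	char *buf = (char *)words;	/* keep the buffer 4-byte aligned */

	for (int i = 0; i < 8; i++)
		buf[i] = (char)(i + 1);
	soft_swap(buf, 8);
	for (int i = 0; i < 8; i++)
		printf("%d ", buf[i]);
	printf("\n");	/* -> 4 3 2 1 8 7 6 5 */
	return 0;
}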
static void sh_eth_select_mii(struct net_device *ndev)
@@ -456,6 +477,9 @@ static void sh_eth_select_mii(struct net_device *ndev)
u32 value;
switch (mdp->phy_interface) {
+ case PHY_INTERFACE_MODE_RGMII ... PHY_INTERFACE_MODE_RGMII_TXID:
+ value = 0x3;
+ break;
case PHY_INTERFACE_MODE_GMII:
value = 0x2;
break;
@@ -693,7 +717,7 @@ static struct sh_eth_cpu_data rcar_gen1_data = {
EESIPR_RTLFIP | EESIPR_RTSFIP |
EESIPR_PREIP | EESIPR_CERFIP,
- .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
+ .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_TRO,
.eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,
.fdr_value = 0x00000f0f,
@@ -725,7 +749,7 @@ static struct sh_eth_cpu_data rcar_gen2_data = {
EESIPR_RTLFIP | EESIPR_RTSFIP |
EESIPR_PREIP | EESIPR_CERFIP,
- .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
+ .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_TRO,
.eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,
.fdr_value = 0x00000f0f,
@@ -740,6 +764,49 @@ static struct sh_eth_cpu_data rcar_gen2_data = {
.rmiimode = 1,
.magic = 1,
};
+
+/* R8A77980 */
+static struct sh_eth_cpu_data r8a77980_data = {
+ .soft_reset = sh_eth_soft_reset_gether,
+
+ .set_duplex = sh_eth_set_duplex,
+ .set_rate = sh_eth_set_rate_gether,
+
+ .register_type = SH_ETH_REG_GIGABIT,
+
+ .edtrr_trns = EDTRR_TRNS_GETHER,
+ .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD | ECSR_MPD,
+ .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP |
+ ECSIPR_MPDIP,
+ .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP |
+ EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
+ EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
+ EESIPR_RMAFIP | EESIPR_RRFIP |
+ EESIPR_RTLFIP | EESIPR_RTSFIP |
+ EESIPR_PREIP | EESIPR_CERFIP,
+
+ .tx_check = EESR_FTC | EESR_CD | EESR_TRO,
+ .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
+ EESR_RFE | EESR_RDE | EESR_RFRMER |
+ EESR_TFE | EESR_TDE | EESR_ECI,
+ .fdr_value = 0x0000070f,
+
+ .apr = 1,
+ .mpr = 1,
+ .tpauser = 1,
+ .bculr = 1,
+ .hw_swap = 1,
+ .nbst = 1,
+ .rpadir = 1,
+ .rpadir_value = 2 << 16,
+ .no_trimd = 1,
+ .no_ade = 1,
+ .xdfar_rw = 1,
+ .hw_checksum = 1,
+ .select_mii = 1,
+ .magic = 1,
+ .cexcr = 1,
+};
#endif /* CONFIG_OF */
static void sh_eth_set_rate_sh7724(struct net_device *ndev)
@@ -775,7 +842,7 @@ static struct sh_eth_cpu_data sh7724_data = {
EESIPR_RTLFIP | EESIPR_RTSFIP |
EESIPR_PREIP | EESIPR_CERFIP,
- .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
+ .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_TRO,
.eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,
@@ -820,7 +887,7 @@ static struct sh_eth_cpu_data sh7757_data = {
EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
EESIPR_PREIP | EESIPR_CERFIP,
- .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
+ .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_TRO,
.eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,
@@ -1421,8 +1488,13 @@ static int sh_eth_dev_init(struct net_device *ndev)
sh_eth_write(ndev, mdp->cd->trscer_err_mask, TRSCER);
+ /* DMA transfer burst mode */
+ if (mdp->cd->nbst)
+ sh_eth_modify(ndev, EDMR, EDMR_NBST, EDMR_NBST);
+
+ /* Burst cycle count upper-limit */
if (mdp->cd->bculr)
- sh_eth_write(ndev, 0x800, BCULR); /* Burst sycle set */
+ sh_eth_write(ndev, 0x800, BCULR);
sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR);
@@ -2610,12 +2682,6 @@ static int sh_eth_change_mtu(struct net_device *ndev, int new_mtu)
}
/* For TSU_POSTn. Please refer to the manual about these (strange) bitfields */
-static void *sh_eth_tsu_get_post_reg_offset(struct sh_eth_private *mdp,
- int entry)
-{
- return sh_eth_tsu_get_offset(mdp, TSU_POST1) + (entry / 8 * 4);
-}
-
static u32 sh_eth_tsu_get_post_mask(int entry)
{
return 0x0f << (28 - ((entry % 8) * 4));
@@ -2630,27 +2696,25 @@ static void sh_eth_tsu_enable_cam_entry_post(struct net_device *ndev,
int entry)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
+ int reg = TSU_POST1 + entry / 8;
u32 tmp;
- void *reg_offset;
- reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
- tmp = ioread32(reg_offset);
- iowrite32(tmp | sh_eth_tsu_get_post_bit(mdp, entry), reg_offset);
+ tmp = sh_eth_tsu_read(mdp, reg);
+ sh_eth_tsu_write(mdp, tmp | sh_eth_tsu_get_post_bit(mdp, entry), reg);
}
static bool sh_eth_tsu_disable_cam_entry_post(struct net_device *ndev,
int entry)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
+ int reg = TSU_POST1 + entry / 8;
u32 post_mask, ref_mask, tmp;
- void *reg_offset;
- reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
post_mask = sh_eth_tsu_get_post_mask(entry);
ref_mask = sh_eth_tsu_get_post_bit(mdp, entry) & ~post_mask;
- tmp = ioread32(reg_offset);
- iowrite32(tmp & ~post_mask, reg_offset);
+ tmp = sh_eth_tsu_read(mdp, reg);
+ sh_eth_tsu_write(mdp, tmp & ~post_mask, reg);
/* If another port's entry is enabled, the function returns "true" */
return tmp & ref_mask;
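The removed helper computed a raw pointer into TSU_POST1; the replacement keeps the same arithmetic -- eight CAM entries per 32-bit register, four bits each, entry 0 in the top nibble -- but routes it through the checked accessors above. The index/mask math, runnable:

#include <stdio.h>
#include <stdint.h>

static int post_reg(int entry)		/* which TSU_POST register */
{
	return entry / 8;		/* TSU_POST1 + this */
}

static uint32_t post_mask(int entry)	/* nibble within that register */
{
	return 0x0fu << (28 - (entry % 8) * 4);
}

int main(void)
{
	int entry = 10;

	printf("entry %d -> TSU_POST1+%d, mask 0x%08x\n",
	       entry, post_reg(entry), (unsigned)post_mask(entry));
	return 0;
}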
@@ -3023,15 +3087,10 @@ static int sh_mdio_init(struct sh_eth_private *mdp,
pdev->name, pdev->id);
/* register MDIO bus */
- if (dev->of_node) {
- ret = of_mdiobus_register(mdp->mii_bus, dev->of_node);
- } else {
- if (pd->phy_irq > 0)
- mdp->mii_bus->irq[pd->phy] = pd->phy_irq;
-
- ret = mdiobus_register(mdp->mii_bus);
- }
+ if (pd->phy_irq > 0)
+ mdp->mii_bus->irq[pd->phy] = pd->phy_irq;
+ ret = of_mdiobus_register(mdp->mii_bus, dev->of_node);
if (ret)
goto out_free_bus;
@@ -3130,6 +3189,7 @@ static const struct of_device_id sh_eth_match_table[] = {
{ .compatible = "renesas,ether-r8a7791", .data = &rcar_gen2_data },
{ .compatible = "renesas,ether-r8a7793", .data = &rcar_gen2_data },
{ .compatible = "renesas,ether-r8a7794", .data = &rcar_gen2_data },
+ { .compatible = "renesas,gether-r8a77980", .data = &r8a77980_data },
{ .compatible = "renesas,ether-r7s72100", .data = &r7s72100_data },
{ .compatible = "renesas,rcar-gen1-ether", .data = &rcar_gen1_data },
{ .compatible = "renesas,rcar-gen2-ether", .data = &rcar_gen2_data },
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 1bf930d4a1e5..726c55a82dd7 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -184,6 +184,7 @@ enum GECMR_BIT {
/* EDMR */
enum DMAC_M_BIT {
+ EDMR_NBST = 0x80,
EDMR_EL = 0x40, /* Little endian */
EDMR_DL1 = 0x20, EDMR_DL0 = 0x10,
EDMR_SRST_GETHER = 0x03,
@@ -242,7 +243,7 @@ enum EESR_BIT {
EESR_CND = 0x00000800,
EESR_DLC = 0x00000400,
EESR_CD = 0x00000200,
- EESR_RTO = 0x00000100,
+ EESR_TRO = 0x00000100,
EESR_RMAF = 0x00000080,
EESR_CEEF = 0x00000040,
EESR_CELF = 0x00000020,
@@ -262,7 +263,7 @@ enum EESR_BIT {
EESR_CERF) /* Recv frame CRC error */
#define DEFAULT_TX_CHECK (EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | \
- EESR_RTO)
+ EESR_TRO)
#define DEFAULT_EESR_ERR_CHECK (EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | \
EESR_RDE | EESR_RFRMER | EESR_ADE | \
EESR_TFE | EESR_TDE)
@@ -498,20 +499,21 @@ struct sh_eth_cpu_data {
/* hardware features */
unsigned long irq_flags; /* IRQ configuration flags */
- unsigned no_psr:1; /* EtherC DO NOT have PSR */
- unsigned apr:1; /* EtherC have APR */
- unsigned mpr:1; /* EtherC have MPR */
- unsigned tpauser:1; /* EtherC have TPAUSER */
- unsigned bculr:1; /* EtherC have BCULR */
- unsigned tsu:1; /* EtherC have TSU */
- unsigned hw_swap:1; /* E-DMAC have DE bit in EDMR */
- unsigned rpadir:1; /* E-DMAC have RPADIR */
- unsigned no_trimd:1; /* E-DMAC DO NOT have TRIMD */
- unsigned no_ade:1; /* E-DMAC DO NOT have ADE bit in EESR */
+ unsigned no_psr:1; /* EtherC DOES NOT have PSR */
+ unsigned apr:1; /* EtherC has APR */
+ unsigned mpr:1; /* EtherC has MPR */
+ unsigned tpauser:1; /* EtherC has TPAUSER */
+ unsigned bculr:1; /* EtherC has BCULR */
+ unsigned tsu:1; /* EtherC has TSU */
+ unsigned hw_swap:1; /* E-DMAC has DE bit in EDMR */
+ unsigned nbst:1; /* E-DMAC has NBST bit in EDMR */
+ unsigned rpadir:1; /* E-DMAC has RPADIR */
+ unsigned no_trimd:1; /* E-DMAC DOES NOT have TRIMD */
+ unsigned no_ade:1; /* E-DMAC DOES NOT have ADE bit in EESR */
unsigned no_xdfar:1; /* E-DMAC DOES NOT have RDFAR/TDFAR */
unsigned xdfar_rw:1; /* E-DMAC has writeable RDFAR/TDFAR */
unsigned hw_checksum:1; /* E-DMAC has CSMR */
- unsigned select_mii:1; /* EtherC have RMII_MII (MII select register) */
+ unsigned select_mii:1; /* EtherC has RMII_MII (MII select register) */
unsigned rmiimode:1; /* EtherC has RMIIMODE register */
unsigned rtrate:1; /* EtherC has RTRATE register */
unsigned magic:1; /* EtherC has ECMR.MPDE and ECSR.MPD */
@@ -558,18 +560,6 @@ struct sh_eth_private {
unsigned wol_enabled:1;
};
-static inline void sh_eth_soft_swap(char *src, int len)
-{
-#ifdef __LITTLE_ENDIAN__
- u32 *p = (u32 *)src;
- u32 *maxp;
- maxp = p + ((len + sizeof(u32) - 1) / sizeof(u32));
-
- for (; p < maxp; p++)
- *p = swab32(*p);
-#endif
-}
-
static inline void *sh_eth_tsu_get_offset(struct sh_eth_private *mdp,
int enum_index)
{
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index 056cb6093630..aeafdb9ac015 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -1632,6 +1632,9 @@ rocker_world_port_obj_vlan_add(struct rocker_port *rocker_port,
{
struct rocker_world_ops *wops = rocker_port->rocker->wops;
+ if (netif_is_bridge_master(vlan->obj.orig_dev))
+ return -EOPNOTSUPP;
+
if (!wops->port_obj_vlan_add)
return -EOPNOTSUPP;
@@ -1647,6 +1650,9 @@ rocker_world_port_obj_vlan_del(struct rocker_port *rocker_port,
{
struct rocker_world_ops *wops = rocker_port->rocker->wops;
+ if (netif_is_bridge_master(vlan->obj.orig_dev))
+ return -EOPNOTSUPP;
+
if (!wops->port_obj_vlan_del)
return -EOPNOTSUPP;
return wops->port_obj_vlan_del(rocker_port, vlan);
@@ -2738,6 +2744,8 @@ static void rocker_switchdev_event_work(struct work_struct *work)
switch (switchdev_work->event) {
case SWITCHDEV_FDB_ADD_TO_DEVICE:
fdb_info = &switchdev_work->fdb_info;
+ if (!fdb_info->added_by_user)
+ break;
err = rocker_world_port_fdb_add(rocker_port, fdb_info);
if (err) {
netdev_dbg(rocker_port->dev, "fdb add failed err=%d\n", err);
@@ -2747,6 +2755,8 @@ static void rocker_switchdev_event_work(struct work_struct *work)
break;
case SWITCHDEV_FDB_DEL_TO_DEVICE:
fdb_info = &switchdev_work->fdb_info;
+ if (!fdb_info->added_by_user)
+ break;
err = rocker_world_port_fdb_del(rocker_port, fdb_info);
if (err)
netdev_dbg(rocker_port->dev, "fdb delete failed err=%d\n", err);
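Both handlers now share the same early-out: only FDB entries configured by the user ("bridge fdb add ... master") are mirrored into the hardware table, while addresses the bridge learned on its own are left alone. A hedged sketch of the guard, using the switchdev types of this era (helper name hypothetical):

static bool should_offload_fdb(const struct switchdev_notifier_fdb_info *fdb_info)
{
	/* Bridge-learned entries are skipped; only user-added ones
	 * are pushed to (or removed from) the device FDB.
	 */
	return fdb_info->added_by_user;
}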
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 661828e8fdcf..ad4a354ce570 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -1550,6 +1550,38 @@ static int efx_probe_interrupts(struct efx_nic *efx)
return 0;
}
+#if defined(CONFIG_SMP)
+static void efx_set_interrupt_affinity(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+ unsigned int cpu;
+
+ efx_for_each_channel(channel, efx) {
+ cpu = cpumask_local_spread(channel->channel,
+ pcibus_to_node(efx->pci_dev->bus));
+ irq_set_affinity_hint(channel->irq, cpumask_of(cpu));
+ }
+}
+
+static void efx_clear_interrupt_affinity(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ efx_for_each_channel(channel, efx)
+ irq_set_affinity_hint(channel->irq, NULL);
+}
+#else
+static void
+efx_set_interrupt_affinity(struct efx_nic *efx __attribute__ ((unused)))
+{
+}
+
+static void
+efx_clear_interrupt_affinity(struct efx_nic *efx __attribute__ ((unused)))
+{
+}
+#endif /* CONFIG_SMP */
+
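cpumask_local_spread() maps channel i to the i-th online CPU, preferring CPUs on the NUMA node derived from the NIC's PCI bus, so channel IRQs are spread evenly over local cores before spilling to remote ones. A hedged standalone illustration of the same shape (names hypothetical, not the driver's code):

#include <linux/cpumask.h>
#include <linux/interrupt.h>

static void pin_channel_irqs(int nr_channels, const unsigned int *irqs, int node)
{
	int i;

	for (i = 0; i < nr_channels; i++) {
		/* i-th CPU near @node; wraps to remote CPUs if needed */
		unsigned int cpu = cpumask_local_spread(i, node);

		irq_set_affinity_hint(irqs[i], cpumask_of(cpu));
	}
}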
static int efx_soft_enable_interrupts(struct efx_nic *efx)
{
struct efx_channel *channel, *end_channel;
@@ -3307,6 +3339,7 @@ static void efx_pci_remove_main(struct efx_nic *efx)
cancel_work_sync(&efx->reset_work);
efx_disable_interrupts(efx);
+ efx_clear_interrupt_affinity(efx);
efx_nic_fini_interrupt(efx);
efx_fini_port(efx);
efx->type->fini(efx);
@@ -3456,6 +3489,8 @@ static int efx_pci_probe_main(struct efx_nic *efx)
rc = efx_nic_init_interrupt(efx);
if (rc)
goto fail5;
+
+ efx_set_interrupt_affinity(efx);
rc = efx_enable_interrupts(efx);
if (rc)
goto fail6;
@@ -3463,6 +3498,7 @@ static int efx_pci_probe_main(struct efx_nic *efx)
return 0;
fail6:
+ efx_clear_interrupt_affinity(efx);
efx_nic_fini_interrupt(efx);
fail5:
efx_fini_port(efx);
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index cece961f2e82..c3ad564ac4c0 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -435,17 +435,18 @@ static int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
} while (1);
}
-/* Remove buffers put into a tx_queue. None of the buffers must have
- * an skb attached.
+/* Remove buffers put into a tx_queue for the current packet.
+ * None of the buffers must have an skb attached.
*/
-static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
+static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
+ unsigned int insert_count)
{
struct efx_tx_buffer *buffer;
unsigned int bytes_compl = 0;
unsigned int pkts_compl = 0;
/* Work backwards until we hit the original insert pointer value */
- while (tx_queue->insert_count != tx_queue->write_count) {
+ while (tx_queue->insert_count != insert_count) {
--tx_queue->insert_count;
buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
@@ -504,6 +505,8 @@ static int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue,
*/
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
+ unsigned int old_insert_count = tx_queue->insert_count;
+ bool xmit_more = skb->xmit_more;
bool data_mapped = false;
unsigned int segments;
unsigned int skb_len;
@@ -553,8 +556,10 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
/* Update BQL */
netdev_tx_sent_queue(tx_queue->core_txq, skb_len);
+ efx_tx_maybe_stop_queue(tx_queue);
+
/* Pass off to hardware */
- if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq)) {
+ if (!xmit_more || netif_xmit_stopped(tx_queue->core_txq)) {
struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);
/* There could be packets left on the partner queue if those
@@ -577,14 +582,26 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
tx_queue->tx_packets++;
}
- efx_tx_maybe_stop_queue(tx_queue);
-
return NETDEV_TX_OK;
err:
- efx_enqueue_unwind(tx_queue);
+ efx_enqueue_unwind(tx_queue, old_insert_count);
dev_kfree_skb_any(skb);
+
+ /* If we're not expecting another transmit and we had something to push
+ * on this queue or a partner queue then we need to push here to get the
+ * previous packets out.
+ */
+ if (!xmit_more) {
+ struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);
+
+ if (txq2->xmit_more_available)
+ efx_nic_push_buffers(txq2);
+
+ efx_nic_push_buffers(tx_queue);
+ }
+
return NETDEV_TX_OK;
}
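Two details carry this hunk: insert_count is sampled before any mapping so the error path unwinds exactly this packet's descriptors (write_count no longer marks the boundary once earlier xmit_more packets sit unpushed in the ring), and skb->xmit_more is copied to a local up front because the skb may already be freed when the error path consults it. A hedged sketch of the unwind, with hypothetical queue/helper names:

static void unwind_to(struct my_txq *txq, unsigned int old_insert_count)
{
	/* Drop only the failed packet's buffers; anything queued by
	 * earlier xmit_more packets stays in place for the next push.
	 */
	while (txq->insert_count != old_insert_count)
		undo_one_buffer(txq, --txq->insert_count);
}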
diff --git a/drivers/net/ethernet/socionext/Kconfig b/drivers/net/ethernet/socionext/Kconfig
index 6bcfe27fc560..b80048ca82a0 100644
--- a/drivers/net/ethernet/socionext/Kconfig
+++ b/drivers/net/ethernet/socionext/Kconfig
@@ -14,6 +14,8 @@ if NET_VENDOR_SOCIONEXT
config SNI_AVE
tristate "Socionext AVE ethernet support"
depends on (ARCH_UNIPHIER || COMPILE_TEST) && OF
+ depends on HAS_IOMEM
+ select MFD_SYSCON
select PHYLIB
---help---
Driver for gigabit ethernet MACs, called AVE, in the
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index 59fbf74dcada..ce8071fc90c4 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -1057,7 +1057,8 @@ static int netsec_netdev_load_microcode(struct netsec_priv *priv)
return 0;
}
-static int netsec_reset_hardware(struct netsec_priv *priv)
+static int netsec_reset_hardware(struct netsec_priv *priv,
+ bool load_ucode)
{
u32 value;
int err;
@@ -1102,11 +1103,14 @@ static int netsec_reset_hardware(struct netsec_priv *priv)
netsec_write(priv, NETSEC_REG_NRM_RX_CONFIG,
1 << NETSEC_REG_DESC_ENDIAN);
- err = netsec_netdev_load_microcode(priv);
- if (err) {
- netif_err(priv, probe, priv->ndev,
- "%s: failed to load microcode (%d)\n", __func__, err);
- return err;
+ if (load_ucode) {
+ err = netsec_netdev_load_microcode(priv);
+ if (err) {
+ netif_err(priv, probe, priv->ndev,
+ "%s: failed to load microcode (%d)\n",
+ __func__, err);
+ return err;
+ }
}
/* start DMA engines */
@@ -1313,8 +1317,8 @@ static int netsec_netdev_open(struct net_device *ndev)
napi_enable(&priv->napi);
netif_start_queue(ndev);
- /* Enable RX intr. */
- netsec_write(priv, NETSEC_REG_INTEN_SET, NETSEC_IRQ_RX);
+ /* Enable TX+RX intr. */
+ netsec_write(priv, NETSEC_REG_INTEN_SET, NETSEC_IRQ_RX | NETSEC_IRQ_TX);
return 0;
err3:
@@ -1328,6 +1332,7 @@ err1:
static int netsec_netdev_stop(struct net_device *ndev)
{
+ int ret;
struct netsec_priv *priv = netdev_priv(ndev);
netif_stop_queue(priv->ndev);
@@ -1343,12 +1348,14 @@ static int netsec_netdev_stop(struct net_device *ndev)
netsec_uninit_pkt_dring(priv, NETSEC_RING_TX);
netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
+ ret = netsec_reset_hardware(priv, false);
+
phy_stop(ndev->phydev);
phy_disconnect(ndev->phydev);
pm_runtime_put_sync(priv->dev);
- return 0;
+ return ret;
}
static int netsec_netdev_init(struct net_device *ndev)
@@ -1364,7 +1371,7 @@ static int netsec_netdev_init(struct net_device *ndev)
if (ret)
goto err1;
- ret = netsec_reset_hardware(priv);
+ ret = netsec_reset_hardware(priv, true);
if (ret)
goto err2;
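The load_ucode split means the stop path resets the DMA engines without re-downloading firmware, which survives a soft reset; only the first initialisation pays the download cost. The two call sites after this change:

ret = netsec_reset_hardware(priv, true);   /* ndo_init: reset + microcode load */
ret = netsec_reset_hardware(priv, false);  /* ndo_stop: reset only, microcode kept */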
diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c
index 0b3b7a460641..f7ecceeb1e28 100644
--- a/drivers/net/ethernet/socionext/sni_ave.c
+++ b/drivers/net/ethernet/socionext/sni_ave.c
@@ -11,6 +11,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
+#include <linux/mfd/syscon.h>
#include <linux/mii.h>
#include <linux/module.h>
#include <linux/netdevice.h>
@@ -18,6 +19,7 @@
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/phy.h>
+#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/types.h>
#include <linux/u64_stats_sync.h>
@@ -197,8 +199,16 @@
#define AVE_INTM_COUNT 20
#define AVE_FORCE_TXINTCNT 1
+/* SG */
+#define SG_ETPINMODE 0x540
+#define SG_ETPINMODE_EXTPHY BIT(1) /* for LD11 */
+#define SG_ETPINMODE_RMII(ins) BIT(ins)
+
#define IS_DESC_64BIT(p) ((p)->data->is_desc_64bit)
+#define AVE_MAX_CLKS 4
+#define AVE_MAX_RSTS 2
+
enum desc_id {
AVE_DESCID_RX,
AVE_DESCID_TX,
@@ -225,10 +235,6 @@ struct ave_desc_info {
struct ave_desc *desc; /* skb info related descriptor */
};
-struct ave_soc_data {
- bool is_desc_64bit;
-};
-
struct ave_stats {
struct u64_stats_sync syncp;
u64 packets;
@@ -245,11 +251,16 @@ struct ave_private {
int phy_id;
unsigned int desc_size;
u32 msg_enable;
- struct clk *clk;
- struct reset_control *rst;
+ int nclks;
+ struct clk *clk[AVE_MAX_CLKS];
+ int nrsts;
+ struct reset_control *rst[AVE_MAX_RSTS];
phy_interface_t phy_mode;
struct phy_device *phydev;
struct mii_bus *mdio;
+ struct regmap *regmap;
+ unsigned int pinmode_mask;
+ unsigned int pinmode_val;
/* stats */
struct ave_stats stats_rx;
@@ -272,6 +283,14 @@ struct ave_private {
const struct ave_soc_data *data;
};
+struct ave_soc_data {
+ bool is_desc_64bit;
+ const char *clock_names[AVE_MAX_CLKS];
+ const char *reset_names[AVE_MAX_RSTS];
+ int (*get_pinmode)(struct ave_private *priv,
+ phy_interface_t phy_mode, u32 arg);
+};
+
static u32 ave_desc_read(struct net_device *ndev, enum desc_id id, int entry,
int offset)
{
@@ -1153,20 +1172,30 @@ static int ave_init(struct net_device *ndev)
struct device_node *np = dev->of_node;
struct device_node *mdio_np;
struct phy_device *phydev;
- int ret;
+ int nc, nr, ret;
/* enable clk because of hw access until ndo_open */
- ret = clk_prepare_enable(priv->clk);
- if (ret) {
- dev_err(dev, "can't enable clock\n");
- return ret;
+ for (nc = 0; nc < priv->nclks; nc++) {
+ ret = clk_prepare_enable(priv->clk[nc]);
+ if (ret) {
+ dev_err(dev, "can't enable clock\n");
+ goto out_clk_disable;
+ }
}
- ret = reset_control_deassert(priv->rst);
- if (ret) {
- dev_err(dev, "can't deassert reset\n");
- goto out_clk_disable;
+
+ for (nr = 0; nr < priv->nrsts; nr++) {
+ ret = reset_control_deassert(priv->rst[nr]);
+ if (ret) {
+ dev_err(dev, "can't deassert reset\n");
+ goto out_reset_assert;
+ }
}
+ ret = regmap_update_bits(priv->regmap, SG_ETPINMODE,
+ priv->pinmode_mask, priv->pinmode_val);
+ if (ret)
+ return ret;
+
ave_global_reset(ndev);
mdio_np = of_get_child_by_name(np, "mdio");
@@ -1207,9 +1236,11 @@ static int ave_init(struct net_device *ndev)
out_mdio_unregister:
mdiobus_unregister(priv->mdio);
out_reset_assert:
- reset_control_assert(priv->rst);
+ while (--nr >= 0)
+ reset_control_assert(priv->rst[nr]);
out_clk_disable:
- clk_disable_unprepare(priv->clk);
+ while (--nc >= 0)
+ clk_disable_unprepare(priv->clk[nc]);
return ret;
}
@@ -1217,13 +1248,16 @@ out_clk_disable:
static void ave_uninit(struct net_device *ndev)
{
struct ave_private *priv = netdev_priv(ndev);
+ int i;
phy_disconnect(priv->phydev);
mdiobus_unregister(priv->mdio);
/* disable clk because of hw access after ndo_stop */
- reset_control_assert(priv->rst);
- clk_disable_unprepare(priv->clk);
+ for (i = 0; i < priv->nrsts; i++)
+ reset_control_assert(priv->rst[i]);
+ for (i = 0; i < priv->nclks; i++)
+ clk_disable_unprepare(priv->clk[i]);
}
static int ave_open(struct net_device *ndev)
@@ -1520,6 +1554,7 @@ static int ave_probe(struct platform_device *pdev)
const struct ave_soc_data *data;
struct device *dev = &pdev->dev;
char buf[ETHTOOL_FWVERS_LEN];
+ struct of_phandle_args args;
phy_interface_t phy_mode;
struct ave_private *priv;
struct net_device *ndev;
@@ -1527,8 +1562,9 @@ static int ave_probe(struct platform_device *pdev)
struct resource *res;
const void *mac_addr;
void __iomem *base;
+ const char *name;
+ int i, irq, ret;
u64 dma_mask;
- int irq, ret;
u32 ave_id;
data = of_device_get_match_data(dev);
@@ -1541,12 +1577,6 @@ static int ave_probe(struct platform_device *pdev)
dev_err(dev, "phy-mode not found\n");
return -EINVAL;
}
- if ((!phy_interface_mode_is_rgmii(phy_mode)) &&
- phy_mode != PHY_INTERFACE_MODE_RMII &&
- phy_mode != PHY_INTERFACE_MODE_MII) {
- dev_err(dev, "phy-mode is invalid\n");
- return -EINVAL;
- }
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
@@ -1614,15 +1644,47 @@ static int ave_probe(struct platform_device *pdev)
u64_stats_init(&priv->stats_tx.syncp);
u64_stats_init(&priv->stats_rx.syncp);
- priv->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(priv->clk)) {
- ret = PTR_ERR(priv->clk);
- goto out_free_netdev;
+ for (i = 0; i < AVE_MAX_CLKS; i++) {
+ name = priv->data->clock_names[i];
+ if (!name)
+ break;
+ priv->clk[i] = devm_clk_get(dev, name);
+ if (IS_ERR(priv->clk[i])) {
+ ret = PTR_ERR(priv->clk[i]);
+ goto out_free_netdev;
+ }
+ priv->nclks++;
+ }
+
+ for (i = 0; i < AVE_MAX_RSTS; i++) {
+ name = priv->data->reset_names[i];
+ if (!name)
+ break;
+ priv->rst[i] = devm_reset_control_get_shared(dev, name);
+ if (IS_ERR(priv->rst[i])) {
+ ret = PTR_ERR(priv->rst[i]);
+ goto out_free_netdev;
+ }
+ priv->nrsts++;
}
- priv->rst = devm_reset_control_get_optional_shared(dev, NULL);
- if (IS_ERR(priv->rst)) {
- ret = PTR_ERR(priv->rst);
+ ret = of_parse_phandle_with_fixed_args(np,
+ "socionext,syscon-phy-mode",
+ 1, 0, &args);
+ if (ret) {
+ netdev_err(ndev, "can't get syscon-phy-mode property\n");
+ goto out_free_netdev;
+ }
+ priv->regmap = syscon_node_to_regmap(args.np);
+ of_node_put(args.np);
+ if (IS_ERR(priv->regmap)) {
+ netdev_err(ndev, "can't map syscon-phy-mode\n");
+ ret = PTR_ERR(priv->regmap);
+ goto out_free_netdev;
+ }
+ ret = priv->data->get_pinmode(priv, phy_mode, args.args[0]);
+ if (ret) {
+ netdev_err(ndev, "invalid phy-mode setting\n");
goto out_free_netdev;
}
@@ -1685,24 +1747,148 @@ static int ave_remove(struct platform_device *pdev)
return 0;
}
+static int ave_pro4_get_pinmode(struct ave_private *priv,
+ phy_interface_t phy_mode, u32 arg)
+{
+ if (arg > 0)
+ return -EINVAL;
+
+ priv->pinmode_mask = SG_ETPINMODE_RMII(0);
+
+ switch (phy_mode) {
+ case PHY_INTERFACE_MODE_RMII:
+ priv->pinmode_val = SG_ETPINMODE_RMII(0);
+ break;
+ case PHY_INTERFACE_MODE_MII:
+ case PHY_INTERFACE_MODE_RGMII:
+ priv->pinmode_val = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ave_ld11_get_pinmode(struct ave_private *priv,
+ phy_interface_t phy_mode, u32 arg)
+{
+ if (arg > 0)
+ return -EINVAL;
+
+ priv->pinmode_mask = SG_ETPINMODE_EXTPHY | SG_ETPINMODE_RMII(0);
+
+ switch (phy_mode) {
+ case PHY_INTERFACE_MODE_INTERNAL:
+ priv->pinmode_val = 0;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ priv->pinmode_val = SG_ETPINMODE_EXTPHY | SG_ETPINMODE_RMII(0);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ave_ld20_get_pinmode(struct ave_private *priv,
+ phy_interface_t phy_mode, u32 arg)
+{
+ if (arg > 0)
+ return -EINVAL;
+
+ priv->pinmode_mask = SG_ETPINMODE_RMII(0);
+
+ switch (phy_mode) {
+ case PHY_INTERFACE_MODE_RMII:
+ priv->pinmode_val = SG_ETPINMODE_RMII(0);
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ priv->pinmode_val = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ave_pxs3_get_pinmode(struct ave_private *priv,
+ phy_interface_t phy_mode, u32 arg)
+{
+ if (arg > 1)
+ return -EINVAL;
+
+ priv->pinmode_mask = SG_ETPINMODE_RMII(arg);
+
+ switch (phy_mode) {
+ case PHY_INTERFACE_MODE_RMII:
+ priv->pinmode_val = SG_ETPINMODE_RMII(arg);
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ priv->pinmode_val = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static const struct ave_soc_data ave_pro4_data = {
.is_desc_64bit = false,
+ .clock_names = {
+ "gio", "ether", "ether-gb", "ether-phy",
+ },
+ .reset_names = {
+ "gio", "ether",
+ },
+ .get_pinmode = ave_pro4_get_pinmode,
};
static const struct ave_soc_data ave_pxs2_data = {
.is_desc_64bit = false,
+ .clock_names = {
+ "ether",
+ },
+ .reset_names = {
+ "ether",
+ },
+ .get_pinmode = ave_pro4_get_pinmode,
};
static const struct ave_soc_data ave_ld11_data = {
.is_desc_64bit = false,
+ .clock_names = {
+ "ether",
+ },
+ .reset_names = {
+ "ether",
+ },
+ .get_pinmode = ave_ld11_get_pinmode,
};
static const struct ave_soc_data ave_ld20_data = {
.is_desc_64bit = true,
+ .clock_names = {
+ "ether",
+ },
+ .reset_names = {
+ "ether",
+ },
+ .get_pinmode = ave_ld20_get_pinmode,
};
static const struct ave_soc_data ave_pxs3_data = {
.is_desc_64bit = false,
+ .clock_names = {
+ "ether",
+ },
+ .reset_names = {
+ "ether",
+ },
+ .get_pinmode = ave_pxs3_get_pinmode,
};
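Each variant's get_pinmode() hook turns the phy-mode plus the syscon phandle argument (the controller instance) into a mask/value pair that ave_init() later applies with regmap_update_bits(). For PXs3, the only SoC here with two instances, what ends up written for instance 1 strapped for RMII is, per ave_pxs3_get_pinmode() above:

priv->pinmode_mask = SG_ETPINMODE_RMII(1);	/* BIT(1) for instance 1 */
priv->pinmode_val  = SG_ETPINMODE_RMII(1);	/* RGMII would leave this 0 */
ret = regmap_update_bits(priv->regmap, SG_ETPINMODE,
			 priv->pinmode_mask, priv->pinmode_val);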
static const struct of_device_id of_ave_match[] = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index 972e4ef6d414..68e9e2640c62 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -4,7 +4,8 @@ stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \
chain_mode.o dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \
dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \
mmc_core.o stmmac_hwtstamp.o stmmac_ptp.o dwmac4_descs.o \
- dwmac4_dma.o dwmac4_lib.o dwmac4_core.o dwmac5.o $(stmmac-y)
+ dwmac4_dma.o dwmac4_lib.o dwmac4_core.o dwmac5.o hwif.o \
+ stmmac_tc.o $(stmmac-y)
# Ordering matters. Generic driver must be last.
obj-$(CONFIG_STMMAC_PLATFORM) += stmmac-platform.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
index e93c40b4631e..b9c9003060c5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
@@ -24,7 +24,7 @@
#include "stmmac.h"
-static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
+static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
{
struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)p;
unsigned int nopaged_len = skb_headlen(skb);
@@ -51,8 +51,8 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
tx_q->tx_skbuff_dma[entry].buf = des2;
tx_q->tx_skbuff_dma[entry].len = bmax;
/* do not close the descriptor and do not set own bit */
- priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_CHAIN_MODE,
- 0, false, skb->len);
+ stmmac_prepare_tx_desc(priv, desc, 1, bmax, csum, STMMAC_CHAIN_MODE,
+ 0, false, skb->len);
while (len != 0) {
tx_q->tx_skbuff[entry] = NULL;
@@ -68,9 +68,8 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
return -1;
tx_q->tx_skbuff_dma[entry].buf = des2;
tx_q->tx_skbuff_dma[entry].len = bmax;
- priv->hw->desc->prepare_tx_desc(desc, 0, bmax, csum,
- STMMAC_CHAIN_MODE, 1,
- false, skb->len);
+ stmmac_prepare_tx_desc(priv, desc, 0, bmax, csum,
+ STMMAC_CHAIN_MODE, 1, false, skb->len);
len -= bmax;
i++;
} else {
@@ -83,9 +82,8 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
tx_q->tx_skbuff_dma[entry].buf = des2;
tx_q->tx_skbuff_dma[entry].len = len;
/* last descriptor can be set now */
- priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
- STMMAC_CHAIN_MODE, 1,
- true, skb->len);
+ stmmac_prepare_tx_desc(priv, desc, 0, len, csum,
+ STMMAC_CHAIN_MODE, 1, true, skb->len);
len = 0;
}
}
@@ -95,7 +93,7 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
return entry;
}
-static unsigned int stmmac_is_jumbo_frm(int len, int enh_desc)
+static unsigned int is_jumbo_frm(int len, int enh_desc)
{
unsigned int ret = 0;
@@ -107,7 +105,7 @@ static unsigned int stmmac_is_jumbo_frm(int len, int enh_desc)
return ret;
}
-static void stmmac_init_dma_chain(void *des, dma_addr_t phy_addr,
+static void init_dma_chain(void *des, dma_addr_t phy_addr,
unsigned int size, unsigned int extend_desc)
{
/*
@@ -137,7 +135,7 @@ static void stmmac_init_dma_chain(void *des, dma_addr_t phy_addr,
}
}
-static void stmmac_refill_desc3(void *priv_ptr, struct dma_desc *p)
+static void refill_desc3(void *priv_ptr, struct dma_desc *p)
{
struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)priv_ptr;
struct stmmac_priv *priv = rx_q->priv_data;
@@ -153,7 +151,7 @@ static void stmmac_refill_desc3(void *priv_ptr, struct dma_desc *p)
sizeof(struct dma_desc)));
}
-static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p)
+static void clean_desc3(void *priv_ptr, struct dma_desc *p)
{
struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)priv_ptr;
struct stmmac_priv *priv = tx_q->priv_data;
@@ -171,9 +169,9 @@ static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p)
}
const struct stmmac_mode_ops chain_mode_ops = {
- .init = stmmac_init_dma_chain,
- .is_jumbo_frm = stmmac_is_jumbo_frm,
- .jumbo_frm = stmmac_jumbo_frm,
- .refill_desc3 = stmmac_refill_desc3,
- .clean_desc3 = stmmac_clean_desc3,
+ .init = init_dma_chain,
+ .is_jumbo_frm = is_jumbo_frm,
+ .jumbo_frm = jumbo_frm,
+ .refill_desc3 = refill_desc3,
+ .clean_desc3 = clean_desc3,
};
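Dropping the stmmac_ prefix from these static callbacks pairs with the new hwif.o object added to the Makefile above: callers now go through generic stmmac_*() wrappers (see the stmmac_prepare_tx_desc() calls earlier in this file) that dispatch via the ops tables, so the short names only ever appear behind the chain_mode_ops vtable. A hedged sketch of the wrapper idea, not the actual hwif.h macro:

static inline void stmmac_prepare_tx_desc_sketch(struct stmmac_priv *priv,
						 struct dma_desc *p, int is_fs,
						 int len, bool csum, int mode,
						 bool tx_own, bool ls,
						 unsigned int tot_pkt_len)
{
	/* Dispatch through the per-core descriptor ops, if present. */
	if (priv->hw->desc && priv->hw->desc->prepare_tx_desc)
		priv->hw->desc->prepare_tx_desc(p, is_fs, len, csum, mode,
						tx_own, ls, tot_pkt_len);
}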
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index ad2388aee463..78fd0f8b8e81 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -32,12 +32,14 @@
#endif
#include "descs.h"
+#include "hwif.h"
#include "mmc.h"
/* Synopsys Core versions */
#define DWMAC_CORE_3_40 0x34
#define DWMAC_CORE_3_50 0x35
#define DWMAC_CORE_4_00 0x40
+#define DWMAC_CORE_4_10 0x41
#define DWMAC_CORE_5_00 0x50
#define DWMAC_CORE_5_10 0x51
#define STMMAC_CHAN0 0 /* Always supported and default for all chips */
@@ -344,6 +346,8 @@ struct dma_features {
/* TX and RX number of queues */
unsigned int number_rx_queues;
unsigned int number_tx_queues;
+ /* PPS output */
+ unsigned int pps_out_num;
/* Alternate (enhanced) DESC mode */
unsigned int enh_desc;
/* TX and RX FIFO sizes */
@@ -351,6 +355,10 @@ struct dma_features {
unsigned int rx_fifo_size;
/* Automotive Safety Package */
unsigned int asp;
+ /* RX Parser */
+ unsigned int frpsel;
+ unsigned int frpbs;
+ unsigned int frpes;
};
/* GMAC TX FIFO is 8K, Rx FIFO is 16K */
@@ -377,197 +385,11 @@ struct dma_features {
#define JUMBO_LEN 9000
-/* Descriptors helpers */
-struct stmmac_desc_ops {
- /* DMA RX descriptor ring initialization */
- void (*init_rx_desc) (struct dma_desc *p, int disable_rx_ic, int mode,
- int end);
- /* DMA TX descriptor ring initialization */
- void (*init_tx_desc) (struct dma_desc *p, int mode, int end);
-
- /* Invoked by the xmit function to prepare the tx descriptor */
- void (*prepare_tx_desc) (struct dma_desc *p, int is_fs, int len,
- bool csum_flag, int mode, bool tx_own,
- bool ls, unsigned int tot_pkt_len);
- void (*prepare_tso_tx_desc)(struct dma_desc *p, int is_fs, int len1,
- int len2, bool tx_own, bool ls,
- unsigned int tcphdrlen,
- unsigned int tcppayloadlen);
- /* Set/get the owner of the descriptor */
- void (*set_tx_owner) (struct dma_desc *p);
- int (*get_tx_owner) (struct dma_desc *p);
- /* Clean the tx descriptor as soon as the tx irq is received */
- void (*release_tx_desc) (struct dma_desc *p, int mode);
- /* Clear interrupt on tx frame completion. When this bit is
- * set an interrupt happens as soon as the frame is transmitted */
- void (*set_tx_ic)(struct dma_desc *p);
- /* Last tx segment reports the transmit status */
- int (*get_tx_ls) (struct dma_desc *p);
- /* Return the transmit status looking at the TDES1 */
- int (*tx_status) (void *data, struct stmmac_extra_stats *x,
- struct dma_desc *p, void __iomem *ioaddr);
- /* Get the buffer size from the descriptor */
- int (*get_tx_len) (struct dma_desc *p);
- /* Handle extra events on specific interrupts hw dependent */
- void (*set_rx_owner) (struct dma_desc *p);
- /* Get the receive frame size */
- int (*get_rx_frame_len) (struct dma_desc *p, int rx_coe_type);
- /* Return the reception status looking at the RDES1 */
- int (*rx_status) (void *data, struct stmmac_extra_stats *x,
- struct dma_desc *p);
- void (*rx_extended_status) (void *data, struct stmmac_extra_stats *x,
- struct dma_extended_desc *p);
- /* Set tx timestamp enable bit */
- void (*enable_tx_timestamp) (struct dma_desc *p);
- /* get tx timestamp status */
- int (*get_tx_timestamp_status) (struct dma_desc *p);
- /* get timestamp value */
- u64(*get_timestamp) (void *desc, u32 ats);
- /* get rx timestamp status */
- int (*get_rx_timestamp_status)(void *desc, void *next_desc, u32 ats);
- /* Display ring */
- void (*display_ring)(void *head, unsigned int size, bool rx);
- /* set MSS via context descriptor */
- void (*set_mss)(struct dma_desc *p, unsigned int mss);
-};
-
extern const struct stmmac_desc_ops enh_desc_ops;
extern const struct stmmac_desc_ops ndesc_ops;
-/* Specific DMA helpers */
-struct stmmac_dma_ops {
- /* DMA core initialization */
- int (*reset)(void __iomem *ioaddr);
- void (*init)(void __iomem *ioaddr, struct stmmac_dma_cfg *dma_cfg,
- u32 dma_tx, u32 dma_rx, int atds);
- void (*init_chan)(void __iomem *ioaddr,
- struct stmmac_dma_cfg *dma_cfg, u32 chan);
- void (*init_rx_chan)(void __iomem *ioaddr,
- struct stmmac_dma_cfg *dma_cfg,
- u32 dma_rx_phy, u32 chan);
- void (*init_tx_chan)(void __iomem *ioaddr,
- struct stmmac_dma_cfg *dma_cfg,
- u32 dma_tx_phy, u32 chan);
- /* Configure the AXI Bus Mode Register */
- void (*axi)(void __iomem *ioaddr, struct stmmac_axi *axi);
- /* Dump DMA registers */
- void (*dump_regs)(void __iomem *ioaddr, u32 *reg_space);
- /* Set tx/rx threshold in the csr6 register
- * An invalid value enables the store-and-forward mode */
- void (*dma_mode)(void __iomem *ioaddr, int txmode, int rxmode,
- int rxfifosz);
- void (*dma_rx_mode)(void __iomem *ioaddr, int mode, u32 channel,
- int fifosz, u8 qmode);
- void (*dma_tx_mode)(void __iomem *ioaddr, int mode, u32 channel,
- int fifosz, u8 qmode);
- /* To track extra statistic (if supported) */
- void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x,
- void __iomem *ioaddr);
- void (*enable_dma_transmission) (void __iomem *ioaddr);
- void (*enable_dma_irq)(void __iomem *ioaddr, u32 chan);
- void (*disable_dma_irq)(void __iomem *ioaddr, u32 chan);
- void (*start_tx)(void __iomem *ioaddr, u32 chan);
- void (*stop_tx)(void __iomem *ioaddr, u32 chan);
- void (*start_rx)(void __iomem *ioaddr, u32 chan);
- void (*stop_rx)(void __iomem *ioaddr, u32 chan);
- int (*dma_interrupt) (void __iomem *ioaddr,
- struct stmmac_extra_stats *x, u32 chan);
- /* If supported then get the optional core features */
- void (*get_hw_feature)(void __iomem *ioaddr,
- struct dma_features *dma_cap);
- /* Program the HW RX Watchdog */
- void (*rx_watchdog)(void __iomem *ioaddr, u32 riwt, u32 number_chan);
- void (*set_tx_ring_len)(void __iomem *ioaddr, u32 len, u32 chan);
- void (*set_rx_ring_len)(void __iomem *ioaddr, u32 len, u32 chan);
- void (*set_rx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
- void (*set_tx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
- void (*enable_tso)(void __iomem *ioaddr, bool en, u32 chan);
-};
-
struct mac_device_info;
-/* Helpers to program the MAC core */
-struct stmmac_ops {
- /* MAC core initialization */
- void (*core_init)(struct mac_device_info *hw, struct net_device *dev);
- /* Enable the MAC RX/TX */
- void (*set_mac)(void __iomem *ioaddr, bool enable);
- /* Enable and verify that the IPC module is supported */
- int (*rx_ipc)(struct mac_device_info *hw);
- /* Enable RX Queues */
- void (*rx_queue_enable)(struct mac_device_info *hw, u8 mode, u32 queue);
- /* RX Queues Priority */
- void (*rx_queue_prio)(struct mac_device_info *hw, u32 prio, u32 queue);
- /* TX Queues Priority */
- void (*tx_queue_prio)(struct mac_device_info *hw, u32 prio, u32 queue);
- /* RX Queues Routing */
- void (*rx_queue_routing)(struct mac_device_info *hw, u8 packet,
- u32 queue);
- /* Program RX Algorithms */
- void (*prog_mtl_rx_algorithms)(struct mac_device_info *hw, u32 rx_alg);
- /* Program TX Algorithms */
- void (*prog_mtl_tx_algorithms)(struct mac_device_info *hw, u32 tx_alg);
- /* Set MTL TX queues weight */
- void (*set_mtl_tx_queue_weight)(struct mac_device_info *hw,
- u32 weight, u32 queue);
- /* RX MTL queue to RX dma mapping */
- void (*map_mtl_to_dma)(struct mac_device_info *hw, u32 queue, u32 chan);
- /* Configure AV Algorithm */
- void (*config_cbs)(struct mac_device_info *hw, u32 send_slope,
- u32 idle_slope, u32 high_credit, u32 low_credit,
- u32 queue);
- /* Dump MAC registers */
- void (*dump_regs)(struct mac_device_info *hw, u32 *reg_space);
- /* Handle extra events on specific interrupts hw dependent */
- int (*host_irq_status)(struct mac_device_info *hw,
- struct stmmac_extra_stats *x);
- /* Handle MTL interrupts */
- int (*host_mtl_irq_status)(struct mac_device_info *hw, u32 chan);
- /* Multicast filter setting */
- void (*set_filter)(struct mac_device_info *hw, struct net_device *dev);
- /* Flow control setting */
- void (*flow_ctrl)(struct mac_device_info *hw, unsigned int duplex,
- unsigned int fc, unsigned int pause_time, u32 tx_cnt);
- /* Set power management mode (e.g. magic frame) */
- void (*pmt)(struct mac_device_info *hw, unsigned long mode);
- /* Set/Get Unicast MAC addresses */
- void (*set_umac_addr)(struct mac_device_info *hw, unsigned char *addr,
- unsigned int reg_n);
- void (*get_umac_addr)(struct mac_device_info *hw, unsigned char *addr,
- unsigned int reg_n);
- void (*set_eee_mode)(struct mac_device_info *hw,
- bool en_tx_lpi_clockgating);
- void (*reset_eee_mode)(struct mac_device_info *hw);
- void (*set_eee_timer)(struct mac_device_info *hw, int ls, int tw);
- void (*set_eee_pls)(struct mac_device_info *hw, int link);
- void (*debug)(void __iomem *ioaddr, struct stmmac_extra_stats *x,
- u32 rx_queues, u32 tx_queues);
- /* PCS calls */
- void (*pcs_ctrl_ane)(void __iomem *ioaddr, bool ane, bool srgmi_ral,
- bool loopback);
- void (*pcs_rane)(void __iomem *ioaddr, bool restart);
- void (*pcs_get_adv_lp)(void __iomem *ioaddr, struct rgmii_adv *adv);
- /* Safety Features */
- int (*safety_feat_config)(void __iomem *ioaddr, unsigned int asp);
- bool (*safety_feat_irq_status)(struct net_device *ndev,
- void __iomem *ioaddr, unsigned int asp,
- struct stmmac_safety_stats *stats);
- const char *(*safety_feat_dump)(struct stmmac_safety_stats *stats,
- int index, unsigned long *count);
-};
-
-/* PTP and HW Timer helpers */
-struct stmmac_hwtimestamp {
- void (*config_hw_tstamping) (void __iomem *ioaddr, u32 data);
- u32 (*config_sub_second_increment)(void __iomem *ioaddr, u32 ptp_clock,
- int gmac4);
- int (*init_systime) (void __iomem *ioaddr, u32 sec, u32 nsec);
- int (*config_addend) (void __iomem *ioaddr, u32 addend);
- int (*adjust_systime) (void __iomem *ioaddr, u32 sec, u32 nsec,
- int add_sub, int gmac4);
- u64(*get_systime) (void __iomem *ioaddr);
-};
-
extern const struct stmmac_hwtimestamp stmmac_ptp;
extern const struct stmmac_mode_ops dwmac4_ring_mode_ops;
@@ -590,24 +412,13 @@ struct mii_regs {
unsigned int clk_csr_mask;
};
-/* Helpers to manage the descriptors for chain and ring modes */
-struct stmmac_mode_ops {
- void (*init) (void *des, dma_addr_t phy_addr, unsigned int size,
- unsigned int extend_desc);
- unsigned int (*is_jumbo_frm) (int len, int ehn_desc);
- int (*jumbo_frm)(void *priv, struct sk_buff *skb, int csum);
- int (*set_16kib_bfsize)(int mtu);
- void (*init_desc3)(struct dma_desc *p);
- void (*refill_desc3) (void *priv, struct dma_desc *p);
- void (*clean_desc3) (void *priv, struct dma_desc *p);
-};
-
struct mac_device_info {
const struct stmmac_ops *mac;
const struct stmmac_desc_ops *desc;
const struct stmmac_dma_ops *dma;
const struct stmmac_mode_ops *mode;
const struct stmmac_hwtimestamp *ptp;
+ const struct stmmac_tc_ops *tc;
struct mii_regs mii; /* MII register Addresses */
struct mac_link link;
void __iomem *pcsr; /* vpointer to device CSRs */
@@ -625,12 +436,9 @@ struct stmmac_rx_routing {
u32 reg_shift;
};
-struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins,
- int perfect_uc_entries,
- int *synopsys_id);
-struct mac_device_info *dwmac100_setup(void __iomem *ioaddr, int *synopsys_id);
-struct mac_device_info *dwmac4_setup(void __iomem *ioaddr, int mcbins,
- int perfect_uc_entries, int *synopsys_id);
+int dwmac100_setup(struct stmmac_priv *priv);
+int dwmac1000_setup(struct stmmac_priv *priv);
+int dwmac4_setup(struct stmmac_priv *priv);
void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
unsigned int high, unsigned int low);
@@ -650,24 +458,4 @@ extern const struct stmmac_mode_ops ring_mode_ops;
extern const struct stmmac_mode_ops chain_mode_ops;
extern const struct stmmac_desc_ops dwmac4_desc_ops;
-/**
- * stmmac_get_synopsys_id - return the SYINID.
- * @priv: driver private structure
- * Description: this simple function is to decode and return the SYINID
- * starting from the HW core register.
- */
-static inline u32 stmmac_get_synopsys_id(u32 hwid)
-{
- /* Check Synopsys Id (not available on old chips) */
- if (likely(hwid)) {
- u32 uid = ((hwid & 0x0000ff00) >> 8);
- u32 synid = (hwid & 0x000000ff);
-
- pr_info("stmmac - user ID: 0x%x, Synopsys ID: 0x%x\n",
- uid, synid);
-
- return synid;
- }
- return 0;
-}
#endif /* __COMMON_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
index 7cb794094a70..4ff231df7322 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
@@ -18,6 +18,7 @@
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/of_net.h>
#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
@@ -29,6 +30,10 @@
#define PRG_ETH0_RGMII_MODE BIT(0)
+#define PRG_ETH0_EXT_PHY_MODE_MASK GENMASK(2, 0)
+#define PRG_ETH0_EXT_RGMII_MODE 1
+#define PRG_ETH0_EXT_RMII_MODE 4
+
/* mux to choose between fclk_div2 (bit unset) and mpll2 (bit set) */
#define PRG_ETH0_CLK_M250_SEL_SHIFT 4
#define PRG_ETH0_CLK_M250_SEL_MASK GENMASK(4, 4)
@@ -47,12 +52,20 @@
#define MUX_CLK_NUM_PARENTS 2
+struct meson8b_dwmac;
+
+struct meson8b_dwmac_data {
+ int (*set_phy_mode)(struct meson8b_dwmac *dwmac);
+};
+
struct meson8b_dwmac {
- struct device *dev;
- void __iomem *regs;
- phy_interface_t phy_mode;
- struct clk *rgmii_tx_clk;
- u32 tx_delay_ns;
+ struct device *dev;
+ void __iomem *regs;
+
+ const struct meson8b_dwmac_data *data;
+ phy_interface_t phy_mode;
+ struct clk *rgmii_tx_clk;
+ u32 tx_delay_ns;
};
struct meson8b_dwmac_clk_configs {
@@ -171,6 +184,59 @@ static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac)
return 0;
}
+static int meson8b_set_phy_mode(struct meson8b_dwmac *dwmac)
+{
+ switch (dwmac->phy_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ /* enable RGMII mode */
+ meson8b_dwmac_mask_bits(dwmac, PRG_ETH0,
+ PRG_ETH0_RGMII_MODE,
+ PRG_ETH0_RGMII_MODE);
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ /* disable RGMII mode -> enables RMII mode */
+ meson8b_dwmac_mask_bits(dwmac, PRG_ETH0,
+ PRG_ETH0_RGMII_MODE, 0);
+ break;
+ default:
+ dev_err(dwmac->dev, "failed to set phy-mode %s\n",
+ phy_modes(dwmac->phy_mode));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int meson_axg_set_phy_mode(struct meson8b_dwmac *dwmac)
+{
+ switch (dwmac->phy_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ /* enable RGMII mode */
+ meson8b_dwmac_mask_bits(dwmac, PRG_ETH0,
+ PRG_ETH0_EXT_PHY_MODE_MASK,
+ PRG_ETH0_EXT_RGMII_MODE);
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ /* disable RGMII mode -> enables RMII mode */
+ meson8b_dwmac_mask_bits(dwmac, PRG_ETH0,
+ PRG_ETH0_EXT_PHY_MODE_MASK,
+ PRG_ETH0_EXT_RMII_MODE);
+ break;
+ default:
+ dev_err(dwmac->dev, "failed to set phy-mode %s\n",
+ phy_modes(dwmac->phy_mode));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
{
int ret;
@@ -188,10 +254,6 @@ static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_TXID:
- /* enable RGMII mode */
- meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_RGMII_MODE,
- PRG_ETH0_RGMII_MODE);
-
/* only relevant for RMII mode -> disable in RGMII mode */
meson8b_dwmac_mask_bits(dwmac, PRG_ETH0,
PRG_ETH0_INVERTED_RMII_CLK, 0);
@@ -224,10 +286,6 @@ static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
break;
case PHY_INTERFACE_MODE_RMII:
- /* disable RGMII mode -> enables RMII mode */
- meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_RGMII_MODE,
- 0);
-
/* invert internal clk_rmii_i to generate 25/2.5 tx_rx_clk */
meson8b_dwmac_mask_bits(dwmac, PRG_ETH0,
PRG_ETH0_INVERTED_RMII_CLK,
@@ -274,6 +332,11 @@ static int meson8b_dwmac_probe(struct platform_device *pdev)
goto err_remove_config_dt;
}
+ dwmac->data = (const struct meson8b_dwmac_data *)
+ of_device_get_match_data(&pdev->dev);
+ if (!dwmac->data)
+ return -EINVAL;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
dwmac->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(dwmac->regs)) {
@@ -298,6 +361,10 @@ static int meson8b_dwmac_probe(struct platform_device *pdev)
if (ret)
goto err_remove_config_dt;
+ ret = dwmac->data->set_phy_mode(dwmac);
+ if (ret)
+ goto err_remove_config_dt;
+
ret = meson8b_init_prg_eth(dwmac);
if (ret)
goto err_remove_config_dt;
@@ -316,10 +383,31 @@ err_remove_config_dt:
return ret;
}
+static const struct meson8b_dwmac_data meson8b_dwmac_data = {
+ .set_phy_mode = meson8b_set_phy_mode,
+};
+
+static const struct meson8b_dwmac_data meson_axg_dwmac_data = {
+ .set_phy_mode = meson_axg_set_phy_mode,
+};
+
static const struct of_device_id meson8b_dwmac_match[] = {
- { .compatible = "amlogic,meson8b-dwmac" },
- { .compatible = "amlogic,meson8m2-dwmac" },
- { .compatible = "amlogic,meson-gxbb-dwmac" },
+ {
+ .compatible = "amlogic,meson8b-dwmac",
+ .data = &meson8b_dwmac_data,
+ },
+ {
+ .compatible = "amlogic,meson8m2-dwmac",
+ .data = &meson8b_dwmac_data,
+ },
+ {
+ .compatible = "amlogic,meson-gxbb-dwmac",
+ .data = &meson8b_dwmac_data,
+ },
+ {
+ .compatible = "amlogic,meson-axg-dwmac",
+ .data = &meson_axg_dwmac_data,
+ },
{ }
};
MODULE_DEVICE_TABLE(of, meson8b_dwmac_match);
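The per-SoC hook is picked with the usual of_device_get_match_data() pattern: every compatible carries a meson8b_dwmac_data pointer and probe refuses to run without one, so supporting a further SoC reduces to a table entry plus a set_phy_mode() implementation. A sketch with a hypothetical SoC name:

static const struct meson8b_dwmac_data meson_newsoc_dwmac_data = {
	.set_phy_mode = meson_newsoc_set_phy_mode,	/* hypothetical */
};

/* ...and one more entry in meson8b_dwmac_match: */
{
	.compatible = "amlogic,meson-newsoc-dwmac",
	.data = &meson_newsoc_dwmac_data,
},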
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 13133b30b575..f08625a02cea 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -1104,30 +1104,20 @@ static int gmac_clk_enable(struct rk_priv_data *bsp_priv, bool enable)
} else {
if (bsp_priv->clk_enabled) {
if (phy_iface == PHY_INTERFACE_MODE_RMII) {
- if (!IS_ERR(bsp_priv->mac_clk_rx))
- clk_disable_unprepare(
- bsp_priv->mac_clk_rx);
+ clk_disable_unprepare(bsp_priv->mac_clk_rx);
- if (!IS_ERR(bsp_priv->clk_mac_ref))
- clk_disable_unprepare(
- bsp_priv->clk_mac_ref);
+ clk_disable_unprepare(bsp_priv->clk_mac_ref);
- if (!IS_ERR(bsp_priv->clk_mac_refout))
- clk_disable_unprepare(
- bsp_priv->clk_mac_refout);
+ clk_disable_unprepare(bsp_priv->clk_mac_refout);
}
- if (!IS_ERR(bsp_priv->clk_phy))
- clk_disable_unprepare(bsp_priv->clk_phy);
+ clk_disable_unprepare(bsp_priv->clk_phy);
- if (!IS_ERR(bsp_priv->aclk_mac))
- clk_disable_unprepare(bsp_priv->aclk_mac);
+ clk_disable_unprepare(bsp_priv->aclk_mac);
- if (!IS_ERR(bsp_priv->pclk_mac))
- clk_disable_unprepare(bsp_priv->pclk_mac);
+ clk_disable_unprepare(bsp_priv->pclk_mac);
- if (!IS_ERR(bsp_priv->mac_clk_tx))
- clk_disable_unprepare(bsp_priv->mac_clk_tx);
+ clk_disable_unprepare(bsp_priv->mac_clk_tx);
/**
* if (!IS_ERR(bsp_priv->clk_mac))
* clk_disable_unprepare(bsp_priv->clk_mac);
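The dropped guards lean on the clk API's own contract: clk_disable() and clk_unprepare() bail out early on IS_ERR_OR_NULL() pointers, so calling clk_disable_unprepare() on a clock that was never successfully acquired is already safe and the per-clock checks were dead weight. The contract being relied on looks roughly like this (sketch of mainline clk.c behaviour, not this driver's code):

void clk_unprepare(struct clk *clk)
{
	if (IS_ERR_OR_NULL(clk))
		return;		/* NULL and ERR_PTR clocks are ignored */
	/* ... */
}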
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
index 9e6db16af663..7e2e79dedebf 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
@@ -16,49 +16,180 @@
#include <linux/of_net.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
+#include <linux/pm_wakeirq.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/stmmac.h>
#include "stmmac_platform.h"
-#define MII_PHY_SEL_MASK BIT(23)
+#define SYSCFG_MCU_ETH_MASK BIT(23)
+#define SYSCFG_MP1_ETH_MASK GENMASK(23, 16)
+
+#define SYSCFG_PMCR_ETH_CLK_SEL BIT(16)
+#define SYSCFG_PMCR_ETH_REF_CLK_SEL BIT(17)
+#define SYSCFG_PMCR_ETH_SEL_MII BIT(20)
+#define SYSCFG_PMCR_ETH_SEL_RGMII BIT(21)
+#define SYSCFG_PMCR_ETH_SEL_RMII BIT(23)
+#define SYSCFG_PMCR_ETH_SEL_GMII 0
+#define SYSCFG_MCU_ETH_SEL_MII 0
+#define SYSCFG_MCU_ETH_SEL_RMII 1
struct stm32_dwmac {
struct clk *clk_tx;
struct clk *clk_rx;
+ struct clk *clk_eth_ck;
+ struct clk *clk_ethstp;
+ struct clk *syscfg_clk;
+ bool int_phyclk; /* Clock from RCC to drive PHY */
u32 mode_reg; /* MAC glue-logic mode register */
struct regmap *regmap;
u32 speed;
+ const struct stm32_ops *ops;
+ struct device *dev;
+};
+
+struct stm32_ops {
+ int (*set_mode)(struct plat_stmmacenet_data *plat_dat);
+ int (*clk_prepare)(struct stm32_dwmac *dwmac, bool prepare);
+ int (*suspend)(struct stm32_dwmac *dwmac);
+ void (*resume)(struct stm32_dwmac *dwmac);
+ int (*parse_data)(struct stm32_dwmac *dwmac,
+ struct device *dev);
+ u32 syscfg_eth_mask;
};
static int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat)
{
struct stm32_dwmac *dwmac = plat_dat->bsp_priv;
- u32 reg = dwmac->mode_reg;
- u32 val;
int ret;
- val = (plat_dat->interface == PHY_INTERFACE_MODE_MII) ? 0 : 1;
- ret = regmap_update_bits(dwmac->regmap, reg, MII_PHY_SEL_MASK, val);
- if (ret)
- return ret;
+ if (dwmac->ops->set_mode) {
+ ret = dwmac->ops->set_mode(plat_dat);
+ if (ret)
+ return ret;
+ }
ret = clk_prepare_enable(dwmac->clk_tx);
if (ret)
return ret;
- ret = clk_prepare_enable(dwmac->clk_rx);
- if (ret)
- clk_disable_unprepare(dwmac->clk_tx);
+ if (!dwmac->dev->power.is_suspended) {
+ ret = clk_prepare_enable(dwmac->clk_rx);
+ if (ret) {
+ clk_disable_unprepare(dwmac->clk_tx);
+ return ret;
+ }
+ }
+
+ if (dwmac->ops->clk_prepare) {
+ ret = dwmac->ops->clk_prepare(dwmac, true);
+ if (ret) {
+ clk_disable_unprepare(dwmac->clk_rx);
+ clk_disable_unprepare(dwmac->clk_tx);
+ }
+ }
return ret;
}
+static int stm32mp1_clk_prepare(struct stm32_dwmac *dwmac, bool prepare)
+{
+ int ret = 0;
+
+ if (prepare) {
+ ret = clk_prepare_enable(dwmac->syscfg_clk);
+ if (ret)
+ return ret;
+
+ if (dwmac->int_phyclk) {
+ ret = clk_prepare_enable(dwmac->clk_eth_ck);
+ if (ret) {
+ clk_disable_unprepare(dwmac->syscfg_clk);
+ return ret;
+ }
+ }
+ } else {
+ clk_disable_unprepare(dwmac->syscfg_clk);
+ if (dwmac->int_phyclk)
+ clk_disable_unprepare(dwmac->clk_eth_ck);
+ }
+ return ret;
+}
+
+static int stm32mp1_set_mode(struct plat_stmmacenet_data *plat_dat)
+{
+ struct stm32_dwmac *dwmac = plat_dat->bsp_priv;
+ u32 reg = dwmac->mode_reg;
+ int val;
+
+ switch (plat_dat->interface) {
+ case PHY_INTERFACE_MODE_MII:
+ val = SYSCFG_PMCR_ETH_SEL_MII;
+ pr_debug("SYSCFG init : PHY_INTERFACE_MODE_MII\n");
+ break;
+ case PHY_INTERFACE_MODE_GMII:
+ val = SYSCFG_PMCR_ETH_SEL_GMII;
+ if (dwmac->int_phyclk)
+ val |= SYSCFG_PMCR_ETH_CLK_SEL;
+ pr_debug("SYSCFG init : PHY_INTERFACE_MODE_GMII\n");
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ val = SYSCFG_PMCR_ETH_SEL_RMII;
+ if (dwmac->int_phyclk)
+ val |= SYSCFG_PMCR_ETH_REF_CLK_SEL;
+ pr_debug("SYSCFG init : PHY_INTERFACE_MODE_RMII\n");
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ val = SYSCFG_PMCR_ETH_SEL_RGMII;
+ if (dwmac->int_phyclk)
+ val |= SYSCFG_PMCR_ETH_CLK_SEL;
+ pr_debug("SYSCFG init : PHY_INTERFACE_MODE_RGMII\n");
+ break;
+ default:
+ pr_debug("SYSCFG init : unsupported interface %d\n",
+ plat_dat->interface);
+ /* Other interfaces are not managed */
+ return -EINVAL;
+ }
+
+ return regmap_update_bits(dwmac->regmap, reg,
+ dwmac->ops->syscfg_eth_mask, val);
+}
+
+static int stm32mcu_set_mode(struct plat_stmmacenet_data *plat_dat)
+{
+ struct stm32_dwmac *dwmac = plat_dat->bsp_priv;
+ u32 reg = dwmac->mode_reg;
+ int val;
+
+ switch (plat_dat->interface) {
+ case PHY_INTERFACE_MODE_MII:
+ val = SYSCFG_MCU_ETH_SEL_MII;
+ pr_debug("SYSCFG init : PHY_INTERFACE_MODE_MII\n");
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ val = SYSCFG_MCU_ETH_SEL_RMII;
+ pr_debug("SYSCFG init : PHY_INTERFACE_MODE_RMII\n");
+ break;
+ default:
+ pr_debug("SYSCFG init : unsupported interface %d\n",
+ plat_dat->interface);
+ /* Other interfaces are not managed */
+ return -EINVAL;
+ }
+
+ return regmap_update_bits(dwmac->regmap, reg,
+ dwmac->ops->syscfg_eth_mask, val);
+}
+
static void stm32_dwmac_clk_disable(struct stm32_dwmac *dwmac)
{
clk_disable_unprepare(dwmac->clk_tx);
clk_disable_unprepare(dwmac->clk_rx);
+
+ if (dwmac->ops->clk_prepare)
+ dwmac->ops->clk_prepare(dwmac, false);
}
static int stm32_dwmac_parse_data(struct stm32_dwmac *dwmac,
@@ -70,15 +201,22 @@ static int stm32_dwmac_parse_data(struct stm32_dwmac *dwmac,
/* Get TX/RX clocks */
dwmac->clk_tx = devm_clk_get(dev, "mac-clk-tx");
if (IS_ERR(dwmac->clk_tx)) {
- dev_err(dev, "No tx clock provided...\n");
+ dev_err(dev, "No ETH Tx clock provided...\n");
return PTR_ERR(dwmac->clk_tx);
}
+
dwmac->clk_rx = devm_clk_get(dev, "mac-clk-rx");
if (IS_ERR(dwmac->clk_rx)) {
- dev_err(dev, "No rx clock provided...\n");
+ dev_err(dev, "No ETH Rx clock provided...\n");
return PTR_ERR(dwmac->clk_rx);
}
+ if (dwmac->ops->parse_data) {
+ err = dwmac->ops->parse_data(dwmac, dev);
+ if (err)
+ return err;
+ }
+
/* Get mode register */
dwmac->regmap = syscon_regmap_lookup_by_phandle(np, "st,syscon");
if (IS_ERR(dwmac->regmap))
@@ -91,11 +229,46 @@ static int stm32_dwmac_parse_data(struct stm32_dwmac *dwmac,
return err;
}
+static int stm32mp1_parse_data(struct stm32_dwmac *dwmac,
+ struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+
+ dwmac->int_phyclk = of_property_read_bool(np, "st,int-phyclk");
+
+ /* Check if internal clk from RCC selected */
+ if (dwmac->int_phyclk) {
+ /* Get ETH_CLK clocks */
+ dwmac->clk_eth_ck = devm_clk_get(dev, "eth-ck");
+ if (IS_ERR(dwmac->clk_eth_ck)) {
+ dev_err(dev, "No ETH CK clock provided...\n");
+ return PTR_ERR(dwmac->clk_eth_ck);
+ }
+ }
+
+ /* Clock used for low power mode */
+ dwmac->clk_ethstp = devm_clk_get(dev, "ethstp");
+ if (IS_ERR(dwmac->clk_ethstp)) {
+ dev_err(dev, "No ETH peripheral clock provided for CStop mode ...\n");
+ return PTR_ERR(dwmac->clk_ethstp);
+ }
+
+ /* Clock for sysconfig */
+ dwmac->syscfg_clk = devm_clk_get(dev, "syscfg-clk");
+ if (IS_ERR(dwmac->syscfg_clk)) {
+ dev_err(dev, "No syscfg clock provided...\n");
+ return PTR_ERR(dwmac->syscfg_clk);
+ }
+
+ return 0;
+}
+
static int stm32_dwmac_probe(struct platform_device *pdev)
{
struct plat_stmmacenet_data *plat_dat;
struct stmmac_resources stmmac_res;
struct stm32_dwmac *dwmac;
+ const struct stm32_ops *data;
int ret;
ret = stmmac_get_platform_resources(pdev, &stmmac_res);
@@ -112,6 +285,16 @@ static int stm32_dwmac_probe(struct platform_device *pdev)
goto err_remove_config_dt;
}
+ data = of_device_get_match_data(&pdev->dev);
+ if (!data) {
+ dev_err(&pdev->dev, "no of match data provided\n");
+ ret = -EINVAL;
+ goto err_remove_config_dt;
+ }
+
+ dwmac->ops = data;
+ dwmac->dev = &pdev->dev;
+
ret = stm32_dwmac_parse_data(dwmac, &pdev->dev);
if (ret) {
dev_err(&pdev->dev, "Unable to parse OF data\n");
@@ -149,15 +332,48 @@ static int stm32_dwmac_remove(struct platform_device *pdev)
return ret;
}
+static int stm32mp1_suspend(struct stm32_dwmac *dwmac)
+{
+ int ret = 0;
+
+ ret = clk_prepare_enable(dwmac->clk_ethstp);
+ if (ret)
+ return ret;
+
+ clk_disable_unprepare(dwmac->clk_tx);
+ clk_disable_unprepare(dwmac->syscfg_clk);
+ if (dwmac->int_phyclk)
+ clk_disable_unprepare(dwmac->clk_eth_ck);
+
+ return ret;
+}
+
+static void stm32mp1_resume(struct stm32_dwmac *dwmac)
+{
+ clk_disable_unprepare(dwmac->clk_ethstp);
+}
+
+static int stm32mcu_suspend(struct stm32_dwmac *dwmac)
+{
+ clk_disable_unprepare(dwmac->clk_tx);
+ clk_disable_unprepare(dwmac->clk_rx);
+
+ return 0;
+}
+
#ifdef CONFIG_PM_SLEEP
static int stm32_dwmac_suspend(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct stmmac_priv *priv = netdev_priv(ndev);
+ struct stm32_dwmac *dwmac = priv->plat->bsp_priv;
+
int ret;
ret = stmmac_suspend(dev);
- stm32_dwmac_clk_disable(priv->plat->bsp_priv);
+
+ if (dwmac->ops->suspend)
+ ret = dwmac->ops->suspend(dwmac);
return ret;
}
@@ -166,8 +382,12 @@ static int stm32_dwmac_resume(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct stmmac_priv *priv = netdev_priv(ndev);
+ struct stm32_dwmac *dwmac = priv->plat->bsp_priv;
int ret;
+ if (dwmac->ops->resume)
+ dwmac->ops->resume(dwmac);
+
ret = stm32_dwmac_init(priv->plat);
if (ret)
return ret;
@@ -181,8 +401,24 @@ static int stm32_dwmac_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(stm32_dwmac_pm_ops,
stm32_dwmac_suspend, stm32_dwmac_resume);
+static struct stm32_ops stm32mcu_dwmac_data = {
+ .set_mode = stm32mcu_set_mode,
+ .suspend = stm32mcu_suspend,
+ .syscfg_eth_mask = SYSCFG_MCU_ETH_MASK
+};
+
+static struct stm32_ops stm32mp1_dwmac_data = {
+ .set_mode = stm32mp1_set_mode,
+ .clk_prepare = stm32mp1_clk_prepare,
+ .suspend = stm32mp1_suspend,
+ .resume = stm32mp1_resume,
+ .parse_data = stm32mp1_parse_data,
+ .syscfg_eth_mask = SYSCFG_MP1_ETH_MASK
+};
+
static const struct of_device_id stm32_dwmac_match[] = {
- { .compatible = "st,stm32-dwmac"},
+ { .compatible = "st,stm32-dwmac", .data = &stm32mcu_dwmac_data},
+ { .compatible = "st,stm32mp1-dwmac", .data = &stm32mp1_dwmac_data},
{ }
};
MODULE_DEVICE_TABLE(of, stm32_dwmac_match);
@@ -199,5 +435,6 @@ static struct platform_driver stm32_dwmac_driver = {
module_platform_driver(stm32_dwmac_driver);
MODULE_AUTHOR("Alexandre Torgue <alexandre.torgue@gmail.com>");
-MODULE_DESCRIPTION("STMicroelectronics MCU DWMAC Specific Glue layer");
+MODULE_AUTHOR("Christophe Roullier <christophe.roullier@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics STM32 DWMAC Specific Glue layer");
MODULE_LICENSE("GPL v2");
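The MCU (STM32F/H series) and MPU (STM32MP1) glue now share one driver through the stm32_ops table: only the syscon field layout and the clock tree differ, so each compatible brings its own set_mode/clk_prepare/suspend/resume set. For example, MP1 RMII with the RCC-provided PHY clock reduces to a single masked syscon update, per stm32mp1_set_mode() above:

u32 val = SYSCFG_PMCR_ETH_SEL_RMII | SYSCFG_PMCR_ETH_REF_CLK_SEL;

ret = regmap_update_bits(dwmac->regmap, dwmac->mode_reg,
			 SYSCFG_MP1_ETH_MASK, val);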
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index a3fa65b1ca8e..2e6e2a96b4f2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -42,17 +42,27 @@
* This value is used to properly disable the EMAC,
* and as a known-good starting value in case the boot
* process (e.g. U-Boot) left the block configured.
+ * @syscon_field: reg_field for the syscon's GMAC register
* @soc_has_internal_phy: Does the MAC embed an internal PHY
* @support_mii: Does the MAC handle MII
* @support_rmii: Does the MAC handle RMII
* @support_rgmii: Does the MAC handle RGMII
+ *
+ * @rx_delay_max: Maximum raw value for RX delay chain
+ * @tx_delay_max: Maximum raw value for TX delay chain
+ * These two also indicate the bitmask for
+ * the RX and TX delay chain registers. A
+ * value of zero indicates this is not supported.
*/
struct emac_variant {
u32 default_syscon_value;
+ const struct reg_field *syscon_field;
bool soc_has_internal_phy;
bool support_mii;
bool support_rmii;
bool support_rgmii;
+ u8 rx_delay_max;
+ u8 tx_delay_max;
};
/* struct sunxi_priv_data - hold all sunxi private data
@@ -71,38 +81,70 @@ struct sunxi_priv_data {
struct regulator *regulator;
struct reset_control *rst_ephy;
const struct emac_variant *variant;
- struct regmap *regmap;
+ struct regmap_field *regmap_field;
bool internal_phy_powered;
void *mux_handle;
};
+/* EMAC clock register @ 0x30 in the "system control" address range */
+static const struct reg_field sun8i_syscon_reg_field = {
+ .reg = 0x30,
+ .lsb = 0,
+ .msb = 31,
+};
+
+/* EMAC clock register @ 0x164 in the CCU address range */
+static const struct reg_field sun8i_ccu_reg_field = {
+ .reg = 0x164,
+ .lsb = 0,
+ .msb = 31,
+};
+
static const struct emac_variant emac_variant_h3 = {
.default_syscon_value = 0x58000,
+ .syscon_field = &sun8i_syscon_reg_field,
.soc_has_internal_phy = true,
.support_mii = true,
.support_rmii = true,
- .support_rgmii = true
+ .support_rgmii = true,
+ .rx_delay_max = 31,
+ .tx_delay_max = 7,
};
static const struct emac_variant emac_variant_v3s = {
.default_syscon_value = 0x38000,
+ .syscon_field = &sun8i_syscon_reg_field,
.soc_has_internal_phy = true,
.support_mii = true
};
static const struct emac_variant emac_variant_a83t = {
.default_syscon_value = 0,
+ .syscon_field = &sun8i_syscon_reg_field,
.soc_has_internal_phy = false,
.support_mii = true,
- .support_rgmii = true
+ .support_rgmii = true,
+ .rx_delay_max = 31,
+ .tx_delay_max = 7,
+};
+
+static const struct emac_variant emac_variant_r40 = {
+ .default_syscon_value = 0,
+ .syscon_field = &sun8i_ccu_reg_field,
+ .support_mii = true,
+ .support_rgmii = true,
+ .rx_delay_max = 7,
};
static const struct emac_variant emac_variant_a64 = {
.default_syscon_value = 0,
+ .syscon_field = &sun8i_syscon_reg_field,
.soc_has_internal_phy = false,
.support_mii = true,
.support_rmii = true,
- .support_rgmii = true
+ .support_rgmii = true,
+ .rx_delay_max = 31,
+ .tx_delay_max = 7,
};
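Editor's note: every rx_delay_max/tx_delay_max above is of the form 2^n - 1,
so the same value later doubles as the field mask when
sun8i_dwmac_set_syscon() clears the delay bits. A standalone check of that
property, using the H3 values, compiled as ordinary C:

    #include <stdio.h>

    int main(void)
    {
        unsigned int tx_delay_max = 7;   /* 3-bit ETXDC field */
        unsigned int rx_delay_max = 31;  /* 5-bit ERXDC field */
        unsigned int reg = ~0u;

        /* Clearing with the max value masks the whole field */
        reg &= ~(tx_delay_max << 10);    /* SYSCON_ETXDC_SHIFT */
        reg &= ~(rx_delay_max << 5);     /* SYSCON_ERXDC_SHIFT */
        printf("0x%08x\n", reg);         /* prints 0xffffe01f */
        return 0;
    }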
#define EMAC_BASIC_CTL0 0x00
@@ -206,9 +248,7 @@ static const struct emac_variant emac_variant_a64 = {
#define SYSCON_RMII_EN BIT(13) /* 1: enable RMII (overrides EPIT) */
/* Generic system control EMAC_CLK bits */
-#define SYSCON_ETXDC_MASK GENMASK(2, 0)
#define SYSCON_ETXDC_SHIFT 10
-#define SYSCON_ERXDC_MASK GENMASK(4, 0)
#define SYSCON_ERXDC_SHIFT 5
/* EMAC PHY Interface Type */
#define SYSCON_EPIT BIT(2) /* 1: RGMII, 0: MII */
@@ -216,7 +256,6 @@ static const struct emac_variant emac_variant_a64 = {
#define SYSCON_ETCS_MII 0x0
#define SYSCON_ETCS_EXT_GMII 0x1
#define SYSCON_ETCS_INT_GMII 0x2
-#define SYSCON_EMAC_REG 0x30
/* sun8i_dwmac_dma_reset() - reset the EMAC
* Called from stmmac via stmmac_dma_ops->reset
@@ -237,17 +276,28 @@ static int sun8i_dwmac_dma_reset(void __iomem *ioaddr)
* Called from stmmac via stmmac_dma_ops->init
*/
static void sun8i_dwmac_dma_init(void __iomem *ioaddr,
- struct stmmac_dma_cfg *dma_cfg,
- u32 dma_tx, u32 dma_rx, int atds)
+ struct stmmac_dma_cfg *dma_cfg, int atds)
{
- /* Write TX and RX descriptors address */
- writel(dma_rx, ioaddr + EMAC_RX_DESC_LIST);
- writel(dma_tx, ioaddr + EMAC_TX_DESC_LIST);
-
writel(EMAC_RX_INT | EMAC_TX_INT, ioaddr + EMAC_INT_EN);
writel(0x1FFFFFF, ioaddr + EMAC_INT_STA);
}
+static void sun8i_dwmac_dma_init_rx(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg,
+ u32 dma_rx_phy, u32 chan)
+{
+ /* Write RX descriptors address */
+ writel(dma_rx_phy, ioaddr + EMAC_RX_DESC_LIST);
+}
+
+static void sun8i_dwmac_dma_init_tx(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg,
+ u32 dma_tx_phy, u32 chan)
+{
+ /* Write TX descriptors address */
+ writel(dma_tx_phy, ioaddr + EMAC_TX_DESC_LIST);
+}
+
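Editor's note: this split of the monolithic .init (and, below, .dma_mode)
into per-direction, per-channel callbacks mirrors the hwif rework later in
this series: multi-queue cores program each DMA channel separately, while a
single-channel core like this one simply ignores the chan argument. A
caller-side sketch, with the queue structures and channel counts assumed for
illustration (the stmmac_init_*_chan wrappers appear in hwif.h further down):

    u32 chan;

    for (chan = 0; chan < rx_channels_count; chan++)
        stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
                            rx_q[chan].dma_rx_phy, chan);

    for (chan = 0; chan < tx_channels_count; chan++)
        stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
                            tx_q[chan].dma_tx_phy, chan);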
/* sun8i_dwmac_dump_regs() - Dump EMAC address space
* Called from stmmac_dma_ops->dump_regs
* Used for ethtool
@@ -398,13 +448,36 @@ static int sun8i_dwmac_dma_interrupt(void __iomem *ioaddr,
return ret;
}
-static void sun8i_dwmac_dma_operation_mode(void __iomem *ioaddr, int txmode,
- int rxmode, int rxfifosz)
+static void sun8i_dwmac_dma_operation_mode_rx(void __iomem *ioaddr, int mode,
+ u32 channel, int fifosz, u8 qmode)
+{
+ u32 v;
+
+ v = readl(ioaddr + EMAC_RX_CTL1);
+ if (mode == SF_DMA_MODE) {
+ v |= EMAC_RX_MD;
+ } else {
+ v &= ~EMAC_RX_MD;
+ v &= ~EMAC_RX_TH_MASK;
+ if (mode < 32)
+ v |= EMAC_RX_TH_32;
+ else if (mode < 64)
+ v |= EMAC_RX_TH_64;
+ else if (mode < 96)
+ v |= EMAC_RX_TH_96;
+ else if (mode < 128)
+ v |= EMAC_RX_TH_128;
+ }
+ writel(v, ioaddr + EMAC_RX_CTL1);
+}
+
+static void sun8i_dwmac_dma_operation_mode_tx(void __iomem *ioaddr, int mode,
+ u32 channel, int fifosz, u8 qmode)
{
u32 v;
v = readl(ioaddr + EMAC_TX_CTL1);
- if (txmode == SF_DMA_MODE) {
+ if (mode == SF_DMA_MODE) {
v |= EMAC_TX_MD;
/* Undocumented bit (called TX_NEXT_FRM in BSP), the original
* comment is
@@ -415,40 +488,26 @@ static void sun8i_dwmac_dma_operation_mode(void __iomem *ioaddr, int txmode,
} else {
v &= ~EMAC_TX_MD;
v &= ~EMAC_TX_TH_MASK;
- if (txmode < 64)
+ if (mode < 64)
v |= EMAC_TX_TH_64;
- else if (txmode < 128)
+ else if (mode < 128)
v |= EMAC_TX_TH_128;
- else if (txmode < 192)
+ else if (mode < 192)
v |= EMAC_TX_TH_192;
- else if (txmode < 256)
+ else if (mode < 256)
v |= EMAC_TX_TH_256;
}
writel(v, ioaddr + EMAC_TX_CTL1);
-
- v = readl(ioaddr + EMAC_RX_CTL1);
- if (rxmode == SF_DMA_MODE) {
- v |= EMAC_RX_MD;
- } else {
- v &= ~EMAC_RX_MD;
- v &= ~EMAC_RX_TH_MASK;
- if (rxmode < 32)
- v |= EMAC_RX_TH_32;
- else if (rxmode < 64)
- v |= EMAC_RX_TH_64;
- else if (rxmode < 96)
- v |= EMAC_RX_TH_96;
- else if (rxmode < 128)
- v |= EMAC_RX_TH_128;
- }
- writel(v, ioaddr + EMAC_RX_CTL1);
}
static const struct stmmac_dma_ops sun8i_dwmac_dma_ops = {
.reset = sun8i_dwmac_dma_reset,
.init = sun8i_dwmac_dma_init,
+ .init_rx_chan = sun8i_dwmac_dma_init_rx,
+ .init_tx_chan = sun8i_dwmac_dma_init_tx,
.dump_regs = sun8i_dwmac_dump_regs,
- .dma_mode = sun8i_dwmac_dma_operation_mode,
+ .dma_rx_mode = sun8i_dwmac_dma_operation_mode_rx,
+ .dma_tx_mode = sun8i_dwmac_dma_operation_mode_tx,
.enable_dma_transmission = sun8i_dwmac_enable_dma_transmission,
.enable_dma_irq = sun8i_dwmac_enable_dma_irq,
.disable_dma_irq = sun8i_dwmac_disable_dma_irq,
@@ -745,7 +804,7 @@ static int mdio_mux_syscon_switch_fn(int current_child, int desired_child,
bool need_power_ephy = false;
if (current_child ^ desired_child) {
- regmap_read(gmac->regmap, SYSCON_EMAC_REG, &reg);
+ regmap_field_read(gmac->regmap_field, &reg);
switch (desired_child) {
case DWMAC_SUN8I_MDIO_MUX_INTERNAL_ID:
dev_info(priv->device, "Switch mux to internal PHY");
@@ -763,7 +822,7 @@ static int mdio_mux_syscon_switch_fn(int current_child, int desired_child,
desired_child);
return -EINVAL;
}
- regmap_write(gmac->regmap, SYSCON_EMAC_REG, val);
+ regmap_field_write(gmac->regmap_field, val);
if (need_power_ephy) {
ret = sun8i_dwmac_power_internal_phy(priv);
if (ret)
@@ -801,7 +860,7 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
int ret;
u32 reg, val;
- regmap_read(gmac->regmap, SYSCON_EMAC_REG, &val);
+ regmap_field_read(gmac->regmap_field, &val);
reg = gmac->variant->default_syscon_value;
if (reg != val)
dev_warn(priv->device,
@@ -835,8 +894,9 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
}
val /= 100;
dev_dbg(priv->device, "set tx-delay to %x\n", val);
- if (val <= SYSCON_ETXDC_MASK) {
- reg &= ~(SYSCON_ETXDC_MASK << SYSCON_ETXDC_SHIFT);
+ if (val <= gmac->variant->tx_delay_max) {
+ reg &= ~(gmac->variant->tx_delay_max <<
+ SYSCON_ETXDC_SHIFT);
reg |= (val << SYSCON_ETXDC_SHIFT);
} else {
dev_err(priv->device, "Invalid TX clock delay: %d\n",
@@ -852,8 +912,9 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
}
val /= 100;
dev_dbg(priv->device, "set rx-delay to %x\n", val);
- if (val <= SYSCON_ERXDC_MASK) {
- reg &= ~(SYSCON_ERXDC_MASK << SYSCON_ERXDC_SHIFT);
+ if (val <= gmac->variant->rx_delay_max) {
+ reg &= ~(gmac->variant->rx_delay_max <<
+ SYSCON_ERXDC_SHIFT);
reg |= (val << SYSCON_ERXDC_SHIFT);
} else {
dev_err(priv->device, "Invalid RX clock delay: %d\n",
@@ -883,7 +944,7 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
return -EINVAL;
}
- regmap_write(gmac->regmap, SYSCON_EMAC_REG, reg);
+ regmap_field_write(gmac->regmap_field, reg);
return 0;
}
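Editor's note: the conversion above turns a device-tree delay in picoseconds
into the raw delay-chain value: one step per 100 ps, range-checked against
the variant limit. A standalone arithmetic check (the 700 ps input is
illustrative):

    #include <stdio.h>

    int main(void)
    {
        unsigned int tx_delay_ps = 700;   /* from DT, illustrative */
        unsigned int tx_delay_max = 7;    /* H3-style 3-bit field */
        unsigned int val = tx_delay_ps / 100;  /* one step per 100 ps */

        if (val <= tx_delay_max)
            printf("raw ETXDC value: %u\n", val);  /* prints 7 */
        else
            printf("Invalid TX clock delay: %u\n", tx_delay_ps);
        return 0;
    }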
@@ -892,7 +953,7 @@ static void sun8i_dwmac_unset_syscon(struct sunxi_priv_data *gmac)
{
u32 reg = gmac->variant->default_syscon_value;
- regmap_write(gmac->regmap, SYSCON_EMAC_REG, reg);
+ regmap_field_write(gmac->regmap_field, reg);
}
static void sun8i_dwmac_exit(struct platform_device *pdev, void *priv)
@@ -971,6 +1032,34 @@ static struct mac_device_info *sun8i_dwmac_setup(void *ppriv)
return mac;
}
+static struct regmap *sun8i_dwmac_get_syscon_from_dev(struct device_node *node)
+{
+ struct device_node *syscon_node;
+ struct platform_device *syscon_pdev;
+ struct regmap *regmap = NULL;
+
+ syscon_node = of_parse_phandle(node, "syscon", 0);
+ if (!syscon_node)
+ return ERR_PTR(-ENODEV);
+
+ syscon_pdev = of_find_device_by_node(syscon_node);
+ if (!syscon_pdev) {
+ /* platform device might not be probed yet */
+ regmap = ERR_PTR(-EPROBE_DEFER);
+ goto out_put_node;
+ }
+
+ /* If no regmap is found then the other device driver is at fault */
+ regmap = dev_get_regmap(&syscon_pdev->dev, NULL);
+ if (!regmap)
+ regmap = ERR_PTR(-EINVAL);
+
+ platform_device_put(syscon_pdev);
+out_put_node:
+ of_node_put(syscon_node);
+ return regmap;
+}
+
static int sun8i_dwmac_probe(struct platform_device *pdev)
{
struct plat_stmmacenet_data *plat_dat;
@@ -980,6 +1069,7 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
int ret;
struct stmmac_priv *priv;
struct net_device *ndev;
+ struct regmap *regmap;
ret = stmmac_get_platform_resources(pdev, &stmmac_res);
if (ret)
@@ -1014,14 +1104,41 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
gmac->regulator = NULL;
}
- gmac->regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
- "syscon");
- if (IS_ERR(gmac->regmap)) {
- ret = PTR_ERR(gmac->regmap);
+ /* The "GMAC clock control" register might be located in the
+ * CCU address range (on the R40), or the system control address
+ * range (on most other sun8i and later SoCs).
+ *
+ * The former controls most, if not all, clocks in the SoC. The
+ * latter has an SoC identification register, and on some SoCs,
+ * controls to map device specific SRAM to either the intended
+ * peripheral, or the CPU address space.
+ *
+ * In either case, access to the register needed here should be
+ * coordinated and restricted. This is done by having the device
+ * export a custom regmap, instead of a generic syscon, which
+ * would grant unrestricted access to all registers.
+ *
+ * To support old device trees, we fall back to using the syscon
+ * interface if possible.
+ */
+ regmap = sun8i_dwmac_get_syscon_from_dev(pdev->dev.of_node);
+ if (IS_ERR(regmap))
+ regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "syscon");
+ if (IS_ERR(regmap)) {
+ ret = PTR_ERR(regmap);
dev_err(&pdev->dev, "Unable to map syscon: %d\n", ret);
return ret;
}
+ gmac->regmap_field = devm_regmap_field_alloc(dev, regmap,
+ *gmac->variant->syscon_field);
+ if (IS_ERR(gmac->regmap_field)) {
+ ret = PTR_ERR(gmac->regmap_field);
+ dev_err(dev, "Unable to map syscon register: %d\n", ret);
+ return ret;
+ }
+
plat_dat->interface = of_get_phy_mode(dev->of_node);
/* platform data specifying hardware features and callbacks.
@@ -1078,6 +1195,8 @@ static const struct of_device_id sun8i_dwmac_match[] = {
.data = &emac_variant_v3s },
{ .compatible = "allwinner,sun8i-a83t-emac",
.data = &emac_variant_a83t },
+ { .compatible = "allwinner,sun8i-r40-gmac",
+ .data = &emac_variant_r40 },
{ .compatible = "allwinner,sun50i-a64-emac",
.data = &emac_variant_a64 },
{ }
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
index c02d36629c52..184ca13c8f79 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
@@ -29,7 +29,6 @@
#define GMAC_MII_DATA 0x00000014 /* MII Data */
#define GMAC_FLOW_CTRL 0x00000018 /* Flow Control */
#define GMAC_VLAN_TAG 0x0000001c /* VLAN Tag */
-#define GMAC_VERSION 0x00000020 /* GMAC CORE Version */
#define GMAC_DEBUG 0x00000024 /* GMAC debug register */
#define GMAC_WAKEUP_FILTER 0x00000028 /* Wake-up Frame Filter */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index ef10baf14186..0877bde6e860 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -27,6 +27,7 @@
#include <linux/ethtool.h>
#include <net/dsa.h>
#include <asm/io.h>
+#include "stmmac.h"
#include "stmmac_pcs.h"
#include "dwmac1000.h"
@@ -498,7 +499,7 @@ static void dwmac1000_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x,
x->mac_gmii_rx_proto_engine++;
}
-static const struct stmmac_ops dwmac1000_ops = {
+const struct stmmac_ops dwmac1000_ops = {
.core_init = dwmac1000_core_init,
.set_mac = stmmac_set_mac,
.rx_ipc = dwmac1000_rx_ipc_enable,
@@ -519,28 +520,21 @@ static const struct stmmac_ops dwmac1000_ops = {
.pcs_get_adv_lp = dwmac1000_get_adv_lp,
};
-struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins,
- int perfect_uc_entries,
- int *synopsys_id)
+int dwmac1000_setup(struct stmmac_priv *priv)
{
- struct mac_device_info *mac;
- u32 hwid = readl(ioaddr + GMAC_VERSION);
+ struct mac_device_info *mac = priv->hw;
- mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
- if (!mac)
- return NULL;
+ dev_info(priv->device, "\tDWMAC1000\n");
- mac->pcsr = ioaddr;
- mac->multicast_filter_bins = mcbins;
- mac->unicast_filter_entries = perfect_uc_entries;
+ priv->dev->priv_flags |= IFF_UNICAST_FLT;
+ mac->pcsr = priv->ioaddr;
+ mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
+ mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
mac->mcast_bits_log2 = 0;
if (mac->multicast_filter_bins)
mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
- mac->mac = &dwmac1000_ops;
- mac->dma = &dwmac1000_dma_ops;
-
mac->link.duplex = GMAC_CONTROL_DM;
mac->link.speed10 = GMAC_CONTROL_PS;
mac->link.speed100 = GMAC_CONTROL_PS | GMAC_CONTROL_FES;
@@ -555,8 +549,5 @@ struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins,
mac->mii.clk_csr_shift = 2;
mac->mii.clk_csr_mask = GENMASK(5, 2);
- /* Get and dump the chip ID */
- *synopsys_id = stmmac_get_synopsys_id(hwid);
-
- return mac;
+ return 0;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
index 7ecf549c7f1c..aacc4aa80e3c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
@@ -81,8 +81,7 @@ static void dwmac1000_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
}
static void dwmac1000_dma_init(void __iomem *ioaddr,
- struct stmmac_dma_cfg *dma_cfg,
- u32 dma_tx, u32 dma_rx, int atds)
+ struct stmmac_dma_cfg *dma_cfg, int atds)
{
u32 value = readl(ioaddr + DMA_BUS_MODE);
int txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
@@ -119,12 +118,22 @@ static void dwmac1000_dma_init(void __iomem *ioaddr,
/* Mask interrupts by writing to CSR7 */
writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
+}
- /* RX/TX descriptor base address lists must be written into
- * DMA CSR3 and CSR4, respectively
- */
- writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR);
- writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR);
+static void dwmac1000_dma_init_rx(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg,
+ u32 dma_rx_phy, u32 chan)
+{
+ /* RX descriptor base address list must be written into DMA CSR3 */
+ writel(dma_rx_phy, ioaddr + DMA_RCV_BASE_ADDR);
+}
+
+static void dwmac1000_dma_init_tx(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg,
+ u32 dma_tx_phy, u32 chan)
+{
+ /* TX descriptor base address list must be written into DMA CSR4 */
+ writel(dma_tx_phy, ioaddr + DMA_TX_BASE_ADDR);
}
static u32 dwmac1000_configure_fc(u32 csr6, int rxfifosz)
@@ -148,12 +157,40 @@ static u32 dwmac1000_configure_fc(u32 csr6, int rxfifosz)
return csr6;
}
-static void dwmac1000_dma_operation_mode(void __iomem *ioaddr, int txmode,
- int rxmode, int rxfifosz)
+static void dwmac1000_dma_operation_mode_rx(void __iomem *ioaddr, int mode,
+ u32 channel, int fifosz, u8 qmode)
+{
+ u32 csr6 = readl(ioaddr + DMA_CONTROL);
+
+ if (mode == SF_DMA_MODE) {
+ pr_debug("GMAC: enable RX store and forward mode\n");
+ csr6 |= DMA_CONTROL_RSF;
+ } else {
+ pr_debug("GMAC: disable RX SF mode (threshold %d)\n", mode);
+ csr6 &= ~DMA_CONTROL_RSF;
+ csr6 &= DMA_CONTROL_TC_RX_MASK;
+ if (mode <= 32)
+ csr6 |= DMA_CONTROL_RTC_32;
+ else if (mode <= 64)
+ csr6 |= DMA_CONTROL_RTC_64;
+ else if (mode <= 96)
+ csr6 |= DMA_CONTROL_RTC_96;
+ else
+ csr6 |= DMA_CONTROL_RTC_128;
+ }
+
+ /* Configure flow control based on rx fifo size */
+ csr6 = dwmac1000_configure_fc(csr6, fifosz);
+
+ writel(csr6, ioaddr + DMA_CONTROL);
+}
+
+static void dwmac1000_dma_operation_mode_tx(void __iomem *ioaddr, int mode,
+ u32 channel, int fifosz, u8 qmode)
{
u32 csr6 = readl(ioaddr + DMA_CONTROL);
- if (txmode == SF_DMA_MODE) {
+ if (mode == SF_DMA_MODE) {
pr_debug("GMAC: enable TX store and forward mode\n");
/* Transmit COE type 2 cannot be done in cut-through mode. */
csr6 |= DMA_CONTROL_TSF;
@@ -162,42 +199,22 @@ static void dwmac1000_dma_operation_mode(void __iomem *ioaddr, int txmode,
*/
csr6 |= DMA_CONTROL_OSF;
} else {
- pr_debug("GMAC: disabling TX SF (threshold %d)\n", txmode);
+ pr_debug("GMAC: disabling TX SF (threshold %d)\n", mode);
csr6 &= ~DMA_CONTROL_TSF;
csr6 &= DMA_CONTROL_TC_TX_MASK;
/* Set the transmit threshold */
- if (txmode <= 32)
+ if (mode <= 32)
csr6 |= DMA_CONTROL_TTC_32;
- else if (txmode <= 64)
+ else if (mode <= 64)
csr6 |= DMA_CONTROL_TTC_64;
- else if (txmode <= 128)
+ else if (mode <= 128)
csr6 |= DMA_CONTROL_TTC_128;
- else if (txmode <= 192)
+ else if (mode <= 192)
csr6 |= DMA_CONTROL_TTC_192;
else
csr6 |= DMA_CONTROL_TTC_256;
}
- if (rxmode == SF_DMA_MODE) {
- pr_debug("GMAC: enable RX store and forward mode\n");
- csr6 |= DMA_CONTROL_RSF;
- } else {
- pr_debug("GMAC: disable RX SF mode (threshold %d)\n", rxmode);
- csr6 &= ~DMA_CONTROL_RSF;
- csr6 &= DMA_CONTROL_TC_RX_MASK;
- if (rxmode <= 32)
- csr6 |= DMA_CONTROL_RTC_32;
- else if (rxmode <= 64)
- csr6 |= DMA_CONTROL_RTC_64;
- else if (rxmode <= 96)
- csr6 |= DMA_CONTROL_RTC_96;
- else
- csr6 |= DMA_CONTROL_RTC_128;
- }
-
- /* Configure flow control based on rx fifo size */
- csr6 = dwmac1000_configure_fc(csr6, rxfifosz);
-
writel(csr6, ioaddr + DMA_CONTROL);
}
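Editor's note: the ladder above rounds the requested TX threshold up to the
nearest hardware TTC setting. A standalone illustration of the mapping,
compiled as ordinary C:

    #include <stdio.h>

    static const char *ttc_for(int mode)
    {
        if (mode <= 32)  return "TTC_32";
        if (mode <= 64)  return "TTC_64";
        if (mode <= 128) return "TTC_128";
        if (mode <= 192) return "TTC_192";
        return "TTC_256";
    }

    int main(void)
    {
        int samples[] = { 16, 100, 200 };

        for (int i = 0; i < 3; i++)
            printf("mode %d -> %s\n", samples[i], ttc_for(samples[i]));
        return 0;  /* 16->TTC_32, 100->TTC_128, 200->TTC_256 */
    }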
@@ -256,9 +273,12 @@ static void dwmac1000_rx_watchdog(void __iomem *ioaddr, u32 riwt,
const struct stmmac_dma_ops dwmac1000_dma_ops = {
.reset = dwmac_dma_reset,
.init = dwmac1000_dma_init,
+ .init_rx_chan = dwmac1000_dma_init_rx,
+ .init_tx_chan = dwmac1000_dma_init_tx,
.axi = dwmac1000_dma_axi,
.dump_regs = dwmac1000_dump_dma_regs,
- .dma_mode = dwmac1000_dma_operation_mode,
+ .dma_rx_mode = dwmac1000_dma_operation_mode_rx,
+ .dma_tx_mode = dwmac1000_dma_operation_mode_tx,
.enable_dma_transmission = dwmac_enable_dma_transmission,
.enable_dma_irq = dwmac_enable_dma_irq,
.disable_dma_irq = dwmac_disable_dma_irq,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
index 91b23f9db31a..b735143987e1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
@@ -27,6 +27,7 @@
#include <linux/crc32.h>
#include <net/dsa.h>
#include <asm/io.h>
+#include "stmmac.h"
#include "dwmac100.h"
static void dwmac100_core_init(struct mac_device_info *hw,
@@ -159,7 +160,7 @@ static void dwmac100_pmt(struct mac_device_info *hw, unsigned long mode)
return;
}
-static const struct stmmac_ops dwmac100_ops = {
+const struct stmmac_ops dwmac100_ops = {
.core_init = dwmac100_core_init,
.set_mac = stmmac_set_mac,
.rx_ipc = dwmac100_rx_ipc_enable,
@@ -172,20 +173,13 @@ static const struct stmmac_ops dwmac100_ops = {
.get_umac_addr = dwmac100_get_umac_addr,
};
-struct mac_device_info *dwmac100_setup(void __iomem *ioaddr, int *synopsys_id)
+int dwmac100_setup(struct stmmac_priv *priv)
{
- struct mac_device_info *mac;
+ struct mac_device_info *mac = priv->hw;
- mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
- if (!mac)
- return NULL;
-
- pr_info("\tDWMAC100\n");
-
- mac->pcsr = ioaddr;
- mac->mac = &dwmac100_ops;
- mac->dma = &dwmac100_dma_ops;
+ dev_info(priv->device, "\tDWMAC100\n");
+ mac->pcsr = priv->ioaddr;
mac->link.duplex = MAC_CONTROL_F;
mac->link.speed10 = 0;
mac->link.speed100 = 0;
@@ -200,8 +194,5 @@ struct mac_device_info *dwmac100_setup(void __iomem *ioaddr, int *synopsys_id)
mac->mii.clk_csr_shift = 2;
mac->mii.clk_csr_mask = GENMASK(5, 2);
- /* Synopsys Id is not available on old chips */
- *synopsys_id = 0;
-
- return mac;
+ return 0;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
index 6502b9aa3bf5..21dee25ee570 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
@@ -29,8 +29,7 @@
#include "dwmac_dma.h"
static void dwmac100_dma_init(void __iomem *ioaddr,
- struct stmmac_dma_cfg *dma_cfg,
- u32 dma_tx, u32 dma_rx, int atds)
+ struct stmmac_dma_cfg *dma_cfg, int atds)
{
/* Enable Application Access by writing to DMA CSR0 */
writel(DMA_BUS_MODE_DEFAULT | (dma_cfg->pbl << DMA_BUS_MODE_PBL_SHIFT),
@@ -38,12 +37,22 @@ static void dwmac100_dma_init(void __iomem *ioaddr,
/* Mask interrupts by writing to CSR7 */
writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
+}
- /* RX/TX descriptor base addr lists must be written into
- * DMA CSR3 and CSR4, respectively
- */
- writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR);
- writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR);
+static void dwmac100_dma_init_rx(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg,
+ u32 dma_rx_phy, u32 chan)
+{
+ /* RX descriptor base address list must be written into DMA CSR3 */
+ writel(dma_rx_phy, ioaddr + DMA_RCV_BASE_ADDR);
+}
+
+static void dwmac100_dma_init_tx(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg,
+ u32 dma_tx_phy, u32 chan)
+{
+ /* TX descriptor base address list must be written into DMA CSR4 */
+ writel(dma_tx_phy, ioaddr + DMA_TX_BASE_ADDR);
}
/* Store and Forward capability is not used at all.
@@ -51,14 +60,14 @@ static void dwmac100_dma_init(void __iomem *ioaddr,
* The transmit threshold can be programmed by setting the TTC bits in the DMA
* control register.
*/
-static void dwmac100_dma_operation_mode(void __iomem *ioaddr, int txmode,
- int rxmode, int rxfifosz)
+static void dwmac100_dma_operation_mode_tx(void __iomem *ioaddr, int mode,
+ u32 channel, int fifosz, u8 qmode)
{
u32 csr6 = readl(ioaddr + DMA_CONTROL);
- if (txmode <= 32)
+ if (mode <= 32)
csr6 |= DMA_CONTROL_TTC_32;
- else if (txmode <= 64)
+ else if (mode <= 64)
csr6 |= DMA_CONTROL_TTC_64;
else
csr6 |= DMA_CONTROL_TTC_128;
@@ -112,8 +121,10 @@ static void dwmac100_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x,
const struct stmmac_dma_ops dwmac100_dma_ops = {
.reset = dwmac_dma_reset,
.init = dwmac100_dma_init,
+ .init_rx_chan = dwmac100_dma_init_rx,
+ .init_tx_chan = dwmac100_dma_init_tx,
.dump_regs = dwmac100_dump_dma_regs,
- .dma_mode = dwmac100_dma_operation_mode,
+ .dma_tx_mode = dwmac100_dma_operation_mode_tx,
.dma_diagnostic_fr = dwmac100_dma_diagnostic_fr,
.enable_dma_transmission = dwmac_enable_dma_transmission,
.enable_dma_irq = dwmac_enable_dma_irq,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
index dedd40613090..eb013d54025a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
@@ -34,7 +34,6 @@
#define GMAC_PCS_BASE 0x000000e0
#define GMAC_PHYIF_CONTROL_STATUS 0x000000f8
#define GMAC_PMT 0x000000c0
-#define GMAC_VERSION 0x00000110
#define GMAC_DEBUG 0x00000114
#define GMAC_HW_FEATURE0 0x0000011c
#define GMAC_HW_FEATURE1 0x00000120
@@ -188,6 +187,7 @@ enum power_event {
#define GMAC_HW_RXFIFOSIZE GENMASK(4, 0)
/* MAC HW features2 bitmap */
+#define GMAC_HW_FEAT_PPSOUTNUM GENMASK(26, 24)
#define GMAC_HW_FEAT_TXCHCNT GENMASK(21, 18)
#define GMAC_HW_FEAT_RXCHCNT GENMASK(15, 12)
#define GMAC_HW_FEAT_TXQCNT GENMASK(9, 6)
@@ -195,6 +195,9 @@ enum power_event {
/* MAC HW features3 bitmap */
#define GMAC_HW_FEAT_ASP GENMASK(29, 28)
+#define GMAC_HW_FEAT_FRPES GENMASK(14, 13)
+#define GMAC_HW_FEAT_FRPBS GENMASK(12, 11)
+#define GMAC_HW_FEAT_FRPSEL BIT(10)
/* MAC HW ADDR regs */
#define GMAC_HI_DCS GENMASK(18, 16)
@@ -203,6 +206,7 @@ enum power_event {
/* MTL registers */
#define MTL_OPERATION_MODE 0x00000c00
+#define MTL_FRPE BIT(15)
#define MTL_OPERATION_SCHALG_MASK GENMASK(6, 5)
#define MTL_OPERATION_SCHALG_WRR (0x0 << 5)
#define MTL_OPERATION_SCHALG_WFQ (0x1 << 5)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index 517b1f6736a8..7e5d5db0d516 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -18,6 +18,7 @@
#include <linux/ethtool.h>
#include <linux/io.h>
#include <net/dsa.h>
+#include "stmmac.h"
#include "stmmac_pcs.h"
#include "dwmac4.h"
#include "dwmac5.h"
@@ -700,7 +701,7 @@ static void dwmac4_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x,
x->mac_gmii_rx_proto_engine++;
}
-static const struct stmmac_ops dwmac4_ops = {
+const struct stmmac_ops dwmac4_ops = {
.core_init = dwmac4_core_init,
.set_mac = stmmac_set_mac,
.rx_ipc = dwmac4_rx_ipc_enable,
@@ -731,7 +732,7 @@ static const struct stmmac_ops dwmac4_ops = {
.set_filter = dwmac4_set_filter,
};
-static const struct stmmac_ops dwmac410_ops = {
+const struct stmmac_ops dwmac410_ops = {
.core_init = dwmac4_core_init,
.set_mac = stmmac_dwmac4_set_mac,
.rx_ipc = dwmac4_rx_ipc_enable,
@@ -762,7 +763,7 @@ static const struct stmmac_ops dwmac410_ops = {
.set_filter = dwmac4_set_filter,
};
-static const struct stmmac_ops dwmac510_ops = {
+const struct stmmac_ops dwmac510_ops = {
.core_init = dwmac4_core_init,
.set_mac = stmmac_dwmac4_set_mac,
.rx_ipc = dwmac4_rx_ipc_enable,
@@ -794,21 +795,20 @@ static const struct stmmac_ops dwmac510_ops = {
.safety_feat_config = dwmac5_safety_feat_config,
.safety_feat_irq_status = dwmac5_safety_feat_irq_status,
.safety_feat_dump = dwmac5_safety_feat_dump,
+ .rxp_config = dwmac5_rxp_config,
+ .flex_pps_config = dwmac5_flex_pps_config,
};
-struct mac_device_info *dwmac4_setup(void __iomem *ioaddr, int mcbins,
- int perfect_uc_entries, int *synopsys_id)
+int dwmac4_setup(struct stmmac_priv *priv)
{
- struct mac_device_info *mac;
- u32 hwid = readl(ioaddr + GMAC_VERSION);
+ struct mac_device_info *mac = priv->hw;
- mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
- if (!mac)
- return NULL;
+ dev_info(priv->device, "\tDWMAC4/5\n");
- mac->pcsr = ioaddr;
- mac->multicast_filter_bins = mcbins;
- mac->unicast_filter_entries = perfect_uc_entries;
+ priv->dev->priv_flags |= IFF_UNICAST_FLT;
+ mac->pcsr = priv->ioaddr;
+ mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
+ mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
mac->mcast_bits_log2 = 0;
if (mac->multicast_filter_bins)
@@ -828,20 +828,5 @@ struct mac_device_info *dwmac4_setup(void __iomem *ioaddr, int mcbins,
mac->mii.clk_csr_shift = 8;
mac->mii.clk_csr_mask = GENMASK(11, 8);
- /* Get and dump the chip ID */
- *synopsys_id = stmmac_get_synopsys_id(hwid);
-
- if (*synopsys_id > DWMAC_CORE_4_00)
- mac->dma = &dwmac410_dma_ops;
- else
- mac->dma = &dwmac4_dma_ops;
-
- if (*synopsys_id >= DWMAC_CORE_5_10)
- mac->mac = &dwmac510_ops;
- else if (*synopsys_id >= DWMAC_CORE_4_00)
- mac->mac = &dwmac410_ops;
- else
- mac->mac = &dwmac4_ops;
-
- return mac;
+ return 0;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index 2a6521d33e43..20299f6f65fc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -189,9 +189,12 @@ static void dwmac4_set_tx_owner(struct dma_desc *p)
p->des3 |= cpu_to_le32(TDES3_OWN);
}
-static void dwmac4_set_rx_owner(struct dma_desc *p)
+static void dwmac4_set_rx_owner(struct dma_desc *p, int disable_rx_ic)
{
- p->des3 |= cpu_to_le32(RDES3_OWN);
+ p->des3 = cpu_to_le32(RDES3_OWN | RDES3_BUFFER1_VALID_ADDR);
+
+ if (!disable_rx_ic)
+ p->des3 |= cpu_to_le32(RDES3_INT_ON_COMPLETION_EN);
}
static int dwmac4_get_tx_ls(struct dma_desc *p)
@@ -223,7 +226,7 @@ static int dwmac4_wrback_get_tx_timestamp_status(struct dma_desc *p)
return 0;
}
-static inline u64 dwmac4_get_timestamp(void *desc, u32 ats)
+static inline void dwmac4_get_timestamp(void *desc, u32 ats, u64 *ts)
{
struct dma_desc *p = (struct dma_desc *)desc;
u64 ns;
@@ -232,7 +235,7 @@ static inline u64 dwmac4_get_timestamp(void *desc, u32 ats)
/* convert high/sec time stamp value to nanosecond */
ns += le32_to_cpu(p->des1) * 1000000000ULL;
- return ns;
+ *ts = ns;
}
static int dwmac4_rx_check_timestamp(void *desc)
@@ -292,10 +295,7 @@ exit:
static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
int mode, int end)
{
- p->des3 = cpu_to_le32(RDES3_OWN | RDES3_BUFFER1_VALID_ADDR);
-
- if (!disable_rx_ic)
- p->des3 |= cpu_to_le32(RDES3_INT_ON_COMPLETION_EN);
+ dwmac4_set_rx_owner(p, disable_rx_ic);
}
static void dwmac4_rd_init_tx_desc(struct dma_desc *p, int mode, int end)
@@ -424,6 +424,25 @@ static void dwmac4_set_mss_ctxt(struct dma_desc *p, unsigned int mss)
p->des3 = cpu_to_le32(TDES3_CONTEXT_TYPE | TDES3_CTXT_TCMSSV);
}
+static void dwmac4_get_addr(struct dma_desc *p, unsigned int *addr)
+{
+ *addr = le32_to_cpu(p->des0);
+}
+
+static void dwmac4_set_addr(struct dma_desc *p, dma_addr_t addr)
+{
+ p->des0 = cpu_to_le32(addr);
+ p->des1 = 0;
+}
+
+static void dwmac4_clear(struct dma_desc *p)
+{
+ p->des0 = 0;
+ p->des1 = 0;
+ p->des2 = 0;
+ p->des3 = 0;
+}
+
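Editor's note: the three new callbacks give the core a format-agnostic way to
manage descriptor buffer pointers; for dwmac4 the address lives in des0 with
des1 zeroed alongside. A hedged sketch of an RX-refill step through the hwif
wrappers defined later in this diff (the ring and DMA-handle names are
illustrative):

    struct dma_desc *p = rx_q->dma_rx + entry;

    stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[entry]);  /* des0/des1 */
    stmmac_set_rx_owner(priv, p, priv->use_riwt);  /* OWN + optional IOC */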
const struct stmmac_desc_ops dwmac4_desc_ops = {
.tx_status = dwmac4_wrback_get_tx_status,
.rx_status = dwmac4_wrback_get_rx_status,
@@ -445,6 +464,9 @@ const struct stmmac_desc_ops dwmac4_desc_ops = {
.init_tx_desc = dwmac4_rd_init_tx_desc,
.display_ring = dwmac4_display_ring,
.set_mss = dwmac4_set_mss_ctxt,
+ .get_addr = dwmac4_get_addr,
+ .set_addr = dwmac4_set_addr,
+ .clear = dwmac4_clear,
};
const struct stmmac_mode_ops dwmac4_ring_mode_ops = { };
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
index d37d457306d1..d37f17ca62fe 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
@@ -94,6 +94,10 @@ static void dwmac4_dma_init_tx_chan(void __iomem *ioaddr,
value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
value = value | (txpbl << DMA_BUS_MODE_PBL_SHIFT);
+
+ /* Enable OSP to get best performance */
+ value |= DMA_CONTROL_OSP;
+
writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan));
writel(dma_tx_phy, ioaddr + DMA_CHAN_TX_BASE_ADDR(chan));
@@ -116,8 +120,7 @@ static void dwmac4_dma_init_channel(void __iomem *ioaddr,
}
static void dwmac4_dma_init(void __iomem *ioaddr,
- struct stmmac_dma_cfg *dma_cfg,
- u32 dma_tx, u32 dma_rx, int atds)
+ struct stmmac_dma_cfg *dma_cfg, int atds)
{
u32 value = readl(ioaddr + DMA_SYS_BUS_MODE);
@@ -370,6 +373,8 @@ static void dwmac4_get_hw_feature(void __iomem *ioaddr,
((hw_cap & GMAC_HW_FEAT_RXQCNT) >> 0) + 1;
dma_cap->number_tx_queues =
((hw_cap & GMAC_HW_FEAT_TXQCNT) >> 6) + 1;
+ /* PPS output */
+ dma_cap->pps_out_num = (hw_cap & GMAC_HW_FEAT_PPSOUTNUM) >> 24;
/* IEEE 1588-2002 */
dma_cap->time_stamp = 0;
@@ -379,6 +384,9 @@ static void dwmac4_get_hw_feature(void __iomem *ioaddr,
/* 5.10 Features */
dma_cap->asp = (hw_cap & GMAC_HW_FEAT_ASP) >> 28;
+ dma_cap->frpes = (hw_cap & GMAC_HW_FEAT_FRPES) >> 13;
+ dma_cap->frpbs = (hw_cap & GMAC_HW_FEAT_FRPBS) >> 11;
+ dma_cap->frpsel = (hw_cap & GMAC_HW_FEAT_FRPSEL) >> 10;
}
/* Enable/disable TSO feature and set MSS */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
index 8474bf961dd0..c63c1fe3f26b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
@@ -184,7 +184,6 @@
#define DMA_CHAN0_DBG_STAT_RPS_SHIFT 8
int dwmac4_dma_reset(void __iomem *ioaddr);
-void dwmac4_enable_dma_transmission(void __iomem *ioaddr, u32 tail_ptr);
void dwmac4_enable_dma_irq(void __iomem *ioaddr, u32 chan);
void dwmac410_enable_dma_irq(void __iomem *ioaddr, u32 chan);
void dwmac4_disable_dma_irq(void __iomem *ioaddr, u32 chan);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
index 860de39999c7..3f4f3132e16b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
@@ -7,6 +7,8 @@
#include "common.h"
#include "dwmac4.h"
#include "dwmac5.h"
+#include "stmmac.h"
+#include "stmmac_ptp.h"
struct dwmac5_error_desc {
bool valid;
@@ -237,15 +239,16 @@ int dwmac5_safety_feat_config(void __iomem *ioaddr, unsigned int asp)
return 0;
}
-bool dwmac5_safety_feat_irq_status(struct net_device *ndev,
+int dwmac5_safety_feat_irq_status(struct net_device *ndev,
void __iomem *ioaddr, unsigned int asp,
struct stmmac_safety_stats *stats)
{
- bool ret = false, err, corr;
+ bool err, corr;
u32 mtl, dma;
+ int ret = 0;
if (!asp)
- return false;
+ return -EINVAL;
mtl = readl(ioaddr + MTL_SAFETY_INT_STATUS);
dma = readl(ioaddr + DMA_SAFETY_INT_STATUS);
@@ -282,17 +285,267 @@ static const struct dwmac5_error {
{ dwmac5_dma_errors },
};
-const char *dwmac5_safety_feat_dump(struct stmmac_safety_stats *stats,
- int index, unsigned long *count)
+int dwmac5_safety_feat_dump(struct stmmac_safety_stats *stats,
+ int index, unsigned long *count, const char **desc)
{
int module = index / 32, offset = index % 32;
unsigned long *ptr = (unsigned long *)stats;
if (module >= ARRAY_SIZE(dwmac5_all_errors))
- return NULL;
+ return -EINVAL;
if (!dwmac5_all_errors[module].desc[offset].valid)
- return NULL;
+ return -EINVAL;
if (count)
*count = *(ptr + index);
- return dwmac5_all_errors[module].desc[offset].desc;
+ if (desc)
+ *desc = dwmac5_all_errors[module].desc[offset].desc;
+ return 0;
+}
+
+static int dwmac5_rxp_disable(void __iomem *ioaddr)
+{
+ u32 val;
+ int ret;
+
+ val = readl(ioaddr + MTL_OPERATION_MODE);
+ val &= ~MTL_FRPE;
+ writel(val, ioaddr + MTL_OPERATION_MODE);
+
+ ret = readl_poll_timeout(ioaddr + MTL_RXP_CONTROL_STATUS, val,
+ val & RXPI, 1, 10000);
+ if (ret)
+ return ret;
+ return 0;
+}
+
+static void dwmac5_rxp_enable(void __iomem *ioaddr)
+{
+ u32 val;
+
+ val = readl(ioaddr + MTL_OPERATION_MODE);
+ val |= MTL_FRPE;
+ writel(val, ioaddr + MTL_OPERATION_MODE);
+}
+
+static int dwmac5_rxp_update_single_entry(void __iomem *ioaddr,
+ struct stmmac_tc_entry *entry,
+ int pos)
+{
+ int ret, i;
+
+ for (i = 0; i < (sizeof(entry->val) / sizeof(u32)); i++) {
+ int real_pos = pos * (sizeof(entry->val) / sizeof(u32)) + i;
+ u32 val;
+
+ /* Wait for ready */
+ ret = readl_poll_timeout(ioaddr + MTL_RXP_IACC_CTRL_STATUS,
+ val, !(val & STARTBUSY), 1, 10000);
+ if (ret)
+ return ret;
+
+ /* Write data */
+ val = *((u32 *)&entry->val + i);
+ writel(val, ioaddr + MTL_RXP_IACC_DATA);
+
+ /* Write pos */
+ val = real_pos & ADDR;
+ writel(val, ioaddr + MTL_RXP_IACC_CTRL_STATUS);
+
+ /* Write OP */
+ val |= WRRDN;
+ writel(val, ioaddr + MTL_RXP_IACC_CTRL_STATUS);
+
+ /* Start Write */
+ val |= STARTBUSY;
+ writel(val, ioaddr + MTL_RXP_IACC_CTRL_STATUS);
+
+ /* Wait for done */
+ ret = readl_poll_timeout(ioaddr + MTL_RXP_IACC_CTRL_STATUS,
+ val, !(val & STARTBUSY), 1, 10000);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
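Editor's note: dwmac5_rxp_update_single_entry() walks the MTL indirect-access
protocol for writes: poll STARTBUSY clear, stage the data word, program the
entry address, set WRRDN for a write, kick STARTBUSY, then poll again. Reads
would presumably use the same handshake with WRRDN left clear; a hedged
sketch under that assumption, not part of this patch:

    /* Hypothetical read via the same indirect interface */
    static int dwmac5_rxp_read_word(void __iomem *ioaddr, int real_pos,
                                    u32 *data)
    {
        u32 val;
        int ret;

        ret = readl_poll_timeout(ioaddr + MTL_RXP_IACC_CTRL_STATUS,
                                 val, !(val & STARTBUSY), 1, 10000);
        if (ret)
            return ret;

        val = real_pos & ADDR;  /* entry word address, WRRDN = 0 (read) */
        writel(val, ioaddr + MTL_RXP_IACC_CTRL_STATUS);
        writel(val | STARTBUSY, ioaddr + MTL_RXP_IACC_CTRL_STATUS);

        ret = readl_poll_timeout(ioaddr + MTL_RXP_IACC_CTRL_STATUS,
                                 val, !(val & STARTBUSY), 1, 10000);
        if (ret)
            return ret;

        *data = readl(ioaddr + MTL_RXP_IACC_DATA);
        return 0;
    }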
+static struct stmmac_tc_entry *
+dwmac5_rxp_get_next_entry(struct stmmac_tc_entry *entries, unsigned int count,
+ u32 curr_prio)
+{
+ struct stmmac_tc_entry *entry;
+ u32 min_prio = ~0x0;
+ int i, min_prio_idx;
+ bool found = false;
+
+ for (i = count - 1; i >= 0; i--) {
+ entry = &entries[i];
+
+ /* Do not update unused entries */
+ if (!entry->in_use)
+ continue;
+ /* Do not update already updated entries (i.e. fragments) */
+ if (entry->in_hw)
+ continue;
+ /* Let last entry be updated last */
+ if (entry->is_last)
+ continue;
+ /* Do not return fragments */
+ if (entry->is_frag)
+ continue;
+ /* Check if we already checked this prio */
+ if (entry->prio < curr_prio)
+ continue;
+ /* Check if this is the minimum prio */
+ if (entry->prio < min_prio) {
+ min_prio = entry->prio;
+ min_prio_idx = i;
+ found = true;
+ }
+ }
+
+ if (found)
+ return &entries[min_prio_idx];
+ return NULL;
+}
+
+int dwmac5_rxp_config(void __iomem *ioaddr, struct stmmac_tc_entry *entries,
+ unsigned int count)
+{
+ struct stmmac_tc_entry *entry, *frag;
+ int i, ret, nve = 0;
+ u32 curr_prio = 0;
+ u32 old_val, val;
+
+ /* Force disable RX */
+ old_val = readl(ioaddr + GMAC_CONFIG);
+ val = old_val & ~GMAC_CONFIG_RE;
+ writel(val, ioaddr + GMAC_CONFIG);
+
+ /* Disable RX Parser */
+ ret = dwmac5_rxp_disable(ioaddr);
+ if (ret)
+ goto re_enable;
+
+ /* Set all entries as NOT in HW */
+ for (i = 0; i < count; i++) {
+ entry = &entries[i];
+ entry->in_hw = false;
+ }
+
+ /* Update entries in reverse order */
+ while (1) {
+ entry = dwmac5_rxp_get_next_entry(entries, count, curr_prio);
+ if (!entry)
+ break;
+
+ curr_prio = entry->prio;
+ frag = entry->frag_ptr;
+
+ /* Set special fragment requirements */
+ if (frag) {
+ entry->val.af = 0;
+ entry->val.rf = 0;
+ entry->val.nc = 1;
+ entry->val.ok_index = nve + 2;
+ }
+
+ ret = dwmac5_rxp_update_single_entry(ioaddr, entry, nve);
+ if (ret)
+ goto re_enable;
+
+ entry->table_pos = nve++;
+ entry->in_hw = true;
+
+ if (frag && !frag->in_hw) {
+ ret = dwmac5_rxp_update_single_entry(ioaddr, frag, nve);
+ if (ret)
+ goto re_enable;
+ frag->table_pos = nve++;
+ frag->in_hw = true;
+ }
+ }
+
+ if (!nve)
+ goto re_enable;
+
+ /* Update the pass-all (last) entries */
+ for (i = 0; i < count; i++) {
+ entry = &entries[i];
+ if (!entry->is_last)
+ continue;
+
+ ret = dwmac5_rxp_update_single_entry(ioaddr, entry, nve);
+ if (ret)
+ goto re_enable;
+
+ entry->table_pos = nve++;
+ }
+
+ /* Assume n. of parsable entries == n. of valid entries */
+ val = (nve << 16) & NPE;
+ val |= nve & NVE;
+ writel(val, ioaddr + MTL_RXP_CONTROL_STATUS);
+
+ /* Enable RX Parser */
+ dwmac5_rxp_enable(ioaddr);
+
+re_enable:
+ /* Re-enable RX */
+ writel(old_val, ioaddr + GMAC_CONFIG);
+ return ret;
+}
+
+int dwmac5_flex_pps_config(void __iomem *ioaddr, int index,
+ struct stmmac_pps_cfg *cfg, bool enable,
+ u32 sub_second_inc, u32 systime_flags)
+{
+ u32 tnsec = readl(ioaddr + MAC_PPSx_TARGET_TIME_NSEC(index));
+ u32 val = readl(ioaddr + MAC_PPS_CONTROL);
+ u64 period;
+
+ if (!cfg->available)
+ return -EINVAL;
+ if (tnsec & TRGTBUSY0)
+ return -EBUSY;
+ if (!sub_second_inc || !systime_flags)
+ return -EINVAL;
+
+ val &= ~PPSx_MASK(index);
+
+ if (!enable) {
+ val |= PPSCMDx(index, 0x5);
+ writel(val, ioaddr + MAC_PPS_CONTROL);
+ return 0;
+ }
+
+ val |= PPSCMDx(index, 0x2);
+ val |= TRGTMODSELx(index, 0x2);
+ val |= PPSEN0;
+
+ writel(cfg->start.tv_sec, ioaddr + MAC_PPSx_TARGET_TIME_SEC(index));
+
+ if (!(systime_flags & PTP_TCR_TSCTRLSSR))
+ cfg->start.tv_nsec = (cfg->start.tv_nsec * 1000) / 465;
+ writel(cfg->start.tv_nsec, ioaddr + MAC_PPSx_TARGET_TIME_NSEC(index));
+
+ period = cfg->period.tv_sec * 1000000000;
+ period += cfg->period.tv_nsec;
+
+ do_div(period, sub_second_inc);
+
+ if (period <= 1)
+ return -EINVAL;
+
+ writel(period - 1, ioaddr + MAC_PPSx_INTERVAL(index));
+
+ period >>= 1;
+ if (period <= 1)
+ return -EINVAL;
+
+ writel(period - 1, ioaddr + MAC_PPSx_WIDTH(index));
+
+ /* Finally, activate it */
+ writel(val, ioaddr + MAC_PPS_CONTROL);
+ return 0;
}
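Editor's note: the interval/width math above converts the requested period
into PTP clock ticks (period_ns / sub_second_inc), with MAC_PPSx_INTERVAL
taking ticks - 1 and MAC_PPSx_WIDTH half the period minus one, i.e. roughly a
50% duty cycle. A standalone check for a 1 Hz PPS on a clock with an assumed
4 ns sub-second increment:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t period_ns = 1000000000ULL;  /* 1 Hz PPS */
        uint32_t sub_second_inc = 4;         /* 4 ns per PTP tick, assumed */
        uint64_t ticks = period_ns / sub_second_inc;

        printf("MAC_PPSx_INTERVAL = %llu\n",
               (unsigned long long)(ticks - 1));         /* 249999999 */
        printf("MAC_PPSx_WIDTH    = %llu\n",
               (unsigned long long)((ticks >> 1) - 1));  /* 124999999 */
        return 0;
    }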
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
index a0d2c44711b9..775db776b3cc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
@@ -11,6 +11,36 @@
#define PRTYEN BIT(1)
#define TMOUTEN BIT(0)
+#define MAC_PPS_CONTROL 0x00000b70
+#define PPS_MAXIDX(x) ((((x) + 1) * 8) - 1)
+#define PPS_MINIDX(x) ((x) * 8)
+#define PPSx_MASK(x) GENMASK(PPS_MAXIDX(x), PPS_MINIDX(x))
+#define MCGRENx(x) BIT(PPS_MAXIDX(x))
+#define TRGTMODSELx(x, val) \
+ GENMASK(PPS_MAXIDX(x) - 1, PPS_MAXIDX(x) - 2) & \
+ ((val) << (PPS_MAXIDX(x) - 2))
+#define PPSCMDx(x, val) \
+ GENMASK(PPS_MINIDX(x) + 3, PPS_MINIDX(x)) & \
+ ((val) << PPS_MINIDX(x))
+#define PPSEN0 BIT(4)
+#define MAC_PPSx_TARGET_TIME_SEC(x) (0x00000b80 + ((x) * 0x10))
+#define MAC_PPSx_TARGET_TIME_NSEC(x) (0x00000b84 + ((x) * 0x10))
+#define TRGTBUSY0 BIT(31)
+#define TTSL0 GENMASK(30, 0)
+#define MAC_PPSx_INTERVAL(x) (0x00000b88 + ((x) * 0x10))
+#define MAC_PPSx_WIDTH(x) (0x00000b8c + ((x) * 0x10))
+
+#define MTL_RXP_CONTROL_STATUS 0x00000ca0
+#define RXPI BIT(31)
+#define NPE GENMASK(23, 16)
+#define NVE GENMASK(7, 0)
+#define MTL_RXP_IACC_CTRL_STATUS 0x00000cb0
+#define STARTBUSY BIT(31)
+#define RXPEIEC GENMASK(22, 21)
+#define RXPEIEE BIT(20)
+#define WRRDN BIT(16)
+#define ADDR GENMASK(15, 0)
+#define MTL_RXP_IACC_DATA 0x00000cb4
#define MTL_ECC_CONTROL 0x00000cc0
#define TSOEE BIT(4)
#define MRXPEE BIT(3)
@@ -43,10 +73,15 @@
#define DMA_ECC_INT_STATUS 0x00001088
int dwmac5_safety_feat_config(void __iomem *ioaddr, unsigned int asp);
-bool dwmac5_safety_feat_irq_status(struct net_device *ndev,
+int dwmac5_safety_feat_irq_status(struct net_device *ndev,
void __iomem *ioaddr, unsigned int asp,
struct stmmac_safety_stats *stats);
-const char *dwmac5_safety_feat_dump(struct stmmac_safety_stats *stats,
- int index, unsigned long *count);
+int dwmac5_safety_feat_dump(struct stmmac_safety_stats *stats,
+ int index, unsigned long *count, const char **desc);
+int dwmac5_rxp_config(void __iomem *ioaddr, struct stmmac_tc_entry *entries,
+ unsigned int count);
+int dwmac5_flex_pps_config(void __iomem *ioaddr, int index,
+ struct stmmac_pps_cfg *cfg, bool enable,
+ u32 sub_second_inc, u32 systime_flags);
#endif /* __DWMAC5_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index 6768a25b6aa0..77914c89d749 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -292,7 +292,7 @@ static void enh_desc_set_tx_owner(struct dma_desc *p)
p->des0 |= cpu_to_le32(ETDES0_OWN);
}
-static void enh_desc_set_rx_owner(struct dma_desc *p)
+static void enh_desc_set_rx_owner(struct dma_desc *p, int disable_rx_ic)
{
p->des0 |= cpu_to_le32(RDES0_OWN);
}
@@ -382,7 +382,7 @@ static int enh_desc_get_tx_timestamp_status(struct dma_desc *p)
return (le32_to_cpu(p->des0) & ETDES0_TIME_STAMP_STATUS) >> 17;
}
-static u64 enh_desc_get_timestamp(void *desc, u32 ats)
+static void enh_desc_get_timestamp(void *desc, u32 ats, u64 *ts)
{
u64 ns;
@@ -397,7 +397,7 @@ static u64 enh_desc_get_timestamp(void *desc, u32 ats)
ns += le32_to_cpu(p->des3) * 1000000000ULL;
}
- return ns;
+ *ts = ns;
}
static int enh_desc_get_rx_timestamp_status(void *desc, void *next_desc,
@@ -437,6 +437,21 @@ static void enh_desc_display_ring(void *head, unsigned int size, bool rx)
pr_info("\n");
}
+static void enh_desc_get_addr(struct dma_desc *p, unsigned int *addr)
+{
+ *addr = le32_to_cpu(p->des2);
+}
+
+static void enh_desc_set_addr(struct dma_desc *p, dma_addr_t addr)
+{
+ p->des2 = cpu_to_le32(addr);
+}
+
+static void enh_desc_clear(struct dma_desc *p)
+{
+ p->des2 = 0;
+}
+
const struct stmmac_desc_ops enh_desc_ops = {
.tx_status = enh_desc_get_tx_status,
.rx_status = enh_desc_get_rx_status,
@@ -457,4 +472,7 @@ const struct stmmac_desc_ops enh_desc_ops = {
.get_timestamp = enh_desc_get_timestamp,
.get_rx_timestamp_status = enh_desc_get_rx_timestamp_status,
.display_ring = enh_desc_display_ring,
+ .get_addr = enh_desc_get_addr,
+ .set_addr = enh_desc_set_addr,
+ .clear = enh_desc_clear,
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c
new file mode 100644
index 000000000000..14770fc8865e
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c
@@ -0,0 +1,268 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
+ * stmmac HW Interface Handling
+ */
+
+#include "common.h"
+#include "stmmac.h"
+#include "stmmac_ptp.h"
+
+static u32 stmmac_get_id(struct stmmac_priv *priv, u32 id_reg)
+{
+ u32 reg = readl(priv->ioaddr + id_reg);
+
+ if (!reg) {
+ dev_info(priv->device, "Version ID not available\n");
+ return 0x0;
+ }
+
+ dev_info(priv->device, "User ID: 0x%x, Synopsys ID: 0x%x\n",
+ (unsigned int)(reg & GENMASK(15, 8)) >> 8,
+ (unsigned int)(reg & GENMASK(7, 0)));
+ return reg & GENMASK(7, 0);
+}
+
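Editor's note: stmmac_get_id() splits the version register into a
user-defined ID (bits 15:8) and the Synopsys core ID (bits 7:0), returning
only the latter. A standalone decode of an example raw value (0x1042 is
illustrative, not from the patch):

    #include <stdio.h>

    int main(void)
    {
        unsigned int reg = 0x1042;  /* illustrative raw register value */

        printf("User ID: 0x%x, Synopsys ID: 0x%x\n",
               (reg & 0xff00) >> 8, reg & 0xff);  /* 0x10 and 0x42 */
        return 0;
    }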
+static void stmmac_dwmac_mode_quirk(struct stmmac_priv *priv)
+{
+ struct mac_device_info *mac = priv->hw;
+
+ if (priv->chain_mode) {
+ dev_info(priv->device, "Chain mode enabled\n");
+ priv->mode = STMMAC_CHAIN_MODE;
+ mac->mode = &chain_mode_ops;
+ } else {
+ dev_info(priv->device, "Ring mode enabled\n");
+ priv->mode = STMMAC_RING_MODE;
+ mac->mode = &ring_mode_ops;
+ }
+}
+
+static int stmmac_dwmac1_quirks(struct stmmac_priv *priv)
+{
+ struct mac_device_info *mac = priv->hw;
+
+ if (priv->plat->enh_desc) {
+ dev_info(priv->device, "Enhanced/Alternate descriptors\n");
+
+ /* GMAC older than 3.50 has no extended descriptors */
+ if (priv->synopsys_id >= DWMAC_CORE_3_50) {
+ dev_info(priv->device, "Enabled extended descriptors\n");
+ priv->extend_desc = 1;
+ } else {
+ dev_warn(priv->device, "Extended descriptors not supported\n");
+ }
+
+ mac->desc = &enh_desc_ops;
+ } else {
+ dev_info(priv->device, "Normal descriptors\n");
+ mac->desc = &ndesc_ops;
+ }
+
+ stmmac_dwmac_mode_quirk(priv);
+ return 0;
+}
+
+static int stmmac_dwmac4_quirks(struct stmmac_priv *priv)
+{
+ stmmac_dwmac_mode_quirk(priv);
+ return 0;
+}
+
+static const struct stmmac_hwif_entry {
+ bool gmac;
+ bool gmac4;
+ u32 min_id;
+ const struct stmmac_regs_off regs;
+ const void *desc;
+ const void *dma;
+ const void *mac;
+ const void *hwtimestamp;
+ const void *mode;
+ const void *tc;
+ int (*setup)(struct stmmac_priv *priv);
+ int (*quirks)(struct stmmac_priv *priv);
+} stmmac_hw[] = {
+ /* NOTE: New HW versions shall go to the end of this table */
+ {
+ .gmac = false,
+ .gmac4 = false,
+ .min_id = 0,
+ .regs = {
+ .ptp_off = PTP_GMAC3_X_OFFSET,
+ .mmc_off = MMC_GMAC3_X_OFFSET,
+ },
+ .desc = NULL,
+ .dma = &dwmac100_dma_ops,
+ .mac = &dwmac100_ops,
+ .hwtimestamp = &stmmac_ptp,
+ .mode = NULL,
+ .tc = NULL,
+ .setup = dwmac100_setup,
+ .quirks = stmmac_dwmac1_quirks,
+ }, {
+ .gmac = true,
+ .gmac4 = false,
+ .min_id = 0,
+ .regs = {
+ .ptp_off = PTP_GMAC3_X_OFFSET,
+ .mmc_off = MMC_GMAC3_X_OFFSET,
+ },
+ .desc = NULL,
+ .dma = &dwmac1000_dma_ops,
+ .mac = &dwmac1000_ops,
+ .hwtimestamp = &stmmac_ptp,
+ .mode = NULL,
+ .tc = NULL,
+ .setup = dwmac1000_setup,
+ .quirks = stmmac_dwmac1_quirks,
+ }, {
+ .gmac = false,
+ .gmac4 = true,
+ .min_id = 0,
+ .regs = {
+ .ptp_off = PTP_GMAC4_OFFSET,
+ .mmc_off = MMC_GMAC4_OFFSET,
+ },
+ .desc = &dwmac4_desc_ops,
+ .dma = &dwmac4_dma_ops,
+ .mac = &dwmac4_ops,
+ .hwtimestamp = &stmmac_ptp,
+ .mode = NULL,
+ .tc = NULL,
+ .setup = dwmac4_setup,
+ .quirks = stmmac_dwmac4_quirks,
+ }, {
+ .gmac = false,
+ .gmac4 = true,
+ .min_id = DWMAC_CORE_4_00,
+ .regs = {
+ .ptp_off = PTP_GMAC4_OFFSET,
+ .mmc_off = MMC_GMAC4_OFFSET,
+ },
+ .desc = &dwmac4_desc_ops,
+ .dma = &dwmac4_dma_ops,
+ .mac = &dwmac410_ops,
+ .hwtimestamp = &stmmac_ptp,
+ .mode = &dwmac4_ring_mode_ops,
+ .tc = NULL,
+ .setup = dwmac4_setup,
+ .quirks = NULL,
+ }, {
+ .gmac = false,
+ .gmac4 = true,
+ .min_id = DWMAC_CORE_4_10,
+ .regs = {
+ .ptp_off = PTP_GMAC4_OFFSET,
+ .mmc_off = MMC_GMAC4_OFFSET,
+ },
+ .desc = &dwmac4_desc_ops,
+ .dma = &dwmac410_dma_ops,
+ .mac = &dwmac410_ops,
+ .hwtimestamp = &stmmac_ptp,
+ .mode = &dwmac4_ring_mode_ops,
+ .tc = NULL,
+ .setup = dwmac4_setup,
+ .quirks = NULL,
+ }, {
+ .gmac = false,
+ .gmac4 = true,
+ .min_id = DWMAC_CORE_5_10,
+ .regs = {
+ .ptp_off = PTP_GMAC4_OFFSET,
+ .mmc_off = MMC_GMAC4_OFFSET,
+ },
+ .desc = &dwmac4_desc_ops,
+ .dma = &dwmac410_dma_ops,
+ .mac = &dwmac510_ops,
+ .hwtimestamp = &stmmac_ptp,
+ .mode = &dwmac4_ring_mode_ops,
+ .tc = &dwmac510_tc_ops,
+ .setup = dwmac4_setup,
+ .quirks = NULL,
+ }
+};
+
+int stmmac_hwif_init(struct stmmac_priv *priv)
+{
+ bool needs_gmac4 = priv->plat->has_gmac4;
+ bool needs_gmac = priv->plat->has_gmac;
+ const struct stmmac_hwif_entry *entry;
+ struct mac_device_info *mac;
+ bool needs_setup = true;
+ int i, ret;
+ u32 id;
+
+ if (needs_gmac) {
+ id = stmmac_get_id(priv, GMAC_VERSION);
+ } else if (needs_gmac4) {
+ id = stmmac_get_id(priv, GMAC4_VERSION);
+ } else {
+ id = 0;
+ }
+
+ /* Save ID for later use */
+ priv->synopsys_id = id;
+
+ /* Let's assume some safe values first */
+ priv->ptpaddr = priv->ioaddr +
+ (needs_gmac4 ? PTP_GMAC4_OFFSET : PTP_GMAC3_X_OFFSET);
+ priv->mmcaddr = priv->ioaddr +
+ (needs_gmac4 ? MMC_GMAC4_OFFSET : MMC_GMAC3_X_OFFSET);
+
+ /* Check for HW specific setup first */
+ if (priv->plat->setup) {
+ mac = priv->plat->setup(priv);
+ needs_setup = false;
+ } else {
+ mac = devm_kzalloc(priv->device, sizeof(*mac), GFP_KERNEL);
+ }
+
+ if (!mac)
+ return -ENOMEM;
+
+ /* Fallback to generic HW */
+ for (i = ARRAY_SIZE(stmmac_hw) - 1; i >= 0; i--) {
+ entry = &stmmac_hw[i];
+
+ if (needs_gmac ^ entry->gmac)
+ continue;
+ if (needs_gmac4 ^ entry->gmac4)
+ continue;
+ /* Use synopsys_id var because some setups can override this */
+ if (priv->synopsys_id < entry->min_id)
+ continue;
+
+ /* Only use generic HW helpers if needed */
+ mac->desc = mac->desc ? : entry->desc;
+ mac->dma = mac->dma ? : entry->dma;
+ mac->mac = mac->mac ? : entry->mac;
+ mac->ptp = mac->ptp ? : entry->hwtimestamp;
+ mac->mode = mac->mode ? : entry->mode;
+ mac->tc = mac->tc ? : entry->tc;
+
+ priv->hw = mac;
+ priv->ptpaddr = priv->ioaddr + entry->regs.ptp_off;
+ priv->mmcaddr = priv->ioaddr + entry->regs.mmc_off;
+
+ /* Entry found */
+ if (needs_setup) {
+ ret = entry->setup(priv);
+ if (ret)
+ return ret;
+ }
+
+ /* Run quirks, if needed */
+ if (entry->quirks) {
+ ret = entry->quirks(priv);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+ }
+
+ dev_err(priv->device, "Failed to find HW IF (id=0x%x, gmac=%d/%d)\n",
+ id, needs_gmac, needs_gmac4);
+ return -EINVAL;
+}
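Editor's note: the loop above walks the table from the newest entry down and
takes the first one whose gmac/gmac4 flags match the platform data and whose
min_id does not exceed the detected core version; per-entry ops are only used
as fallbacks, so a platform setup() callback that pre-filled mac->desc keeps
its own descriptor ops. A standalone model of the selection, with the
DWMAC_CORE_* numeric values (0x40/0x41/0x51) assumed from the driver headers:

    #include <stdio.h>
    #include <stdbool.h>

    struct entry { bool gmac4; unsigned int min_id; const char *mac_ops; };

    int main(void)
    {
        /* Ordered oldest to newest, mirroring stmmac_hw[] above */
        struct entry tbl[] = {
            { true, 0x00, "dwmac4_ops"   },
            { true, 0x40, "dwmac410_ops" },  /* DWMAC_CORE_4_00 */
            { true, 0x41, "dwmac410_ops" },  /* DWMAC_CORE_4_10 */
            { true, 0x51, "dwmac510_ops" },  /* DWMAC_CORE_5_10 */
        };
        unsigned int id = 0x51;  /* detected Synopsys ID, assumed */

        for (int i = 3; i >= 0; i--) {
            if (id < tbl[i].min_id)
                continue;
            printf("selected %s\n", tbl[i].mac_ops);  /* dwmac510_ops */
            break;
        }
        return 0;
    }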
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
new file mode 100644
index 000000000000..e44e7b26ce82
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -0,0 +1,477 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+// Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
+// stmmac HW Interface Callbacks
+
+#ifndef __STMMAC_HWIF_H__
+#define __STMMAC_HWIF_H__
+
+#include <linux/netdevice.h>
+
+#define stmmac_do_void_callback(__priv, __module, __cname, __arg0, __args...) \
+({ \
+ int __result = -EINVAL; \
+ if ((__priv)->hw->__module && (__priv)->hw->__module->__cname) { \
+ (__priv)->hw->__module->__cname((__arg0), ##__args); \
+ __result = 0; \
+ } \
+ __result; \
+})
+#define stmmac_do_callback(__priv, __module, __cname, __arg0, __args...) \
+({ \
+ int __result = -EINVAL; \
+ if ((__priv)->hw->__module && (__priv)->hw->__module->__cname) \
+ __result = (__priv)->hw->__module->__cname((__arg0), ##__args); \
+ __result; \
+})
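Editor's note: these two statement-expression wrappers centralize the NULL
checks that used to be scattered across the core: a missing module or
callback yields -EINVAL instead of a dereference, and void callbacks report 0
on success. Usage is then uniform, e.g. (illustrative):

    int ret = stmmac_reset(priv, priv->ioaddr);  /* -EINVAL if no dma->reset */
    if (ret)
        dev_err(priv->device, "Failed to reset the dma\n");

    stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);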
+
+struct stmmac_extra_stats;
+struct stmmac_safety_stats;
+struct dma_desc;
+struct dma_extended_desc;
+
+/* Descriptors helpers */
+struct stmmac_desc_ops {
+ /* DMA RX descriptor ring initialization */
+ void (*init_rx_desc)(struct dma_desc *p, int disable_rx_ic, int mode,
+ int end);
+ /* DMA TX descriptor ring initialization */
+ void (*init_tx_desc)(struct dma_desc *p, int mode, int end);
+ /* Invoked by the xmit function to prepare the tx descriptor */
+ void (*prepare_tx_desc)(struct dma_desc *p, int is_fs, int len,
+ bool csum_flag, int mode, bool tx_own, bool ls,
+ unsigned int tot_pkt_len);
+ void (*prepare_tso_tx_desc)(struct dma_desc *p, int is_fs, int len1,
+ int len2, bool tx_own, bool ls, unsigned int tcphdrlen,
+ unsigned int tcppayloadlen);
+ /* Set/get the owner of the descriptor */
+ void (*set_tx_owner)(struct dma_desc *p);
+ int (*get_tx_owner)(struct dma_desc *p);
+ /* Clean the tx descriptor as soon as the tx irq is received */
+ void (*release_tx_desc)(struct dma_desc *p, int mode);
+ /* Clear interrupt on tx frame completion. When this bit is
+ * set, an interrupt happens as soon as the frame is transmitted */
+ void (*set_tx_ic)(struct dma_desc *p);
+ /* Last tx segment reports the transmit status */
+ int (*get_tx_ls)(struct dma_desc *p);
+ /* Return the transmit status looking at the TDES1 */
+ int (*tx_status)(void *data, struct stmmac_extra_stats *x,
+ struct dma_desc *p, void __iomem *ioaddr);
+ /* Get the buffer size from the descriptor */
+ int (*get_tx_len)(struct dma_desc *p);
+ /* Set the RX descriptor ownership (and, where supported, the
+ * interrupt-on-completion bit) */
+ void (*set_rx_owner)(struct dma_desc *p, int disable_rx_ic);
+ /* Get the receive frame size */
+ int (*get_rx_frame_len)(struct dma_desc *p, int rx_coe_type);
+ /* Return the reception status looking at the RDES1 */
+ int (*rx_status)(void *data, struct stmmac_extra_stats *x,
+ struct dma_desc *p);
+ void (*rx_extended_status)(void *data, struct stmmac_extra_stats *x,
+ struct dma_extended_desc *p);
+ /* Set tx timestamp enable bit */
+ void (*enable_tx_timestamp) (struct dma_desc *p);
+ /* get tx timestamp status */
+ int (*get_tx_timestamp_status) (struct dma_desc *p);
+ /* get timestamp value */
+ void (*get_timestamp)(void *desc, u32 ats, u64 *ts);
+ /* get rx timestamp status */
+ int (*get_rx_timestamp_status)(void *desc, void *next_desc, u32 ats);
+ /* Display ring */
+ void (*display_ring)(void *head, unsigned int size, bool rx);
+ /* set MSS via context descriptor */
+ void (*set_mss)(struct dma_desc *p, unsigned int mss);
+ /* get descriptor skbuff address */
+ void (*get_addr)(struct dma_desc *p, unsigned int *addr);
+ /* set descriptor skbuff address */
+ void (*set_addr)(struct dma_desc *p, dma_addr_t addr);
+ /* clear descriptor */
+ void (*clear)(struct dma_desc *p);
+};
+
+#define stmmac_init_rx_desc(__priv, __args...) \
+ stmmac_do_void_callback(__priv, desc, init_rx_desc, __args)
+#define stmmac_init_tx_desc(__priv, __args...) \
+ stmmac_do_void_callback(__priv, desc, init_tx_desc, __args)
+#define stmmac_prepare_tx_desc(__priv, __args...) \
+ stmmac_do_void_callback(__priv, desc, prepare_tx_desc, __args)
+#define stmmac_prepare_tso_tx_desc(__priv, __args...) \
+ stmmac_do_void_callback(__priv, desc, prepare_tso_tx_desc, __args)
+#define stmmac_set_tx_owner(__priv, __args...) \
+ stmmac_do_void_callback(__priv, desc, set_tx_owner, __args)
+#define stmmac_get_tx_owner(__priv, __args...) \
+ stmmac_do_callback(__priv, desc, get_tx_owner, __args)
+#define stmmac_release_tx_desc(__priv, __args...) \
+ stmmac_do_void_callback(__priv, desc, release_tx_desc, __args)
+#define stmmac_set_tx_ic(__priv, __args...) \
+ stmmac_do_void_callback(__priv, desc, set_tx_ic, __args)
+#define stmmac_get_tx_ls(__priv, __args...) \
+ stmmac_do_callback(__priv, desc, get_tx_ls, __args)
+#define stmmac_tx_status(__priv, __args...) \
+ stmmac_do_callback(__priv, desc, tx_status, __args)
+#define stmmac_get_tx_len(__priv, __args...) \
+ stmmac_do_callback(__priv, desc, get_tx_len, __args)
+#define stmmac_set_rx_owner(__priv, __args...) \
+ stmmac_do_void_callback(__priv, desc, set_rx_owner, __args)
+#define stmmac_get_rx_frame_len(__priv, __args...) \
+ stmmac_do_callback(__priv, desc, get_rx_frame_len, __args)
+#define stmmac_rx_status(__priv, __args...) \
+ stmmac_do_callback(__priv, desc, rx_status, __args)
+#define stmmac_rx_extended_status(__priv, __args...) \
+ stmmac_do_void_callback(__priv, desc, rx_extended_status, __args)
+#define stmmac_enable_tx_timestamp(__priv, __args...) \
+ stmmac_do_void_callback(__priv, desc, enable_tx_timestamp, __args)
+#define stmmac_get_tx_timestamp_status(__priv, __args...) \
+ stmmac_do_callback(__priv, desc, get_tx_timestamp_status, __args)
+#define stmmac_get_timestamp(__priv, __args...) \
+ stmmac_do_void_callback(__priv, desc, get_timestamp, __args)
+#define stmmac_get_rx_timestamp_status(__priv, __args...) \
+ stmmac_do_callback(__priv, desc, get_rx_timestamp_status, __args)
+#define stmmac_display_ring(__priv, __args...) \
+ stmmac_do_void_callback(__priv, desc, display_ring, __args)
+#define stmmac_set_mss(__priv, __args...) \
+ stmmac_do_void_callback(__priv, desc, set_mss, __args)
+#define stmmac_get_desc_addr(__priv, __args...) \
+ stmmac_do_void_callback(__priv, desc, get_addr, __args)
+#define stmmac_set_desc_addr(__priv, __args...) \
+ stmmac_do_void_callback(__priv, desc, set_addr, __args)
+#define stmmac_clear_desc(__priv, __args...) \
+ stmmac_do_void_callback(__priv, desc, clear, __args)
+
+struct stmmac_dma_cfg;
+struct dma_features;
+
+/* Specific DMA helpers */
+struct stmmac_dma_ops {
+ /* DMA core initialization */
+ int (*reset)(void __iomem *ioaddr);
+ void (*init)(void __iomem *ioaddr, struct stmmac_dma_cfg *dma_cfg,
+ int atds);
+ void (*init_chan)(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg, u32 chan);
+ void (*init_rx_chan)(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg,
+ u32 dma_rx_phy, u32 chan);
+ void (*init_tx_chan)(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg,
+ u32 dma_tx_phy, u32 chan);
+ /* Configure the AXI Bus Mode Register */
+ void (*axi)(void __iomem *ioaddr, struct stmmac_axi *axi);
+ /* Dump DMA registers */
+ void (*dump_regs)(void __iomem *ioaddr, u32 *reg_space);
+ void (*dma_rx_mode)(void __iomem *ioaddr, int mode, u32 channel,
+ int fifosz, u8 qmode);
+ void (*dma_tx_mode)(void __iomem *ioaddr, int mode, u32 channel,
+ int fifosz, u8 qmode);
+ /* To track extra statistics (if supported) */
+ void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x,
+ void __iomem *ioaddr);
+ void (*enable_dma_transmission) (void __iomem *ioaddr);
+ void (*enable_dma_irq)(void __iomem *ioaddr, u32 chan);
+ void (*disable_dma_irq)(void __iomem *ioaddr, u32 chan);
+ void (*start_tx)(void __iomem *ioaddr, u32 chan);
+ void (*stop_tx)(void __iomem *ioaddr, u32 chan);
+ void (*start_rx)(void __iomem *ioaddr, u32 chan);
+ void (*stop_rx)(void __iomem *ioaddr, u32 chan);
+ int (*dma_interrupt) (void __iomem *ioaddr,
+ struct stmmac_extra_stats *x, u32 chan);
+ /* Get the optional core features, if supported */
+ void (*get_hw_feature)(void __iomem *ioaddr,
+ struct dma_features *dma_cap);
+ /* Program the HW RX Watchdog */
+ void (*rx_watchdog)(void __iomem *ioaddr, u32 riwt, u32 number_chan);
+ void (*set_tx_ring_len)(void __iomem *ioaddr, u32 len, u32 chan);
+ void (*set_rx_ring_len)(void __iomem *ioaddr, u32 len, u32 chan);
+ void (*set_rx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
+ void (*set_tx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
+ void (*enable_tso)(void __iomem *ioaddr, bool en, u32 chan);
+};
+
+#define stmmac_reset(__priv, __args...) \
+ stmmac_do_callback(__priv, dma, reset, __args)
+#define stmmac_dma_init(__priv, __args...) \
+ stmmac_do_void_callback(__priv, dma, init, __args)
+#define stmmac_init_chan(__priv, __args...) \
+ stmmac_do_void_callback(__priv, dma, init_chan, __args)
+#define stmmac_init_rx_chan(__priv, __args...) \
+ stmmac_do_void_callback(__priv, dma, init_rx_chan, __args)
+#define stmmac_init_tx_chan(__priv, __args...) \
+ stmmac_do_void_callback(__priv, dma, init_tx_chan, __args)
+#define stmmac_axi(__priv, __args...) \
+ stmmac_do_void_callback(__priv, dma, axi, __args)
+#define stmmac_dump_dma_regs(__priv, __args...) \
+ stmmac_do_void_callback(__priv, dma, dump_regs, __args)
+#define stmmac_dma_rx_mode(__priv, __args...) \
+ stmmac_do_void_callback(__priv, dma, dma_rx_mode, __args)
+#define stmmac_dma_tx_mode(__priv, __args...) \
+ stmmac_do_void_callback(__priv, dma, dma_tx_mode, __args)
+#define stmmac_dma_diagnostic_fr(__priv, __args...) \
+ stmmac_do_void_callback(__priv, dma, dma_diagnostic_fr, __args)
+#define stmmac_enable_dma_transmission(__priv, __args...) \
+ stmmac_do_void_callback(__priv, dma, enable_dma_transmission, __args)
+#define stmmac_enable_dma_irq(__priv, __args...) \
+ stmmac_do_void_callback(__priv, dma, enable_dma_irq, __args)
+#define stmmac_disable_dma_irq(__priv, __args...) \
+ stmmac_do_void_callback(__priv, dma, disable_dma_irq, __args)
+#define stmmac_start_tx(__priv, __args...) \
+ stmmac_do_void_callback(__priv, dma, start_tx, __args)
+#define stmmac_stop_tx(__priv, __args...) \
+ stmmac_do_void_callback(__priv, dma, stop_tx, __args)
+#define stmmac_start_rx(__priv, __args...) \
+ stmmac_do_void_callback(__priv, dma, start_rx, __args)
+#define stmmac_stop_rx(__priv, __args...) \
+ stmmac_do_void_callback(__priv, dma, stop_rx, __args)
+#define stmmac_dma_interrupt_status(__priv, __args...) \
+ stmmac_do_callback(__priv, dma, dma_interrupt, __args)
+#define stmmac_get_hw_feature(__priv, __args...) \
+ stmmac_do_void_callback(__priv, dma, get_hw_feature, __args)
+#define stmmac_rx_watchdog(__priv, __args...) \
+ stmmac_do_void_callback(__priv, dma, rx_watchdog, __args)
+#define stmmac_set_tx_ring_len(__priv, __args...) \
+ stmmac_do_void_callback(__priv, dma, set_tx_ring_len, __args)
+#define stmmac_set_rx_ring_len(__priv, __args...) \
+ stmmac_do_void_callback(__priv, dma, set_rx_ring_len, __args)
+#define stmmac_set_rx_tail_ptr(__priv, __args...) \
+ stmmac_do_void_callback(__priv, dma, set_rx_tail_ptr, __args)
+#define stmmac_set_tx_tail_ptr(__priv, __args...) \
+ stmmac_do_void_callback(__priv, dma, set_tx_tail_ptr, __args)
+#define stmmac_enable_tso(__priv, __args...) \
+ stmmac_do_void_callback(__priv, dma, enable_tso, __args)
+
+struct mac_device_info;
+struct net_device;
+struct rgmii_adv;
+struct stmmac_safety_stats;
+struct stmmac_tc_entry;
+struct stmmac_pps_cfg;
+
+/* Helpers to program the MAC core */
+struct stmmac_ops {
+ /* MAC core initialization */
+ void (*core_init)(struct mac_device_info *hw, struct net_device *dev);
+ /* Enable the MAC RX/TX */
+ void (*set_mac)(void __iomem *ioaddr, bool enable);
+ /* Enable the IPC (checksum offload) module and verify it is supported */
+ int (*rx_ipc)(struct mac_device_info *hw);
+ /* Enable RX Queues */
+ void (*rx_queue_enable)(struct mac_device_info *hw, u8 mode, u32 queue);
+ /* RX Queues Priority */
+ void (*rx_queue_prio)(struct mac_device_info *hw, u32 prio, u32 queue);
+ /* TX Queues Priority */
+ void (*tx_queue_prio)(struct mac_device_info *hw, u32 prio, u32 queue);
+ /* RX Queues Routing */
+ void (*rx_queue_routing)(struct mac_device_info *hw, u8 packet,
+ u32 queue);
+ /* Program RX Algorithms */
+ void (*prog_mtl_rx_algorithms)(struct mac_device_info *hw, u32 rx_alg);
+ /* Program TX Algorithms */
+ void (*prog_mtl_tx_algorithms)(struct mac_device_info *hw, u32 tx_alg);
+ /* Set MTL TX queues weight */
+ void (*set_mtl_tx_queue_weight)(struct mac_device_info *hw,
+ u32 weight, u32 queue);
+ /* Map RX MTL queues to RX DMA channels */
+ void (*map_mtl_to_dma)(struct mac_device_info *hw, u32 queue, u32 chan);
+ /* Configure AV Algorithm */
+ void (*config_cbs)(struct mac_device_info *hw, u32 send_slope,
+ u32 idle_slope, u32 high_credit, u32 low_credit,
+ u32 queue);
+ /* Dump MAC registers */
+ void (*dump_regs)(struct mac_device_info *hw, u32 *reg_space);
+ /* Handle extra events on specific interrupts (hw dependent) */
+ int (*host_irq_status)(struct mac_device_info *hw,
+ struct stmmac_extra_stats *x);
+ /* Handle MTL interrupts */
+ int (*host_mtl_irq_status)(struct mac_device_info *hw, u32 chan);
+ /* Multicast filter setting */
+ void (*set_filter)(struct mac_device_info *hw, struct net_device *dev);
+ /* Flow control setting */
+ void (*flow_ctrl)(struct mac_device_info *hw, unsigned int duplex,
+ unsigned int fc, unsigned int pause_time, u32 tx_cnt);
+ /* Set power management mode (e.g. magic frame) */
+ void (*pmt)(struct mac_device_info *hw, unsigned long mode);
+ /* Set/Get Unicast MAC addresses */
+ void (*set_umac_addr)(struct mac_device_info *hw, unsigned char *addr,
+ unsigned int reg_n);
+ void (*get_umac_addr)(struct mac_device_info *hw, unsigned char *addr,
+ unsigned int reg_n);
+ void (*set_eee_mode)(struct mac_device_info *hw,
+ bool en_tx_lpi_clockgating);
+ void (*reset_eee_mode)(struct mac_device_info *hw);
+ void (*set_eee_timer)(struct mac_device_info *hw, int ls, int tw);
+ void (*set_eee_pls)(struct mac_device_info *hw, int link);
+ void (*debug)(void __iomem *ioaddr, struct stmmac_extra_stats *x,
+ u32 rx_queues, u32 tx_queues);
+ /* PCS calls */
+ void (*pcs_ctrl_ane)(void __iomem *ioaddr, bool ane, bool srgmi_ral,
+ bool loopback);
+ void (*pcs_rane)(void __iomem *ioaddr, bool restart);
+ void (*pcs_get_adv_lp)(void __iomem *ioaddr, struct rgmii_adv *adv);
+ /* Safety Features */
+ int (*safety_feat_config)(void __iomem *ioaddr, unsigned int asp);
+ int (*safety_feat_irq_status)(struct net_device *ndev,
+ void __iomem *ioaddr, unsigned int asp,
+ struct stmmac_safety_stats *stats);
+ int (*safety_feat_dump)(struct stmmac_safety_stats *stats,
+ int index, unsigned long *count, const char **desc);
+ /* Flexible RX Parser */
+ int (*rxp_config)(void __iomem *ioaddr, struct stmmac_tc_entry *entries,
+ unsigned int count);
+ /* Flexible PPS */
+ int (*flex_pps_config)(void __iomem *ioaddr, int index,
+ struct stmmac_pps_cfg *cfg, bool enable,
+ u32 sub_second_inc, u32 systime_flags);
+};
+
+#define stmmac_core_init(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, core_init, __args)
+#define stmmac_mac_set(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, set_mac, __args)
+#define stmmac_rx_ipc(__priv, __args...) \
+ stmmac_do_callback(__priv, mac, rx_ipc, __args)
+#define stmmac_rx_queue_enable(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, rx_queue_enable, __args)
+#define stmmac_rx_queue_prio(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, rx_queue_prio, __args)
+#define stmmac_tx_queue_prio(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, tx_queue_prio, __args)
+#define stmmac_rx_queue_routing(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, rx_queue_routing, __args)
+#define stmmac_prog_mtl_rx_algorithms(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, prog_mtl_rx_algorithms, __args)
+#define stmmac_prog_mtl_tx_algorithms(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, prog_mtl_tx_algorithms, __args)
+#define stmmac_set_mtl_tx_queue_weight(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, set_mtl_tx_queue_weight, __args)
+#define stmmac_map_mtl_to_dma(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, map_mtl_to_dma, __args)
+#define stmmac_config_cbs(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, config_cbs, __args)
+#define stmmac_dump_mac_regs(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, dump_regs, __args)
+#define stmmac_host_irq_status(__priv, __args...) \
+ stmmac_do_callback(__priv, mac, host_irq_status, __args)
+#define stmmac_host_mtl_irq_status(__priv, __args...) \
+ stmmac_do_callback(__priv, mac, host_mtl_irq_status, __args)
+#define stmmac_set_filter(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, set_filter, __args)
+#define stmmac_flow_ctrl(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, flow_ctrl, __args)
+#define stmmac_pmt(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, pmt, __args)
+#define stmmac_set_umac_addr(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, set_umac_addr, __args)
+#define stmmac_get_umac_addr(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, get_umac_addr, __args)
+#define stmmac_set_eee_mode(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, set_eee_mode, __args)
+#define stmmac_reset_eee_mode(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, reset_eee_mode, __args)
+#define stmmac_set_eee_timer(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, set_eee_timer, __args)
+#define stmmac_set_eee_pls(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, set_eee_pls, __args)
+#define stmmac_mac_debug(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, debug, __args)
+#define stmmac_pcs_ctrl_ane(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, pcs_ctrl_ane, __args)
+#define stmmac_pcs_rane(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, pcs_rane, __args)
+#define stmmac_pcs_get_adv_lp(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, pcs_get_adv_lp, __args)
+#define stmmac_safety_feat_config(__priv, __args...) \
+ stmmac_do_callback(__priv, mac, safety_feat_config, __args)
+#define stmmac_safety_feat_irq_status(__priv, __args...) \
+ stmmac_do_callback(__priv, mac, safety_feat_irq_status, __args)
+#define stmmac_safety_feat_dump(__priv, __args...) \
+ stmmac_do_callback(__priv, mac, safety_feat_dump, __args)
+#define stmmac_rxp_config(__priv, __args...) \
+ stmmac_do_callback(__priv, mac, rxp_config, __args)
+#define stmmac_flex_pps_config(__priv, __args...) \
+ stmmac_do_callback(__priv, mac, flex_pps_config, __args)
+
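The ethtool hunks later in this patch depend on that returned status: a capability test such as if (priv->hw->mac->pcs_get_adv_lp) turns into a call whose return value is checked. A sketch of the resulting pattern (mirroring the stmmac_ethtool.c changes below):

	struct rgmii_adv adv;

	/* 0 when the core provides the callback, -EINVAL otherwise */
	if (stmmac_pcs_get_adv_lp(priv, priv->ioaddr, &adv))
		return -EOPNOTSUPP;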
+/* PTP and HW Timer helpers */
+struct stmmac_hwtimestamp {
+ void (*config_hw_tstamping) (void __iomem *ioaddr, u32 data);
+ void (*config_sub_second_increment)(void __iomem *ioaddr, u32 ptp_clock,
+ int gmac4, u32 *ssinc);
+ int (*init_systime) (void __iomem *ioaddr, u32 sec, u32 nsec);
+ int (*config_addend) (void __iomem *ioaddr, u32 addend);
+ int (*adjust_systime) (void __iomem *ioaddr, u32 sec, u32 nsec,
+ int add_sub, int gmac4);
+ void (*get_systime) (void __iomem *ioaddr, u64 *systime);
+};
+
+#define stmmac_config_hw_tstamping(__priv, __args...) \
+ stmmac_do_void_callback(__priv, ptp, config_hw_tstamping, __args)
+#define stmmac_config_sub_second_increment(__priv, __args...) \
+ stmmac_do_void_callback(__priv, ptp, config_sub_second_increment, __args)
+#define stmmac_init_systime(__priv, __args...) \
+ stmmac_do_callback(__priv, ptp, init_systime, __args)
+#define stmmac_config_addend(__priv, __args...) \
+ stmmac_do_callback(__priv, ptp, config_addend, __args)
+#define stmmac_adjust_systime(__priv, __args...) \
+ stmmac_do_callback(__priv, ptp, adjust_systime, __args)
+#define stmmac_get_systime(__priv, __args...) \
+ stmmac_do_void_callback(__priv, ptp, get_systime, __args)
+
+/* Helpers to manage the descriptors for chain and ring modes */
+struct stmmac_mode_ops {
+ void (*init) (void *des, dma_addr_t phy_addr, unsigned int size,
+ unsigned int extend_desc);
+ unsigned int (*is_jumbo_frm) (int len, int enh_desc);
+ int (*jumbo_frm)(void *priv, struct sk_buff *skb, int csum);
+ int (*set_16kib_bfsize)(int mtu);
+ void (*init_desc3)(struct dma_desc *p);
+ void (*refill_desc3) (void *priv, struct dma_desc *p);
+ void (*clean_desc3) (void *priv, struct dma_desc *p);
+};
+
+#define stmmac_mode_init(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mode, init, __args)
+#define stmmac_is_jumbo_frm(__priv, __args...) \
+ stmmac_do_callback(__priv, mode, is_jumbo_frm, __args)
+#define stmmac_jumbo_frm(__priv, __args...) \
+ stmmac_do_callback(__priv, mode, jumbo_frm, __args)
+#define stmmac_set_16kib_bfsize(__priv, __args...) \
+ stmmac_do_callback(__priv, mode, set_16kib_bfsize, __args)
+#define stmmac_init_desc3(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mode, init_desc3, __args)
+#define stmmac_refill_desc3(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mode, refill_desc3, __args)
+#define stmmac_clean_desc3(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mode, clean_desc3, __args)
+
+struct stmmac_priv;
+struct tc_cls_u32_offload;
+
+struct stmmac_tc_ops {
+ int (*init)(struct stmmac_priv *priv);
+ int (*setup_cls_u32)(struct stmmac_priv *priv,
+ struct tc_cls_u32_offload *cls);
+};
+
+#define stmmac_tc_init(__priv, __args...) \
+ stmmac_do_callback(__priv, tc, init, __args)
+#define stmmac_tc_setup_cls_u32(__priv, __args...) \
+ stmmac_do_callback(__priv, tc, setup_cls_u32, __args)
+
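The TC callbacks take the driver's private structure as their own first argument, so call sites pass priv twice: once for the wrapper to locate the ops table, once as the callback parameter. A hedged sketch of the expected call site (the real one lives in stmmac_main.c, outside this excerpt):

	/* note priv appears twice by design */
	ret = stmmac_tc_setup_cls_u32(priv, priv, cls);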
+struct stmmac_regs_off {
+ u32 ptp_off;
+ u32 mmc_off;
+};
+
+extern const struct stmmac_ops dwmac100_ops;
+extern const struct stmmac_dma_ops dwmac100_dma_ops;
+extern const struct stmmac_ops dwmac1000_ops;
+extern const struct stmmac_dma_ops dwmac1000_dma_ops;
+extern const struct stmmac_ops dwmac4_ops;
+extern const struct stmmac_dma_ops dwmac4_dma_ops;
+extern const struct stmmac_ops dwmac410_ops;
+extern const struct stmmac_dma_ops dwmac410_dma_ops;
+extern const struct stmmac_ops dwmac510_ops;
+extern const struct stmmac_tc_ops dwmac510_tc_ops;
+
+#define GMAC_VERSION 0x00000020 /* GMAC CORE Version */
+#define GMAC4_VERSION 0x00000110 /* GMAC4+ CORE Version */
+
+int stmmac_hwif_init(struct stmmac_priv *priv);
+
+#endif /* __STMMAC_HWIF_H__ */
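stmmac_hwif_init(), added in hwif.c (not part of this excerpt), is what binds the ops tables declared above to the core detected through GMAC_VERSION/GMAC4_VERSION. A simplified, hedged sketch of that dispatch; the entry layout and table contents here are illustrative only:

	struct stmmac_hwif_entry {
		bool gmac4;
		u32 min_id;			/* lowest synopsys_id served */
		const struct stmmac_desc_ops *desc;
		const struct stmmac_dma_ops *dma;
		const struct stmmac_ops *mac;
	};

	static const struct stmmac_hwif_entry stmmac_hw[] = {
		{ false, 0, &ndesc_ops, &dwmac100_dma_ops, &dwmac100_ops },
		{ false, 0, &enh_desc_ops, &dwmac1000_dma_ops, &dwmac1000_ops },
		/* dwmac4/dwmac410/dwmac510 entries would follow */
	};

	int stmmac_hwif_init(struct stmmac_priv *priv)
	{
		const struct stmmac_hwif_entry *entry;
		int i;

		/* walk backwards so the newest matching core wins */
		for (i = ARRAY_SIZE(stmmac_hw) - 1; i >= 0; i--) {
			entry = &stmmac_hw[i];

			if (priv->plat->has_gmac4 != entry->gmac4)
				continue;
			if (priv->synopsys_id < entry->min_id)
				continue;

			priv->hw->desc = entry->desc;
			priv->hw->dma = entry->dma;
			priv->hw->mac = entry->mac;
			return 0;
		}

		return -EINVAL;
	}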
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
index ebd9e5e00f16..de65bb29feba 100644
--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
@@ -168,7 +168,7 @@ static void ndesc_set_tx_owner(struct dma_desc *p)
p->des0 |= cpu_to_le32(TDES0_OWN);
}
-static void ndesc_set_rx_owner(struct dma_desc *p)
+static void ndesc_set_rx_owner(struct dma_desc *p, int disable_rx_ic)
{
p->des0 |= cpu_to_le32(RDES0_OWN);
}
@@ -253,7 +253,7 @@ static int ndesc_get_tx_timestamp_status(struct dma_desc *p)
return (le32_to_cpu(p->des0) & TDES0_TIME_STAMP_STATUS) >> 17;
}
-static u64 ndesc_get_timestamp(void *desc, u32 ats)
+static void ndesc_get_timestamp(void *desc, u32 ats, u64 *ts)
{
struct dma_desc *p = (struct dma_desc *)desc;
u64 ns;
@@ -262,7 +262,7 @@ static u64 ndesc_get_timestamp(void *desc, u32 ats)
/* convert the high/sec timestamp value to nanoseconds */
ns += le32_to_cpu(p->des3) * 1000000000ULL;
- return ns;
+ *ts = ns;
}
static int ndesc_get_rx_timestamp_status(void *desc, void *next_desc, u32 ats)
@@ -297,6 +297,21 @@ static void ndesc_display_ring(void *head, unsigned int size, bool rx)
pr_info("\n");
}
+static void ndesc_get_addr(struct dma_desc *p, unsigned int *addr)
+{
+ *addr = le32_to_cpu(p->des2);
+}
+
+static void ndesc_set_addr(struct dma_desc *p, dma_addr_t addr)
+{
+ p->des2 = cpu_to_le32(addr);
+}
+
+static void ndesc_clear(struct dma_desc *p)
+{
+ p->des2 = 0;
+}
+
const struct stmmac_desc_ops ndesc_ops = {
.tx_status = ndesc_get_tx_status,
.rx_status = ndesc_get_rx_status,
@@ -316,4 +331,7 @@ const struct stmmac_desc_ops ndesc_ops = {
.get_timestamp = ndesc_get_timestamp,
.get_rx_timestamp_status = ndesc_get_rx_timestamp_status,
.display_ring = ndesc_display_ring,
+ .get_addr = ndesc_get_addr,
+ .set_addr = ndesc_set_addr,
+ .clear = ndesc_clear,
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
index 28e4b5d50ce6..a7ffc73fffe8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
@@ -24,7 +24,7 @@
#include "stmmac.h"
-static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
+static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
{
struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)p;
unsigned int nopaged_len = skb_headlen(skb);
@@ -58,9 +58,8 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
tx_q->tx_skbuff_dma[entry].is_jumbo = true;
desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
- priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum,
- STMMAC_RING_MODE, 0,
- false, skb->len);
+ stmmac_prepare_tx_desc(priv, desc, 1, bmax, csum,
+ STMMAC_RING_MODE, 0, false, skb->len);
tx_q->tx_skbuff[entry] = NULL;
entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
@@ -79,9 +78,8 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
tx_q->tx_skbuff_dma[entry].is_jumbo = true;
desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
- priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
- STMMAC_RING_MODE, 1,
- true, skb->len);
+ stmmac_prepare_tx_desc(priv, desc, 0, len, csum,
+ STMMAC_RING_MODE, 1, true, skb->len);
} else {
des2 = dma_map_single(priv->device, skb->data,
nopaged_len, DMA_TO_DEVICE);
@@ -92,9 +90,8 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
tx_q->tx_skbuff_dma[entry].len = nopaged_len;
tx_q->tx_skbuff_dma[entry].is_jumbo = true;
desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
- priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum,
- STMMAC_RING_MODE, 0,
- true, skb->len);
+ stmmac_prepare_tx_desc(priv, desc, 1, nopaged_len, csum,
+ STMMAC_RING_MODE, 0, true, skb->len);
}
tx_q->cur_tx = entry;
@@ -102,7 +99,7 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
return entry;
}
-static unsigned int stmmac_is_jumbo_frm(int len, int enh_desc)
+static unsigned int is_jumbo_frm(int len, int enh_desc)
{
unsigned int ret = 0;
@@ -112,7 +109,7 @@ static unsigned int stmmac_is_jumbo_frm(int len, int enh_desc)
return ret;
}
-static void stmmac_refill_desc3(void *priv_ptr, struct dma_desc *p)
+static void refill_desc3(void *priv_ptr, struct dma_desc *p)
{
struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr;
@@ -122,12 +119,12 @@ static void stmmac_refill_desc3(void *priv_ptr, struct dma_desc *p)
}
/* In ring mode we need to fill desc3 because it is used as a buffer */
-static void stmmac_init_desc3(struct dma_desc *p)
+static void init_desc3(struct dma_desc *p)
{
p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB);
}
-static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p)
+static void clean_desc3(void *priv_ptr, struct dma_desc *p)
{
struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)priv_ptr;
struct stmmac_priv *priv = tx_q->priv_data;
@@ -140,7 +137,7 @@ static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p)
p->des3 = 0;
}
-static int stmmac_set_16kib_bfsize(int mtu)
+static int set_16kib_bfsize(int mtu)
{
int ret = 0;
if (unlikely(mtu >= BUF_SIZE_8KiB))
@@ -149,10 +146,10 @@ static int stmmac_set_16kib_bfsize(int mtu)
}
const struct stmmac_mode_ops ring_mode_ops = {
- .is_jumbo_frm = stmmac_is_jumbo_frm,
- .jumbo_frm = stmmac_jumbo_frm,
- .refill_desc3 = stmmac_refill_desc3,
- .init_desc3 = stmmac_init_desc3,
- .clean_desc3 = stmmac_clean_desc3,
- .set_16kib_bfsize = stmmac_set_16kib_bfsize,
+ .is_jumbo_frm = is_jumbo_frm,
+ .jumbo_frm = jumbo_frm,
+ .refill_desc3 = refill_desc3,
+ .init_desc3 = init_desc3,
+ .clean_desc3 = clean_desc3,
+ .set_16kib_bfsize = set_16kib_bfsize,
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index da50451f8999..025efbf6145c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -76,11 +76,43 @@ struct stmmac_rx_queue {
struct napi_struct napi ____cacheline_aligned_in_smp;
};
+struct stmmac_tc_entry {
+ bool in_use;
+ bool in_hw;
+ bool is_last;
+ bool is_frag;
+ void *frag_ptr;
+ unsigned int table_pos;
+ u32 handle;
+ u32 prio;
+ struct {
+ u32 match_data;
+ u32 match_en;
+ u8 af:1;
+ u8 rf:1;
+ u8 im:1;
+ u8 nc:1;
+ u8 res1:4;
+ u8 frame_offset;
+ u8 ok_index;
+ u8 dma_ch_no;
+ u32 res2;
+ } __packed val;
+};
+
+#define STMMAC_PPS_MAX 4
+struct stmmac_pps_cfg {
+ bool available;
+ struct timespec64 start;
+ struct timespec64 period;
+};
+
struct stmmac_priv {
/* Frequently used values are kept adjacent for cache effect */
u32 tx_count_frames;
u32 tx_coal_frames;
u32 tx_coal_timer;
+ bool tx_timer_armed;
int tx_coalesce;
int hwts_tx_en;
@@ -97,7 +129,7 @@ struct stmmac_priv {
struct net_device *dev;
struct device *device;
struct mac_device_info *hw;
- spinlock_t lock;
+ struct mutex lock;
/* RX Queue */
struct stmmac_rx_queue rx_queue[MTL_MAX_RX_QUEUES];
@@ -130,10 +162,13 @@ struct stmmac_priv {
int eee_active;
int tx_lpi_timer;
unsigned int mode;
+ unsigned int chain_mode;
int extend_desc;
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_clock_ops;
unsigned int default_addend;
+ u32 sub_second_inc;
+ u32 systime_flags;
u32 adv_ts;
int use_riwt;
int irq_wake;
@@ -150,6 +185,14 @@ struct stmmac_priv {
unsigned long state;
struct workqueue_struct *wq;
struct work_struct service_task;
+
+ /* TC Handling */
+ unsigned int tc_entries_max;
+ unsigned int tc_off_max;
+ struct stmmac_tc_entry *tc_entries;
+
+ /* Pulse Per Second output */
+ struct stmmac_pps_cfg pps[STMMAC_PPS_MAX];
};
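The sub_second_inc and systime_flags values cached above are saved precisely so that a PPS request can later be handed to the flex_pps_config callback together with the per-output config. A hedged sketch of that hand-off (index and enable are caller-supplied; the real caller sits in the PTP code, outside this excerpt):

	struct stmmac_pps_cfg *cfg = &priv->pps[index];
	int ret;

	ret = stmmac_flex_pps_config(priv, priv->ioaddr, index, cfg, enable,
				     priv->sub_second_inc,
				     priv->systime_flags);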
enum stmmac_state {
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 2c6ed47704fc..5710864fa809 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -291,11 +291,9 @@ static int stmmac_ethtool_get_link_ksettings(struct net_device *dev,
cmd->base.speed = priv->xstats.pcs_speed;
/* Get and convert ADV/LP_ADV from the HW AN registers */
- if (!priv->hw->mac->pcs_get_adv_lp)
+ if (stmmac_pcs_get_adv_lp(priv, priv->ioaddr, &adv))
return -EOPNOTSUPP; /* should never happen */
- priv->hw->mac->pcs_get_adv_lp(priv->ioaddr, &adv);
-
/* Encoding of PSE bits is defined in 802.3z, 37.2.1.4 */
ethtool_convert_link_mode_to_legacy_u32(
@@ -392,13 +390,9 @@ stmmac_ethtool_set_link_ksettings(struct net_device *dev,
ADVERTISED_10baseT_Half |
ADVERTISED_10baseT_Full);
- spin_lock(&priv->lock);
-
- if (priv->hw->mac->pcs_ctrl_ane)
- priv->hw->mac->pcs_ctrl_ane(priv->ioaddr, 1,
- priv->hw->ps, 0);
-
- spin_unlock(&priv->lock);
+ mutex_lock(&priv->lock);
+ stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
+ mutex_unlock(&priv->lock);
return 0;
}
@@ -442,8 +436,8 @@ static void stmmac_ethtool_gregs(struct net_device *dev,
memset(reg_space, 0x0, REG_SPACE_SIZE);
- priv->hw->mac->dump_regs(priv->hw, reg_space);
- priv->hw->dma->dump_regs(priv->ioaddr, reg_space);
+ stmmac_dump_mac_regs(priv, priv->hw, reg_space);
+ stmmac_dump_dma_regs(priv, priv->ioaddr, reg_space);
/* Copy DMA registers to where ethtool expects them */
memcpy(&reg_space[ETHTOOL_DMA_OFFSET], &reg_space[DMA_BUS_MODE / 4],
NUM_DWMAC1000_DMA_REGS * 4);
@@ -454,15 +448,13 @@ stmmac_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
struct stmmac_priv *priv = netdev_priv(netdev);
+ struct rgmii_adv adv_lp;
pause->rx_pause = 0;
pause->tx_pause = 0;
- if (priv->hw->pcs && priv->hw->mac->pcs_get_adv_lp) {
- struct rgmii_adv adv_lp;
-
+ if (priv->hw->pcs && !stmmac_pcs_get_adv_lp(priv, priv->ioaddr, &adv_lp)) {
pause->autoneg = 1;
- priv->hw->mac->pcs_get_adv_lp(priv->ioaddr, &adv_lp);
if (!adv_lp.pause)
return;
} else {
@@ -488,12 +480,10 @@ stmmac_set_pauseparam(struct net_device *netdev,
u32 tx_cnt = priv->plat->tx_queues_to_use;
struct phy_device *phy = netdev->phydev;
int new_pause = FLOW_OFF;
+ struct rgmii_adv adv_lp;
- if (priv->hw->pcs && priv->hw->mac->pcs_get_adv_lp) {
- struct rgmii_adv adv_lp;
-
+ if (priv->hw->pcs && !stmmac_pcs_get_adv_lp(priv, priv->ioaddr, &adv_lp)) {
pause->autoneg = 1;
- priv->hw->mac->pcs_get_adv_lp(priv->ioaddr, &adv_lp);
if (!adv_lp.pause)
return -EOPNOTSUPP;
} else {
@@ -515,37 +505,32 @@ stmmac_set_pauseparam(struct net_device *netdev,
return phy_start_aneg(phy);
}
- priv->hw->mac->flow_ctrl(priv->hw, phy->duplex, priv->flow_ctrl,
- priv->pause, tx_cnt);
+ stmmac_flow_ctrl(priv, priv->hw, phy->duplex, priv->flow_ctrl,
+ priv->pause, tx_cnt);
return 0;
}
static void stmmac_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *dummy, u64 *data)
{
- const char *(*dump)(struct stmmac_safety_stats *stats, int index,
- unsigned long *count);
struct stmmac_priv *priv = netdev_priv(dev);
u32 rx_queues_count = priv->plat->rx_queues_to_use;
u32 tx_queues_count = priv->plat->tx_queues_to_use;
unsigned long count;
- int i, j = 0;
-
- if (priv->dma_cap.asp && priv->hw->mac->safety_feat_dump) {
- dump = priv->hw->mac->safety_feat_dump;
+ int i, j = 0, ret;
+ if (priv->dma_cap.asp) {
for (i = 0; i < STMMAC_SAFETY_FEAT_SIZE; i++) {
- if (dump(&priv->sstats, i, &count))
+ if (!stmmac_safety_feat_dump(priv, &priv->sstats, i,
+ &count, NULL))
data[j++] = count;
}
}
/* Update the DMA HW counters for dwmac10/100 */
- if (priv->hw->dma->dma_diagnostic_fr)
- priv->hw->dma->dma_diagnostic_fr(&dev->stats,
- (void *) &priv->xstats,
- priv->ioaddr);
- else {
+ ret = stmmac_dma_diagnostic_fr(priv, &dev->stats, (void *) &priv->xstats,
+ priv->ioaddr);
+ if (ret) {
/* If supported, expose the MMC counters of the newer GMAC chips */
if (priv->dma_cap.rmon) {
dwmac_mmc_read(priv->mmcaddr, &priv->mmc);
@@ -565,11 +550,10 @@ static void stmmac_get_ethtool_stats(struct net_device *dev,
priv->xstats.phy_eee_wakeup_error_n = val;
}
- if ((priv->hw->mac->debug) &&
- (priv->synopsys_id >= DWMAC_CORE_3_50))
- priv->hw->mac->debug(priv->ioaddr,
- (void *)&priv->xstats,
- rx_queues_count, tx_queues_count);
+ if (priv->synopsys_id >= DWMAC_CORE_3_50)
+ stmmac_mac_debug(priv, priv->ioaddr,
+ (void *)&priv->xstats,
+ rx_queues_count, tx_queues_count);
}
for (i = 0; i < STMMAC_STATS_LEN; i++) {
char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset;
@@ -581,8 +565,6 @@ static void stmmac_get_ethtool_stats(struct net_device *dev,
static int stmmac_get_sset_count(struct net_device *netdev, int sset)
{
struct stmmac_priv *priv = netdev_priv(netdev);
- const char *(*dump)(struct stmmac_safety_stats *stats, int index,
- unsigned long *count);
int i, len, safety_len = 0;
switch (sset) {
@@ -591,11 +573,11 @@ static int stmmac_get_sset_count(struct net_device *netdev, int sset)
if (priv->dma_cap.rmon)
len += STMMAC_MMC_STATS_LEN;
- if (priv->dma_cap.asp && priv->hw->mac->safety_feat_dump) {
- dump = priv->hw->mac->safety_feat_dump;
-
+ if (priv->dma_cap.asp) {
for (i = 0; i < STMMAC_SAFETY_FEAT_SIZE; i++) {
- if (dump(&priv->sstats, i, NULL))
+ if (!stmmac_safety_feat_dump(priv,
+ &priv->sstats, i,
+ NULL, NULL))
safety_len++;
}
@@ -613,17 +595,15 @@ static void stmmac_get_strings(struct net_device *dev, u32 stringset, u8 *data)
int i;
u8 *p = data;
struct stmmac_priv *priv = netdev_priv(dev);
- const char *(*dump)(struct stmmac_safety_stats *stats, int index,
- unsigned long *count);
switch (stringset) {
case ETH_SS_STATS:
- if (priv->dma_cap.asp && priv->hw->mac->safety_feat_dump) {
- dump = priv->hw->mac->safety_feat_dump;
+ if (priv->dma_cap.asp) {
for (i = 0; i < STMMAC_SAFETY_FEAT_SIZE; i++) {
- const char *desc = dump(&priv->sstats, i, NULL);
-
- if (desc) {
+ const char *desc;
+ if (!stmmac_safety_feat_dump(priv,
+ &priv->sstats, i,
+ NULL, &desc)) {
memcpy(p, desc, ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
@@ -652,12 +632,12 @@ static void stmmac_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct stmmac_priv *priv = netdev_priv(dev);
- spin_lock_irq(&priv->lock);
+ mutex_lock(&priv->lock);
if (device_can_wakeup(priv->device)) {
wol->supported = WAKE_MAGIC | WAKE_UCAST;
wol->wolopts = priv->wolopts;
}
- spin_unlock_irq(&priv->lock);
+ mutex_unlock(&priv->lock);
}
static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
@@ -686,9 +666,9 @@ static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
disable_irq_wake(priv->wol_irq);
}
- spin_lock_irq(&priv->lock);
+ mutex_lock(&priv->lock);
priv->wolopts = wol->wolopts;
- spin_unlock_irq(&priv->lock);
+ mutex_unlock(&priv->lock);
return 0;
}
@@ -810,7 +790,7 @@ static int stmmac_set_coalesce(struct net_device *dev,
priv->tx_coal_frames = ec->tx_max_coalesced_frames;
priv->tx_coal_timer = ec->tx_coalesce_usecs;
priv->rx_riwt = rx_riwt;
- priv->hw->dma->rx_watchdog(priv->ioaddr, priv->rx_riwt, rx_cnt);
+ stmmac_rx_watchdog(priv, priv->ioaddr, priv->rx_riwt, rx_cnt);
return 0;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
index 08c19ebd5306..8d9cc2157afd 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
@@ -24,13 +24,13 @@
#include "common.h"
#include "stmmac_ptp.h"
-static void stmmac_config_hw_tstamping(void __iomem *ioaddr, u32 data)
+static void config_hw_tstamping(void __iomem *ioaddr, u32 data)
{
writel(data, ioaddr + PTP_TCR);
}
-static u32 stmmac_config_sub_second_increment(void __iomem *ioaddr,
- u32 ptp_clock, int gmac4)
+static void config_sub_second_increment(void __iomem *ioaddr,
+ u32 ptp_clock, int gmac4, u32 *ssinc)
{
u32 value = readl(ioaddr + PTP_TCR);
unsigned long data;
@@ -57,10 +57,11 @@ static u32 stmmac_config_sub_second_increment(void __iomem *ioaddr,
writel(reg_value, ioaddr + PTP_SSIR);
- return data;
+ if (ssinc)
+ *ssinc = data;
}
-static int stmmac_init_systime(void __iomem *ioaddr, u32 sec, u32 nsec)
+static int init_systime(void __iomem *ioaddr, u32 sec, u32 nsec)
{
int limit;
u32 value;
@@ -85,7 +86,7 @@ static int stmmac_init_systime(void __iomem *ioaddr, u32 sec, u32 nsec)
return 0;
}
-static int stmmac_config_addend(void __iomem *ioaddr, u32 addend)
+static int config_addend(void __iomem *ioaddr, u32 addend)
{
u32 value;
int limit;
@@ -109,8 +110,8 @@ static int stmmac_config_addend(void __iomem *ioaddr, u32 addend)
return 0;
}
-static int stmmac_adjust_systime(void __iomem *ioaddr, u32 sec, u32 nsec,
- int add_sub, int gmac4)
+static int adjust_systime(void __iomem *ioaddr, u32 sec, u32 nsec,
+ int add_sub, int gmac4)
{
u32 value;
int limit;
@@ -152,7 +153,7 @@ static int stmmac_adjust_systime(void __iomem *ioaddr, u32 sec, u32 nsec,
return 0;
}
-static u64 stmmac_get_systime(void __iomem *ioaddr)
+static void get_systime(void __iomem *ioaddr, u64 *systime)
{
u64 ns;
@@ -161,14 +162,15 @@ static u64 stmmac_get_systime(void __iomem *ioaddr)
/* Get the TSS and convert the sec time value to nanoseconds */
ns += readl(ioaddr + PTP_STSR) * 1000000000ULL;
- return ns;
+ if (systime)
+ *systime = ns;
}
const struct stmmac_hwtimestamp stmmac_ptp = {
- .config_hw_tstamping = stmmac_config_hw_tstamping,
- .init_systime = stmmac_init_systime,
- .config_sub_second_increment = stmmac_config_sub_second_increment,
- .config_addend = stmmac_config_addend,
- .adjust_systime = stmmac_adjust_systime,
- .get_systime = stmmac_get_systime,
+ .config_hw_tstamping = config_hw_tstamping,
+ .init_systime = init_systime,
+ .config_sub_second_increment = config_sub_second_increment,
+ .config_addend = config_addend,
+ .adjust_systime = adjust_systime,
+ .get_systime = get_systime,
};
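With the return value moved into an output parameter, get_systime() callers now follow the same pattern as the descriptor timestamp helpers; a sketch of the gettime path (the real caller sits in stmmac_ptp.c, not shown here):

	u64 systime = 0;

	stmmac_get_systime(priv, priv->ptpaddr, &systime);
	*ts = ns_to_timespec64(systime);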
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index b65e2d144698..11fb7c777d89 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -45,11 +45,13 @@
#include <linux/seq_file.h>
#endif /* CONFIG_DEBUG_FS */
#include <linux/net_tstamp.h>
+#include <net/pkt_cls.h>
#include "stmmac_ptp.h"
#include "stmmac.h"
#include <linux/reset.h>
#include <linux/of_mdio.h>
#include "dwmac1000.h"
+#include "hwif.h"
#define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x)
#define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
@@ -335,8 +337,8 @@ static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
/* Check and enter in LPI mode */
if (!priv->tx_path_in_lpi_mode)
- priv->hw->mac->set_eee_mode(priv->hw,
- priv->plat->en_tx_lpi_clockgating);
+ stmmac_set_eee_mode(priv, priv->hw,
+ priv->plat->en_tx_lpi_clockgating);
}
/**
@@ -347,7 +349,7 @@ static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
*/
void stmmac_disable_eee_mode(struct stmmac_priv *priv)
{
- priv->hw->mac->reset_eee_mode(priv->hw);
+ stmmac_reset_eee_mode(priv, priv->hw);
del_timer_sync(&priv->eee_ctrl_timer);
priv->tx_path_in_lpi_mode = false;
}
@@ -379,7 +381,6 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
{
struct net_device *ndev = priv->dev;
int interface = priv->plat->interface;
- unsigned long flags;
bool ret = false;
if ((interface != PHY_INTERFACE_MODE_MII) &&
@@ -406,19 +407,19 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
* changed).
* In that case the driver disables its own timers.
*/
- spin_lock_irqsave(&priv->lock, flags);
+ mutex_lock(&priv->lock);
if (priv->eee_active) {
netdev_dbg(priv->dev, "disable EEE\n");
del_timer_sync(&priv->eee_ctrl_timer);
- priv->hw->mac->set_eee_timer(priv->hw, 0,
- tx_lpi_timer);
+ stmmac_set_eee_timer(priv, priv->hw, 0,
+ tx_lpi_timer);
}
priv->eee_active = 0;
- spin_unlock_irqrestore(&priv->lock, flags);
+ mutex_unlock(&priv->lock);
goto out;
}
/* Activate the EEE and start timers */
- spin_lock_irqsave(&priv->lock, flags);
+ mutex_lock(&priv->lock);
if (!priv->eee_active) {
priv->eee_active = 1;
timer_setup(&priv->eee_ctrl_timer,
@@ -426,15 +427,14 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
mod_timer(&priv->eee_ctrl_timer,
STMMAC_LPI_T(eee_timer));
- priv->hw->mac->set_eee_timer(priv->hw,
- STMMAC_DEFAULT_LIT_LS,
- tx_lpi_timer);
+ stmmac_set_eee_timer(priv, priv->hw,
+ STMMAC_DEFAULT_LIT_LS, tx_lpi_timer);
}
/* Set HW EEE according to the speed */
- priv->hw->mac->set_eee_pls(priv->hw, ndev->phydev->link);
+ stmmac_set_eee_pls(priv, priv->hw, ndev->phydev->link);
ret = true;
- spin_unlock_irqrestore(&priv->lock, flags);
+ mutex_unlock(&priv->lock);
netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
}
@@ -464,9 +464,9 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
return;
/* check tx tstamp status */
- if (priv->hw->desc->get_tx_timestamp_status(p)) {
+ if (stmmac_get_tx_timestamp_status(priv, p)) {
/* get the valid tstamp */
- ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
+ stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
shhwtstamp.hwtstamp = ns_to_ktime(ns);
@@ -502,8 +502,8 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
desc = np;
/* Check if timestamp is available */
- if (priv->hw->desc->get_rx_timestamp_status(p, np, priv->adv_ts)) {
- ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);
+ if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
+ stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
shhwtstamp = skb_hwtstamps(skb);
memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
@@ -707,20 +707,24 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
if (!priv->hwts_tx_en && !priv->hwts_rx_en)
- priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, 0);
+ stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0);
else {
value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
tstamp_all | ptp_v2 | ptp_over_ethernet |
ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
ts_master_en | snap_type_sel);
- priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, value);
+ stmmac_config_hw_tstamping(priv, priv->ptpaddr, value);
/* program Sub Second Increment reg */
- sec_inc = priv->hw->ptp->config_sub_second_increment(
- priv->ptpaddr, priv->plat->clk_ptp_rate,
- priv->plat->has_gmac4);
+ stmmac_config_sub_second_increment(priv,
+ priv->ptpaddr, priv->plat->clk_ptp_rate,
+ priv->plat->has_gmac4, &sec_inc);
temp = div_u64(1000000000ULL, sec_inc);
+ /* Store sub second increment and flags for later use */
+ priv->sub_second_inc = sec_inc;
+ priv->systime_flags = value;
+
/* calculate the default addend value:
* formula is:
* addend = (2^32)/freq_div_ratio;
@@ -728,15 +732,14 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
*/
temp = (u64)(temp << 32);
priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
- priv->hw->ptp->config_addend(priv->ptpaddr,
- priv->default_addend);
+ stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
/* initialize system time */
ktime_get_real_ts64(&now);
/* lower 32 bits of tv_sec are safe until y2106 */
- priv->hw->ptp->init_systime(priv->ptpaddr, (u32)now.tv_sec,
- now.tv_nsec);
+ stmmac_init_systime(priv, priv->ptpaddr,
+ (u32)now.tv_sec, now.tv_nsec);
}
return copy_to_user(ifr->ifr_data, &config,
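The addend arithmetic above is easier to follow with numbers. A hedged example, assuming a 62.5 MHz clk_ptp_rate and a sub-second increment of 20 ns reported back through sec_inc:

	/*
	 * temp   = 1000000000 / 20             = 50000000
	 * addend = (50000000 << 32) / 62500000 = 0xCCCCCCCC (~0.8 * 2^32)
	 *
	 * Each PTP clock tick adds addend to a 32-bit accumulator; every
	 * overflow advances the sub-second counter by sec_inc ns, so the
	 * counter ticks at 62.5e6 * 0.8 = 50e6 Hz, i.e. once per 20 ns of
	 * wall time, and the frequency can be trimmed by scaling the addend.
	 */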
@@ -770,7 +773,6 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)
netdev_info(priv->dev,
"IEEE 1588-2008 Advanced Timestamp supported\n");
- priv->hw->ptp = &stmmac_ptp;
priv->hwts_tx_en = 0;
priv->hwts_rx_en = 0;
@@ -795,8 +797,8 @@ static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
{
u32 tx_cnt = priv->plat->tx_queues_to_use;
- priv->hw->mac->flow_ctrl(priv->hw, duplex, priv->flow_ctrl,
- priv->pause, tx_cnt);
+ stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
+ priv->pause, tx_cnt);
}
/**
@@ -812,13 +814,12 @@ static void stmmac_adjust_link(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
struct phy_device *phydev = dev->phydev;
- unsigned long flags;
bool new_state = false;
if (!phydev)
return;
- spin_lock_irqsave(&priv->lock, flags);
+ mutex_lock(&priv->lock);
if (phydev->link) {
u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
@@ -877,7 +878,7 @@ static void stmmac_adjust_link(struct net_device *dev)
if (new_state && netif_msg_link(priv))
phy_print_status(phydev);
- spin_unlock_irqrestore(&priv->lock, flags);
+ mutex_unlock(&priv->lock);
if (phydev->is_pseudo_fixed_link)
/* Stop PHY layer to call the hook to adjust the link in case
@@ -1008,7 +1009,7 @@ static void stmmac_display_rx_rings(struct stmmac_priv *priv)
head_rx = (void *)rx_q->dma_rx;
/* Display RX ring */
- priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true);
+ stmmac_display_ring(priv, head_rx, DMA_RX_SIZE, true);
}
}
@@ -1029,7 +1030,7 @@ static void stmmac_display_tx_rings(struct stmmac_priv *priv)
else
head_tx = (void *)tx_q->dma_tx;
- priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false);
+ stmmac_display_ring(priv, head_tx, DMA_TX_SIZE, false);
}
}
@@ -1073,13 +1074,13 @@ static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
/* Clear the RX descriptors */
for (i = 0; i < DMA_RX_SIZE; i++)
if (priv->extend_desc)
- priv->hw->desc->init_rx_desc(&rx_q->dma_erx[i].basic,
- priv->use_riwt, priv->mode,
- (i == DMA_RX_SIZE - 1));
+ stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
+ priv->use_riwt, priv->mode,
+ (i == DMA_RX_SIZE - 1));
else
- priv->hw->desc->init_rx_desc(&rx_q->dma_rx[i],
- priv->use_riwt, priv->mode,
- (i == DMA_RX_SIZE - 1));
+ stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
+ priv->use_riwt, priv->mode,
+ (i == DMA_RX_SIZE - 1));
}
/**
@@ -1097,13 +1098,11 @@ static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
/* Clear the TX descriptors */
for (i = 0; i < DMA_TX_SIZE; i++)
if (priv->extend_desc)
- priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
- priv->mode,
- (i == DMA_TX_SIZE - 1));
+ stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
+ priv->mode, (i == DMA_TX_SIZE - 1));
else
- priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
- priv->mode,
- (i == DMA_TX_SIZE - 1));
+ stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
+ priv->mode, (i == DMA_TX_SIZE - 1));
}
/**
@@ -1159,14 +1158,10 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
return -EINVAL;
}
- if (priv->synopsys_id >= DWMAC_CORE_4_00)
- p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
- else
- p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
+ stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[i]);
- if ((priv->hw->mode->init_desc3) &&
- (priv->dma_buf_sz == BUF_SIZE_16KiB))
- priv->hw->mode->init_desc3(p);
+ if (priv->dma_buf_sz == BUF_SIZE_16KiB)
+ stmmac_init_desc3(priv, p);
return 0;
}
@@ -1232,13 +1227,14 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
{
struct stmmac_priv *priv = netdev_priv(dev);
u32 rx_count = priv->plat->rx_queues_to_use;
- unsigned int bfsize = 0;
int ret = -ENOMEM;
+ int bfsize = 0;
int queue;
int i;
- if (priv->hw->mode->set_16kib_bfsize)
- bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu);
+ bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
+ if (bfsize < 0)
+ bfsize = 0;
if (bfsize < BUF_SIZE_16KiB)
bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
@@ -1282,13 +1278,11 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
/* Setup the chained descriptor addresses */
if (priv->mode == STMMAC_CHAIN_MODE) {
if (priv->extend_desc)
- priv->hw->mode->init(rx_q->dma_erx,
- rx_q->dma_rx_phy,
- DMA_RX_SIZE, 1);
+ stmmac_mode_init(priv, rx_q->dma_erx,
+ rx_q->dma_rx_phy, DMA_RX_SIZE, 1);
else
- priv->hw->mode->init(rx_q->dma_rx,
- rx_q->dma_rx_phy,
- DMA_RX_SIZE, 0);
+ stmmac_mode_init(priv, rx_q->dma_rx,
+ rx_q->dma_rx_phy, DMA_RX_SIZE, 0);
}
}
@@ -1335,13 +1329,11 @@ static int init_dma_tx_desc_rings(struct net_device *dev)
/* Setup the chained descriptor addresses */
if (priv->mode == STMMAC_CHAIN_MODE) {
if (priv->extend_desc)
- priv->hw->mode->init(tx_q->dma_etx,
- tx_q->dma_tx_phy,
- DMA_TX_SIZE, 1);
+ stmmac_mode_init(priv, tx_q->dma_etx,
+ tx_q->dma_tx_phy, DMA_TX_SIZE, 1);
else
- priv->hw->mode->init(tx_q->dma_tx,
- tx_q->dma_tx_phy,
- DMA_TX_SIZE, 0);
+ stmmac_mode_init(priv, tx_q->dma_tx,
+ tx_q->dma_tx_phy, DMA_TX_SIZE, 0);
}
for (i = 0; i < DMA_TX_SIZE; i++) {
@@ -1351,14 +1343,7 @@ static int init_dma_tx_desc_rings(struct net_device *dev)
else
p = tx_q->dma_tx + i;
- if (priv->synopsys_id >= DWMAC_CORE_4_00) {
- p->des0 = 0;
- p->des1 = 0;
- p->des2 = 0;
- p->des3 = 0;
- } else {
- p->des2 = 0;
- }
+ stmmac_clear_desc(priv, p);
tx_q->tx_skbuff_dma[i].buf = 0;
tx_q->tx_skbuff_dma[i].map_as_page = false;
@@ -1664,7 +1649,7 @@ static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
for (queue = 0; queue < rx_queues_count; queue++) {
mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
- priv->hw->mac->rx_queue_enable(priv->hw, mode, queue);
+ stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
}
}
@@ -1678,7 +1663,7 @@ static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
{
netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
- priv->hw->dma->start_rx(priv->ioaddr, chan);
+ stmmac_start_rx(priv, priv->ioaddr, chan);
}
/**
@@ -1691,7 +1676,7 @@ static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
{
netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
- priv->hw->dma->start_tx(priv->ioaddr, chan);
+ stmmac_start_tx(priv, priv->ioaddr, chan);
}
/**
@@ -1704,7 +1689,7 @@ static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
{
netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
- priv->hw->dma->stop_rx(priv->ioaddr, chan);
+ stmmac_stop_rx(priv, priv->ioaddr, chan);
}
/**
@@ -1717,7 +1702,7 @@ static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
{
netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
- priv->hw->dma->stop_tx(priv->ioaddr, chan);
+ stmmac_stop_tx(priv, priv->ioaddr, chan);
}
/**
@@ -1804,23 +1789,18 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
}
/* configure all channels */
- if (priv->synopsys_id >= DWMAC_CORE_4_00) {
- for (chan = 0; chan < rx_channels_count; chan++) {
- qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
+ for (chan = 0; chan < rx_channels_count; chan++) {
+ qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
- priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
- rxfifosz, qmode);
- }
+ stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
+ rxfifosz, qmode);
+ }
- for (chan = 0; chan < tx_channels_count; chan++) {
- qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
+ for (chan = 0; chan < tx_channels_count; chan++) {
+ qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
- priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan,
- txfifosz, qmode);
- }
- } else {
- priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
- rxfifosz);
+ stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
+ txfifosz, qmode);
}
}
@@ -1851,9 +1831,8 @@ static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
else
p = tx_q->dma_tx + entry;
- status = priv->hw->desc->tx_status(&priv->dev->stats,
- &priv->xstats, p,
- priv->ioaddr);
+ status = stmmac_tx_status(priv, &priv->dev->stats,
+ &priv->xstats, p, priv->ioaddr);
/* Check if the descriptor is owned by the DMA */
if (unlikely(status & tx_dma_own))
break;
@@ -1891,8 +1870,7 @@ static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
tx_q->tx_skbuff_dma[entry].map_as_page = false;
}
- if (priv->hw->mode->clean_desc3)
- priv->hw->mode->clean_desc3(tx_q, p);
+ stmmac_clean_desc3(priv, tx_q, p);
tx_q->tx_skbuff_dma[entry].last_segment = false;
tx_q->tx_skbuff_dma[entry].is_jumbo = false;
@@ -1904,7 +1882,7 @@ static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
tx_q->tx_skbuff[entry] = NULL;
}
- priv->hw->desc->release_tx_desc(p, priv->mode);
+ stmmac_release_tx_desc(priv, p, priv->mode);
entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
}
@@ -1929,16 +1907,6 @@ static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
netif_tx_unlock(priv->dev);
}
-static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv, u32 chan)
-{
- priv->hw->dma->enable_dma_irq(priv->ioaddr, chan);
-}
-
-static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv, u32 chan)
-{
- priv->hw->dma->disable_dma_irq(priv->ioaddr, chan);
-}
-
/**
* stmmac_tx_err - to manage the tx error
* @priv: driver private structure
@@ -1957,13 +1925,11 @@ static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
dma_free_tx_skbufs(priv, chan);
for (i = 0; i < DMA_TX_SIZE; i++)
if (priv->extend_desc)
- priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
- priv->mode,
- (i == DMA_TX_SIZE - 1));
+ stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
+ priv->mode, (i == DMA_TX_SIZE - 1));
else
- priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
- priv->mode,
- (i == DMA_TX_SIZE - 1));
+ stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
+ priv->mode, (i == DMA_TX_SIZE - 1));
tx_q->dirty_tx = 0;
tx_q->cur_tx = 0;
tx_q->mss = 0;
@@ -2003,31 +1969,22 @@ static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
rxfifosz /= rx_channels_count;
txfifosz /= tx_channels_count;
- if (priv->synopsys_id >= DWMAC_CORE_4_00) {
- priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
- rxfifosz, rxqmode);
- priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan,
- txfifosz, txqmode);
- } else {
- priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
- rxfifosz);
- }
+ stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
+ stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
}
static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
{
- bool ret = false;
-
- /* Safety features are only available in cores >= 5.10 */
- if (priv->synopsys_id < DWMAC_CORE_5_10)
- return ret;
- if (priv->hw->mac->safety_feat_irq_status)
- ret = priv->hw->mac->safety_feat_irq_status(priv->dev,
- priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
+ int ret;
- if (ret)
+ ret = stmmac_safety_feat_irq_status(priv, priv->dev,
+ priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
+ if (ret && (ret != -EINVAL)) {
stmmac_global_err(priv);
- return ret;
+ return true;
+ }
+
+ return false;
}
/**
@@ -2045,7 +2002,11 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
tx_channel_count : rx_channel_count;
u32 chan;
bool poll_scheduled = false;
- int status[channels_to_check];
+ int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
+
+ /* Make sure we never check beyond our status buffer. */
+ if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
+ channels_to_check = ARRAY_SIZE(status);
/* Each DMA channel can be used for rx and tx simultaneously, yet
* napi_struct is embedded in struct stmmac_rx_queue rather than in a
* stmmac_channel struct.
* Because of this, stmmac_poll currently checks (and possibly restarts)
* all tx queues rather than just a single tx queue.
* all tx queues rather than just a single tx queue.
*/
for (chan = 0; chan < channels_to_check; chan++)
- status[chan] = priv->hw->dma->dma_interrupt(priv->ioaddr,
- &priv->xstats,
- chan);
+ status[chan] = stmmac_dma_interrupt_status(priv, priv->ioaddr,
+ &priv->xstats, chan);
for (chan = 0; chan < rx_channel_count; chan++) {
if (likely(status[chan] & handle_rx)) {
struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
if (likely(napi_schedule_prep(&rx_q->napi))) {
- stmmac_disable_dma_irq(priv, chan);
+ stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
__napi_schedule(&rx_q->napi);
poll_scheduled = true;
}
@@ -2084,7 +2044,8 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
&priv->rx_queue[0];
if (likely(napi_schedule_prep(&rx_q->napi))) {
- stmmac_disable_dma_irq(priv, chan);
+ stmmac_disable_dma_irq(priv,
+ priv->ioaddr, chan);
__napi_schedule(&rx_q->napi);
}
break;
@@ -2126,14 +2087,6 @@ static void stmmac_mmc_setup(struct stmmac_priv *priv)
unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
- if (priv->synopsys_id >= DWMAC_CORE_4_00) {
- priv->ptpaddr = priv->ioaddr + PTP_GMAC4_OFFSET;
- priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET;
- } else {
- priv->ptpaddr = priv->ioaddr + PTP_GMAC3_X_OFFSET;
- priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET;
- }
-
dwmac_mmc_intr_all_mask(priv->mmcaddr);
if (priv->dma_cap.rmon) {
@@ -2144,32 +2097,6 @@ static void stmmac_mmc_setup(struct stmmac_priv *priv)
}
/**
- * stmmac_selec_desc_mode - to select among: normal/alternate/extend descriptors
- * @priv: driver private structure
- * Description: select the Enhanced/Alternate or Normal descriptors.
- * In case of Enhanced/Alternate, it checks if the extended descriptors are
- * supported by the HW capability register.
- */
-static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
-{
- if (priv->plat->enh_desc) {
- dev_info(priv->device, "Enhanced/Alternate descriptors\n");
-
- /* GMAC older than 3.50 has no extended descriptors */
- if (priv->synopsys_id >= DWMAC_CORE_3_50) {
- dev_info(priv->device, "Enabled extended descriptors\n");
- priv->extend_desc = 1;
- } else
- dev_warn(priv->device, "Extended descriptors not supported\n");
-
- priv->hw->desc = &enh_desc_ops;
- } else {
- dev_info(priv->device, "Normal descriptors\n");
- priv->hw->desc = &ndesc_ops;
- }
-}
-
-/**
* stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
* @priv: driver private structure
* Description:
@@ -2180,15 +2107,7 @@ static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
*/
static int stmmac_get_hw_features(struct stmmac_priv *priv)
{
- u32 ret = 0;
-
- if (priv->hw->dma->get_hw_feature) {
- priv->hw->dma->get_hw_feature(priv->ioaddr,
- &priv->dma_cap);
- ret = 1;
- }
-
- return ret;
+ return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
}
/**
@@ -2201,8 +2120,7 @@ static int stmmac_get_hw_features(struct stmmac_priv *priv)
static void stmmac_check_ether_addr(struct stmmac_priv *priv)
{
if (!is_valid_ether_addr(priv->dev->dev_addr)) {
- priv->hw->mac->get_umac_addr(priv->hw,
- priv->dev->dev_addr, 0);
+ stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
if (!is_valid_ether_addr(priv->dev->dev_addr))
eth_hw_addr_random(priv->dev);
netdev_info(priv->dev, "device MAC address %pM\n",
@@ -2222,10 +2140,9 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
{
u32 rx_channels_count = priv->plat->rx_queues_to_use;
u32 tx_channels_count = priv->plat->tx_queues_to_use;
+ u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
struct stmmac_rx_queue *rx_q;
struct stmmac_tx_queue *tx_q;
- u32 dummy_dma_rx_phy = 0;
- u32 dummy_dma_tx_phy = 0;
u32 chan = 0;
int atds = 0;
int ret = 0;
@@ -2238,59 +2155,47 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
atds = 1;
- ret = priv->hw->dma->reset(priv->ioaddr);
+ ret = stmmac_reset(priv, priv->ioaddr);
if (ret) {
dev_err(priv->device, "Failed to reset the dma\n");
return ret;
}
- if (priv->synopsys_id >= DWMAC_CORE_4_00) {
- /* DMA Configuration */
- priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
- dummy_dma_tx_phy, dummy_dma_rx_phy, atds);
+ /* DMA RX Channel Configuration */
+ for (chan = 0; chan < rx_channels_count; chan++) {
+ rx_q = &priv->rx_queue[chan];
- /* DMA RX Channel Configuration */
- for (chan = 0; chan < rx_channels_count; chan++) {
- rx_q = &priv->rx_queue[chan];
+ stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
+ rx_q->dma_rx_phy, chan);
- priv->hw->dma->init_rx_chan(priv->ioaddr,
- priv->plat->dma_cfg,
- rx_q->dma_rx_phy, chan);
+ rx_q->rx_tail_addr = rx_q->dma_rx_phy +
+ (DMA_RX_SIZE * sizeof(struct dma_desc));
+ stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
+ rx_q->rx_tail_addr, chan);
+ }
- rx_q->rx_tail_addr = rx_q->dma_rx_phy +
- (DMA_RX_SIZE * sizeof(struct dma_desc));
- priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
- rx_q->rx_tail_addr,
- chan);
- }
+ /* DMA TX Channel Configuration */
+ for (chan = 0; chan < tx_channels_count; chan++) {
+ tx_q = &priv->tx_queue[chan];
- /* DMA TX Channel Configuration */
- for (chan = 0; chan < tx_channels_count; chan++) {
- tx_q = &priv->tx_queue[chan];
+ stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
+ tx_q->dma_tx_phy, chan);
- priv->hw->dma->init_chan(priv->ioaddr,
- priv->plat->dma_cfg,
- chan);
+ tx_q->tx_tail_addr = tx_q->dma_tx_phy +
+ (DMA_TX_SIZE * sizeof(struct dma_desc));
+ stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
+ tx_q->tx_tail_addr, chan);
+ }
- priv->hw->dma->init_tx_chan(priv->ioaddr,
- priv->plat->dma_cfg,
- tx_q->dma_tx_phy, chan);
+ /* DMA CSR Channel configuration */
+ for (chan = 0; chan < dma_csr_ch; chan++)
+ stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
- tx_q->tx_tail_addr = tx_q->dma_tx_phy +
- (DMA_TX_SIZE * sizeof(struct dma_desc));
- priv->hw->dma->set_tx_tail_ptr(priv->ioaddr,
- tx_q->tx_tail_addr,
- chan);
- }
- } else {
- rx_q = &priv->rx_queue[chan];
- tx_q = &priv->tx_queue[chan];
- priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
- tx_q->dma_tx_phy, rx_q->dma_rx_phy, atds);
- }
+ /* DMA Configuration */
+ stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
- if (priv->plat->axi && priv->hw->dma->axi)
- priv->hw->dma->axi(priv->ioaddr, priv->plat->axi);
+ if (priv->plat->axi)
+ stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
return ret;
}
@@ -2336,18 +2241,14 @@ static void stmmac_set_rings_length(struct stmmac_priv *priv)
u32 chan;
/* set TX ring length */
- if (priv->hw->dma->set_tx_ring_len) {
- for (chan = 0; chan < tx_channels_count; chan++)
- priv->hw->dma->set_tx_ring_len(priv->ioaddr,
- (DMA_TX_SIZE - 1), chan);
- }
+ for (chan = 0; chan < tx_channels_count; chan++)
+ stmmac_set_tx_ring_len(priv, priv->ioaddr,
+ (DMA_TX_SIZE - 1), chan);
/* set RX ring length */
- if (priv->hw->dma->set_rx_ring_len) {
- for (chan = 0; chan < rx_channels_count; chan++)
- priv->hw->dma->set_rx_ring_len(priv->ioaddr,
- (DMA_RX_SIZE - 1), chan);
- }
+ for (chan = 0; chan < rx_channels_count; chan++)
+ stmmac_set_rx_ring_len(priv, priv->ioaddr,
+ (DMA_RX_SIZE - 1), chan);
}
/**
@@ -2363,7 +2264,7 @@ static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
for (queue = 0; queue < tx_queues_count; queue++) {
weight = priv->plat->tx_queues_cfg[queue].weight;
- priv->hw->mac->set_mtl_tx_queue_weight(priv->hw, weight, queue);
+ stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
}
}
@@ -2384,7 +2285,7 @@ static void stmmac_configure_cbs(struct stmmac_priv *priv)
if (mode_to_use == MTL_QUEUE_DCB)
continue;
- priv->hw->mac->config_cbs(priv->hw,
+ stmmac_config_cbs(priv, priv->hw,
priv->plat->tx_queues_cfg[queue].send_slope,
priv->plat->tx_queues_cfg[queue].idle_slope,
priv->plat->tx_queues_cfg[queue].high_credit,
@@ -2406,7 +2307,7 @@ static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
for (queue = 0; queue < rx_queues_count; queue++) {
chan = priv->plat->rx_queues_cfg[queue].chan;
- priv->hw->mac->map_mtl_to_dma(priv->hw, queue, chan);
+ stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
}
}
@@ -2426,7 +2327,7 @@ static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
continue;
prio = priv->plat->rx_queues_cfg[queue].prio;
- priv->hw->mac->rx_queue_prio(priv->hw, prio, queue);
+ stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
}
}
@@ -2446,7 +2347,7 @@ static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
continue;
prio = priv->plat->tx_queues_cfg[queue].prio;
- priv->hw->mac->tx_queue_prio(priv->hw, prio, queue);
+ stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
}
}
@@ -2467,7 +2368,7 @@ static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
continue;
packet = priv->plat->rx_queues_cfg[queue].pkt_route;
- priv->hw->mac->rx_queue_routing(priv->hw, packet, queue);
+ stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
}
}
@@ -2481,50 +2382,47 @@ static void stmmac_mtl_configuration(struct stmmac_priv *priv)
u32 rx_queues_count = priv->plat->rx_queues_to_use;
u32 tx_queues_count = priv->plat->tx_queues_to_use;
- if (tx_queues_count > 1 && priv->hw->mac->set_mtl_tx_queue_weight)
+ if (tx_queues_count > 1)
stmmac_set_tx_queue_weight(priv);
/* Configure MTL RX algorithms */
- if (rx_queues_count > 1 && priv->hw->mac->prog_mtl_rx_algorithms)
- priv->hw->mac->prog_mtl_rx_algorithms(priv->hw,
- priv->plat->rx_sched_algorithm);
+ if (rx_queues_count > 1)
+ stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
+ priv->plat->rx_sched_algorithm);
/* Configure MTL TX algorithms */
- if (tx_queues_count > 1 && priv->hw->mac->prog_mtl_tx_algorithms)
- priv->hw->mac->prog_mtl_tx_algorithms(priv->hw,
- priv->plat->tx_sched_algorithm);
+ if (tx_queues_count > 1)
+ stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
+ priv->plat->tx_sched_algorithm);
/* Configure CBS in AVB TX queues */
- if (tx_queues_count > 1 && priv->hw->mac->config_cbs)
+ if (tx_queues_count > 1)
stmmac_configure_cbs(priv);
/* Map RX MTL to DMA channels */
- if (priv->hw->mac->map_mtl_to_dma)
- stmmac_rx_queue_dma_chan_map(priv);
+ stmmac_rx_queue_dma_chan_map(priv);
/* Enable MAC RX Queues */
- if (priv->hw->mac->rx_queue_enable)
- stmmac_mac_enable_rx_queues(priv);
+ stmmac_mac_enable_rx_queues(priv);
/* Set RX priorities */
- if (rx_queues_count > 1 && priv->hw->mac->rx_queue_prio)
+ if (rx_queues_count > 1)
stmmac_mac_config_rx_queues_prio(priv);
/* Set TX priorities */
- if (tx_queues_count > 1 && priv->hw->mac->tx_queue_prio)
+ if (tx_queues_count > 1)
stmmac_mac_config_tx_queues_prio(priv);
/* Set RX routing */
- if (rx_queues_count > 1 && priv->hw->mac->rx_queue_routing)
+ if (rx_queues_count > 1)
stmmac_mac_config_rx_queues_routing(priv);
}
static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
{
- if (priv->hw->mac->safety_feat_config && priv->dma_cap.asp) {
+ if (priv->dma_cap.asp) {
netdev_info(priv->dev, "Enabling Safety Features\n");
- priv->hw->mac->safety_feat_config(priv->ioaddr,
- priv->dma_cap.asp);
+ stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp);
} else {
netdev_info(priv->dev, "No Safety Features support found\n");
}
@@ -2559,7 +2457,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
}
/* Copy the MAC addr into the HW */
- priv->hw->mac->set_umac_addr(priv->hw, dev->dev_addr, 0);
+ stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
/* PS and related bits will be programmed according to the speed */
if (priv->hw->pcs) {
@@ -2575,17 +2473,15 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
}
/* Initialize the MAC Core */
- priv->hw->mac->core_init(priv->hw, dev);
+ stmmac_core_init(priv, priv->hw, dev);
/* Initialize MTL*/
- if (priv->synopsys_id >= DWMAC_CORE_4_00)
- stmmac_mtl_configuration(priv);
+ stmmac_mtl_configuration(priv);
/* Initialize Safety Features */
- if (priv->synopsys_id >= DWMAC_CORE_5_10)
- stmmac_safety_feat_configuration(priv);
+ stmmac_safety_feat_configuration(priv);
- ret = priv->hw->mac->rx_ipc(priv->hw);
+ ret = stmmac_rx_ipc(priv, priv->hw);
if (!ret) {
netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
priv->plat->rx_coe = STMMAC_RX_COE_NONE;
@@ -2593,7 +2489,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
}
/* Enable the MAC Rx/Tx */
- priv->hw->mac->set_mac(priv->ioaddr, true);
+ stmmac_mac_set(priv, priv->ioaddr, true);
/* Set the HW DMA mode and the COE */
stmmac_dma_operation_mode(priv);
@@ -2623,13 +2519,14 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
- if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
- priv->rx_riwt = MAX_DMA_RIWT;
- priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
+ if (priv->use_riwt) {
+ ret = stmmac_rx_watchdog(priv, priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
+ if (!ret)
+ priv->rx_riwt = MAX_DMA_RIWT;
}
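+ /* stmmac_rx_watchdog() returns nonzero (e.g. -EINVAL for cores without
+ * a RIWT op), so rx_riwt is only latched above on success.
+ */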
- if (priv->hw->pcs && priv->hw->mac->pcs_ctrl_ane)
- priv->hw->mac->pcs_ctrl_ane(priv->hw, 1, priv->hw->ps, 0);
+ if (priv->hw->pcs)
+ stmmac_pcs_ctrl_ane(priv, priv->hw, 1, priv->hw->ps, 0);
/* set TX and RX rings length */
stmmac_set_rings_length(priv);
@@ -2637,7 +2534,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
/* Enable TSO */
if (priv->tso) {
for (chan = 0; chan < tx_cnt; chan++)
- priv->hw->dma->enable_tso(priv->ioaddr, 1, chan);
+ stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
}
return 0;
@@ -2808,7 +2705,7 @@ static int stmmac_release(struct net_device *dev)
free_dma_desc_resources(priv);
/* Disable the MAC Rx/Tx */
- priv->hw->mac->set_mac(priv->ioaddr, false);
+ stmmac_mac_set(priv, priv->ioaddr, false);
netif_carrier_off(dev);
@@ -2851,10 +2748,10 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
TSO_MAX_BUFF_SIZE : tmp_len;
- priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
- 0, 1,
- (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
- 0, 0);
+ stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
+ 0, 1,
+ (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
+ 0, 0);
tmp_len -= TSO_MAX_BUFF_SIZE;
}
@@ -2926,7 +2823,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
/* set new MSS value if needed */
if (mss != tx_q->mss) {
mss_desc = tx_q->dma_tx + tx_q->cur_tx;
- priv->hw->desc->set_mss(mss_desc, mss);
+ stmmac_set_mss(priv, mss_desc, mss);
tx_q->mss = mss;
tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
@@ -3012,7 +2909,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
STMMAC_COAL_TIMER(priv->tx_coal_timer));
} else {
priv->tx_count_frames = 0;
- priv->hw->desc->set_tx_ic(desc);
+ stmmac_set_tx_ic(priv, desc);
priv->xstats.tx_set_ic_bit++;
}
@@ -3022,11 +2919,11 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
priv->hwts_tx_en)) {
/* declare that device is doing timestamping */
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- priv->hw->desc->enable_tx_timestamp(first);
+ stmmac_enable_tx_timestamp(priv, first);
}
/* Complete the first descriptor before granting the DMA */
- priv->hw->desc->prepare_tso_tx_desc(first, 1,
+ stmmac_prepare_tso_tx_desc(priv, first, 1,
proto_hdr_len,
pay_len,
1, tx_q->tx_skbuff_dma[first_entry].last_segment,
@@ -3040,7 +2937,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
* sure that MSS's own bit is the last thing written.
*/
dma_wmb();
- priv->hw->desc->set_tx_owner(mss_desc);
+ stmmac_set_tx_owner(priv, mss_desc);
}
/* The own bit must be the latest setting done when preparing the
@@ -3054,8 +2951,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
tx_q->cur_tx, first, nfrags);
- priv->hw->desc->display_ring((void *)tx_q->dma_tx, DMA_TX_SIZE,
- 0);
+ stmmac_display_ring(priv, (void *)tx_q->dma_tx, DMA_TX_SIZE, 0);
pr_info(">>> frame to be transmitted: ");
print_pkt(skb->data, skb_headlen(skb));
@@ -3063,8 +2959,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
- priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
- queue);
+ stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
return NETDEV_TX_OK;
@@ -3136,12 +3031,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
enh_desc = priv->plat->enh_desc;
/* To program the descriptors according to the size of the frame */
if (enh_desc)
- is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc);
+ is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
- if (unlikely(is_jumbo) && likely(priv->synopsys_id <
- DWMAC_CORE_4_00)) {
- entry = priv->hw->mode->jumbo_frm(tx_q, skb, csum_insertion);
- if (unlikely(entry < 0))
+ if (unlikely(is_jumbo)) {
+ entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
+ if (unlikely(entry < 0) && (entry != -EINVAL))
goto dma_map_err;
}
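+ /* -EINVAL from stmmac_jumbo_frm() only means this core provides no
+ * jumbo_frm op; in that case fall through to the normal descriptor path.
+ */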
@@ -3164,19 +3058,16 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
goto dma_map_err; /* should reuse desc w/o issues */
tx_q->tx_skbuff_dma[entry].buf = des;
- if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
- desc->des0 = cpu_to_le32(des);
- else
- desc->des2 = cpu_to_le32(des);
+
+ stmmac_set_desc_addr(priv, desc, des);
tx_q->tx_skbuff_dma[entry].map_as_page = true;
tx_q->tx_skbuff_dma[entry].len = len;
tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
/* Prepare the descriptor and set the own bit too */
- priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
- priv->mode, 1, last_segment,
- skb->len);
+ stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
+ priv->mode, 1, last_segment, skb->len);
}
/* Only the last descriptor gets to point to the skb. */
@@ -3203,7 +3094,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
else
tx_head = (void *)tx_q->dma_tx;
- priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false);
+ stmmac_display_ring(priv, tx_head, DMA_TX_SIZE, false);
netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
print_pkt(skb->data, skb->len);
@@ -3223,13 +3114,16 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
* element in case of no SG.
*/
priv->tx_count_frames += nfrags + 1;
- if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
+ if (likely(priv->tx_coal_frames > priv->tx_count_frames) &&
+ !priv->tx_timer_armed) {
mod_timer(&priv->txtimer,
STMMAC_COAL_TIMER(priv->tx_coal_timer));
+ priv->tx_timer_armed = true;
} else {
priv->tx_count_frames = 0;
- priv->hw->desc->set_tx_ic(desc);
+ stmmac_set_tx_ic(priv, desc);
priv->xstats.tx_set_ic_bit++;
+ priv->tx_timer_armed = false;
}
skb_tx_timestamp(skb);
@@ -3247,10 +3141,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
goto dma_map_err;
tx_q->tx_skbuff_dma[first_entry].buf = des;
- if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
- first->des0 = cpu_to_le32(des);
- else
- first->des2 = cpu_to_le32(des);
+
+ stmmac_set_desc_addr(priv, first, des);
tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
@@ -3259,13 +3151,13 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
priv->hwts_tx_en)) {
/* declare that device is doing timestamping */
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- priv->hw->desc->enable_tx_timestamp(first);
+ stmmac_enable_tx_timestamp(priv, first);
}
/* Prepare the first descriptor setting the OWN bit too */
- priv->hw->desc->prepare_tx_desc(first, 1, nopaged_len,
- csum_insertion, priv->mode, 1,
- last_segment, skb->len);
+ stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
+ csum_insertion, priv->mode, 1, last_segment,
+ skb->len);
/* The own bit must be the latest setting done when preparing the
* descriptor, and then a barrier is needed to make sure that
@@ -3276,11 +3168,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
- if (priv->synopsys_id < DWMAC_CORE_4_00)
- priv->hw->dma->enable_dma_transmission(priv->ioaddr);
- else
- priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
- queue);
+ stmmac_enable_dma_transmission(priv, priv->ioaddr);
+ stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
return NETDEV_TX_OK;
@@ -3364,14 +3253,8 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
break;
}
- if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
- p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
- p->des1 = 0;
- } else {
- p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
- }
- if (priv->hw->mode->refill_desc3)
- priv->hw->mode->refill_desc3(rx_q, p);
+ stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[entry]);
+ stmmac_refill_desc3(priv, rx_q, p);
if (rx_q->rx_zeroc_thresh > 0)
rx_q->rx_zeroc_thresh--;
@@ -3381,10 +3264,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
}
dma_wmb();
- if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
- priv->hw->desc->init_rx_desc(p, priv->use_riwt, 0, 0);
- else
- priv->hw->desc->set_rx_owner(p);
+ stmmac_set_rx_owner(priv, p, priv->use_riwt);
dma_wmb();
@@ -3418,7 +3298,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
else
rx_head = (void *)rx_q->dma_rx;
- priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true);
+ stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
}
while (count < limit) {
int status;
@@ -3431,8 +3311,8 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
p = rx_q->dma_rx + entry;
/* read the status of the incoming frame */
- status = priv->hw->desc->rx_status(&priv->dev->stats,
- &priv->xstats, p);
+ status = stmmac_rx_status(priv, &priv->dev->stats,
+ &priv->xstats, p);
/* check if managed by the DMA otherwise go ahead */
if (unlikely(status & dma_own))
break;
@@ -3449,11 +3329,9 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
prefetch(np);
- if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
- priv->hw->desc->rx_extended_status(&priv->dev->stats,
- &priv->xstats,
- rx_q->dma_erx +
- entry);
+ if (priv->extend_desc)
+ stmmac_rx_extended_status(priv, &priv->dev->stats,
+ &priv->xstats, rx_q->dma_erx + entry);
if (unlikely(status == discard_frame)) {
priv->dev->stats.rx_errors++;
if (priv->hwts_rx_en && !priv->extend_desc) {
@@ -3474,12 +3352,8 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
int frame_len;
unsigned int des;
- if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
- des = le32_to_cpu(p->des0);
- else
- des = le32_to_cpu(p->des2);
-
- frame_len = priv->hw->desc->get_rx_frame_len(p, coe);
+ stmmac_get_desc_addr(priv, p, &des);
+ frame_len = stmmac_get_rx_frame_len(priv, p, coe);
/* If frame length is greater than skb buffer size
* (preallocated during init) then the packet is
@@ -3621,7 +3495,7 @@ static int stmmac_poll(struct napi_struct *napi, int budget)
work_done = stmmac_rx(priv, budget, rx_q->queue_index);
if (work_done < budget) {
napi_complete_done(napi, work_done);
- stmmac_enable_dma_irq(priv, chan);
+ stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
}
return work_done;
}
@@ -3654,7 +3528,7 @@ static void stmmac_set_rx_mode(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
- priv->hw->mac->set_filter(priv->hw, dev);
+ stmmac_set_filter(priv, priv->hw, dev);
}
/**
@@ -3727,7 +3601,7 @@ static int stmmac_set_features(struct net_device *netdev,
/* No check needed because rx_coe has been set before and it will be
* fixed in case of issue.
*/
- priv->hw->mac->rx_ipc(priv->hw);
+ stmmac_rx_ipc(priv, priv->hw);
return 0;
}
@@ -3771,8 +3645,8 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
/* To handle GMAC own interrupts */
if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
- int status = priv->hw->mac->host_irq_status(priv->hw,
- &priv->xstats);
+ int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
+ int mtl_status;
if (unlikely(status)) {
/* For LPI we need to save the tx status */
@@ -3782,21 +3656,18 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
priv->tx_path_in_lpi_mode = false;
}
- if (priv->synopsys_id >= DWMAC_CORE_4_00) {
- for (queue = 0; queue < queues_count; queue++) {
- struct stmmac_rx_queue *rx_q =
- &priv->rx_queue[queue];
-
- status |=
- priv->hw->mac->host_mtl_irq_status(priv->hw,
- queue);
+ for (queue = 0; queue < queues_count; queue++) {
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
- if (status & CORE_IRQ_MTL_RX_OVERFLOW &&
- priv->hw->dma->set_rx_tail_ptr)
- priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
- rx_q->rx_tail_addr,
+ mtl_status = stmmac_host_mtl_irq_status(priv, priv->hw,
queue);
- }
+ if (mtl_status != -EINVAL)
+ status |= mtl_status;
+
+ if (status & CORE_IRQ_MTL_RX_OVERFLOW)
+ stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
+ rx_q->rx_tail_addr,
+ queue);
}
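+ /* stmmac_host_mtl_irq_status() yields -EINVAL on cores without
+ * per-queue MTL interrupts; that value is filtered out above instead of
+ * being OR-ed into status.
+ */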
/* PCS link status */
@@ -3860,6 +3731,58 @@ static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
return ret;
}
+static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
+{
+ struct stmmac_priv *priv = cb_priv;
+ int ret = -EOPNOTSUPP;
+
+ stmmac_disable_all_queues(priv);
+
+ switch (type) {
+ case TC_SETUP_CLSU32:
+ if (tc_cls_can_offload_and_chain0(priv->dev, type_data))
+ ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
+ break;
+ default:
+ break;
+ }
+
+ stmmac_enable_all_queues(priv);
+ return ret;
+}
+
+static int stmmac_setup_tc_block(struct stmmac_priv *priv,
+ struct tc_block_offload *f)
+{
+ if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ return -EOPNOTSUPP;
+
+ switch (f->command) {
+ case TC_BLOCK_BIND:
+ return tcf_block_cb_register(f->block, stmmac_setup_tc_block_cb,
+ priv, priv);
+ case TC_BLOCK_UNBIND:
+ tcf_block_cb_unregister(f->block, stmmac_setup_tc_block_cb, priv);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct stmmac_priv *priv = netdev_priv(ndev);
+
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ return stmmac_setup_tc_block(priv, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
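+ /* Standard tc block offload plumbing: BIND registers the classifier
+ * callback on ingress clsact blocks only, and the callback quiesces all
+ * queues while a cls_u32 rule is pushed down to the RX parser.
+ */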
+
static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
{
struct stmmac_priv *priv = netdev_priv(ndev);
@@ -3869,7 +3792,7 @@ static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
if (ret)
return ret;
- priv->hw->mac->set_umac_addr(priv->hw, ndev->dev_addr, 0);
+ stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
return ret;
}
@@ -4098,6 +4021,7 @@ static const struct net_device_ops stmmac_netdev_ops = {
.ndo_set_rx_mode = stmmac_set_rx_mode,
.ndo_tx_timeout = stmmac_tx_timeout,
.ndo_do_ioctl = stmmac_ioctl,
+ .ndo_setup_tc = stmmac_setup_tc,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = stmmac_poll_controller,
#endif
@@ -4145,49 +4069,17 @@ static void stmmac_service_task(struct work_struct *work)
*/
static int stmmac_hw_init(struct stmmac_priv *priv)
{
- struct mac_device_info *mac;
-
- /* Identify the MAC HW device */
- if (priv->plat->setup) {
- mac = priv->plat->setup(priv);
- } else if (priv->plat->has_gmac) {
- priv->dev->priv_flags |= IFF_UNICAST_FLT;
- mac = dwmac1000_setup(priv->ioaddr,
- priv->plat->multicast_filter_bins,
- priv->plat->unicast_filter_entries,
- &priv->synopsys_id);
- } else if (priv->plat->has_gmac4) {
- priv->dev->priv_flags |= IFF_UNICAST_FLT;
- mac = dwmac4_setup(priv->ioaddr,
- priv->plat->multicast_filter_bins,
- priv->plat->unicast_filter_entries,
- &priv->synopsys_id);
- } else {
- mac = dwmac100_setup(priv->ioaddr, &priv->synopsys_id);
- }
- if (!mac)
- return -ENOMEM;
-
- priv->hw = mac;
+ int ret;
/* dwmac-sun8i only works in chain mode */
if (priv->plat->has_sun8i)
chain_mode = 1;
+ priv->chain_mode = chain_mode;
- /* To use the chained or ring mode */
- if (priv->synopsys_id >= DWMAC_CORE_4_00) {
- priv->hw->mode = &dwmac4_ring_mode_ops;
- } else {
- if (chain_mode) {
- priv->hw->mode = &chain_mode_ops;
- dev_info(priv->device, "Chain mode enabled\n");
- priv->mode = STMMAC_CHAIN_MODE;
- } else {
- priv->hw->mode = &ring_mode_ops;
- dev_info(priv->device, "Ring mode enabled\n");
- priv->mode = STMMAC_RING_MODE;
- }
- }
+ /* Initialize HW Interface */
+ ret = stmmac_hwif_init(priv);
+ if (ret)
+ return ret;
/* Get the HW capability (new GMAC newer than 3.50a) */
priv->hw_cap_support = stmmac_get_hw_features(priv);
@@ -4221,12 +4113,6 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
dev_info(priv->device, "No HW DMA feature register supported\n");
}
- /* To use alternate (extended), normal or GMAC4 descriptor structures */
- if (priv->synopsys_id >= DWMAC_CORE_4_00)
- priv->hw->desc = &dwmac4_desc_ops;
- else
- stmmac_selec_desc_mode(priv);
-
if (priv->plat->rx_coe) {
priv->hw->rx_csum = priv->plat->rx_coe;
dev_info(priv->device, "RX Checksum Offload Engine supported\n");
@@ -4335,6 +4221,11 @@ int stmmac_dvr_probe(struct device *device,
ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXCSUM;
+ ret = stmmac_tc_init(priv, priv);
+ if (!ret)
+ ndev->hw_features |= NETIF_F_HW_TC;
+
if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
priv->tso = true;
@@ -4386,7 +4277,7 @@ int stmmac_dvr_probe(struct device *device,
(8 * priv->plat->rx_queues_to_use));
}
- spin_lock_init(&priv->lock);
+ mutex_init(&priv->lock);
/* If a specific clk_csr value is passed from the platform
* this means that the CSR Clock Range selection cannot be
@@ -4458,7 +4349,7 @@ int stmmac_dvr_remove(struct device *dev)
stmmac_stop_all_dma(priv);
- priv->hw->mac->set_mac(priv->ioaddr, false);
+ stmmac_mac_set(priv, priv->ioaddr, false);
netif_carrier_off(ndev);
unregister_netdev(ndev);
if (priv->plat->stmmac_rst)
@@ -4470,6 +4361,7 @@ int stmmac_dvr_remove(struct device *dev)
priv->hw->pcs != STMMAC_PCS_RTBI)
stmmac_mdio_unregister(ndev);
destroy_workqueue(priv->wq);
+ mutex_destroy(&priv->lock);
free_netdev(ndev);
return 0;
@@ -4487,7 +4379,6 @@ int stmmac_suspend(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct stmmac_priv *priv = netdev_priv(ndev);
- unsigned long flags;
if (!ndev || !netif_running(ndev))
return 0;
@@ -4495,7 +4386,7 @@ int stmmac_suspend(struct device *dev)
if (ndev->phydev)
phy_stop(ndev->phydev);
- spin_lock_irqsave(&priv->lock, flags);
+ mutex_lock(&priv->lock);
netif_device_detach(ndev);
stmmac_stop_all_queues(priv);
@@ -4507,16 +4398,16 @@ int stmmac_suspend(struct device *dev)
/* Enable Power down mode by programming the PMT regs */
if (device_may_wakeup(priv->device)) {
- priv->hw->mac->pmt(priv->hw, priv->wolopts);
+ stmmac_pmt(priv, priv->hw, priv->wolopts);
priv->irq_wake = 1;
} else {
- priv->hw->mac->set_mac(priv->ioaddr, false);
+ stmmac_mac_set(priv, priv->ioaddr, false);
pinctrl_pm_select_sleep_state(priv->device);
/* Disable clock in case of PWM is off */
clk_disable(priv->plat->pclk);
clk_disable(priv->plat->stmmac_clk);
}
- spin_unlock_irqrestore(&priv->lock, flags);
+ mutex_unlock(&priv->lock);
priv->oldlink = false;
priv->speed = SPEED_UNKNOWN;
@@ -4561,7 +4452,6 @@ int stmmac_resume(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct stmmac_priv *priv = netdev_priv(ndev);
- unsigned long flags;
if (!netif_running(ndev))
return 0;
@@ -4573,9 +4463,9 @@ int stmmac_resume(struct device *dev)
* from another devices (e.g. serial console).
*/
if (device_may_wakeup(priv->device)) {
- spin_lock_irqsave(&priv->lock, flags);
- priv->hw->mac->pmt(priv->hw, 0);
- spin_unlock_irqrestore(&priv->lock, flags);
+ mutex_lock(&priv->lock);
+ stmmac_pmt(priv, priv->hw, 0);
+ mutex_unlock(&priv->lock);
priv->irq_wake = 0;
} else {
pinctrl_pm_select_default_state(priv->device);
@@ -4589,7 +4479,7 @@ int stmmac_resume(struct device *dev)
netif_device_attach(ndev);
- spin_lock_irqsave(&priv->lock, flags);
+ mutex_lock(&priv->lock);
stmmac_reset_queues_param(priv);
@@ -4603,7 +4493,7 @@ int stmmac_resume(struct device *dev)
stmmac_start_all_queues(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
+ mutex_unlock(&priv->lock);
if (ndev->phydev)
phy_start(ndev->phydev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index f5f37bfa1d58..5df1a608e566 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -233,10 +233,7 @@ int stmmac_mdio_register(struct net_device *ndev)
new_bus->phy_mask = mdio_bus_data->phy_mask;
new_bus->parent = priv->device;
- if (mdio_node)
- err = of_mdiobus_register(new_bus, mdio_node);
- else
- err = mdiobus_register(new_bus);
+ err = of_mdiobus_register(new_bus, mdio_node);
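+ /* of_mdiobus_register() handles a NULL mdio_node by falling back to
+ * mdiobus_register() itself, so no separate non-DT branch is needed.
+ */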
if (err != 0) {
dev_err(dev, "Cannot register the MDIO bus\n");
goto bus_register_fail;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index ebd3e5ffa73c..6d141f3931eb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -472,7 +472,8 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
}
if (of_device_is_compatible(np, "snps,dwmac-4.00") ||
- of_device_is_compatible(np, "snps,dwmac-4.10a")) {
+ of_device_is_compatible(np, "snps,dwmac-4.10a") ||
+ of_device_is_compatible(np, "snps,dwmac-4.20a")) {
plat->has_gmac4 = 1;
plat->has_gmac = 0;
plat->pmt = 1;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index e471a903c654..0cb0e39a2be9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -49,9 +49,7 @@ static int stmmac_adjust_freq(struct ptp_clock_info *ptp, s32 ppb)
addend = neg_adj ? (addend - diff) : (addend + diff);
spin_lock_irqsave(&priv->ptp_lock, flags);
-
- priv->hw->ptp->config_addend(priv->ptpaddr, addend);
-
+ stmmac_config_addend(priv, priv->ptpaddr, addend);
spin_unlock_irqrestore(&priv->ptp_lock, flags);
return 0;
@@ -84,10 +82,8 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
nsec = reminder;
spin_lock_irqsave(&priv->ptp_lock, flags);
-
- priv->hw->ptp->adjust_systime(priv->ptpaddr, sec, nsec, neg_adj,
- priv->plat->has_gmac4);
-
+ stmmac_adjust_systime(priv, priv->ptpaddr, sec, nsec, neg_adj,
+ priv->plat->has_gmac4);
spin_unlock_irqrestore(&priv->ptp_lock, flags);
return 0;
@@ -110,9 +106,7 @@ static int stmmac_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts)
u64 ns;
spin_lock_irqsave(&priv->ptp_lock, flags);
-
- ns = priv->hw->ptp->get_systime(priv->ptpaddr);
-
+ stmmac_get_systime(priv, priv->ptpaddr, &ns);
spin_unlock_irqrestore(&priv->ptp_lock, flags);
*ts = ns_to_timespec64(ns);
@@ -137,9 +131,7 @@ static int stmmac_set_time(struct ptp_clock_info *ptp,
unsigned long flags;
spin_lock_irqsave(&priv->ptp_lock, flags);
-
- priv->hw->ptp->init_systime(priv->ptpaddr, ts->tv_sec, ts->tv_nsec);
-
+ stmmac_init_systime(priv, priv->ptpaddr, ts->tv_sec, ts->tv_nsec);
spin_unlock_irqrestore(&priv->ptp_lock, flags);
return 0;
@@ -148,17 +140,43 @@ static int stmmac_set_time(struct ptp_clock_info *ptp,
static int stmmac_enable(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq, int on)
{
- return -EOPNOTSUPP;
+ struct stmmac_priv *priv =
+ container_of(ptp, struct stmmac_priv, ptp_clock_ops);
+ struct stmmac_pps_cfg *cfg;
+ int ret = -EOPNOTSUPP;
+ unsigned long flags;
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_PEROUT:
+ cfg = &priv->pps[rq->perout.index];
+
+ cfg->start.tv_sec = rq->perout.start.sec;
+ cfg->start.tv_nsec = rq->perout.start.nsec;
+ cfg->period.tv_sec = rq->perout.period.sec;
+ cfg->period.tv_nsec = rq->perout.period.nsec;
+
+ spin_lock_irqsave(&priv->ptp_lock, flags);
+ ret = stmmac_flex_pps_config(priv, priv->ioaddr,
+ rq->perout.index, cfg, on,
+ priv->sub_second_inc,
+ priv->systime_flags);
+ spin_unlock_irqrestore(&priv->ptp_lock, flags);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
}
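+ /* Only PTP_CLK_REQ_PEROUT is handled: the requested start time and
+ * period are cached in priv->pps[] and programmed into the flexible PPS
+ * output under ptp_lock; every other request stays -EOPNOTSUPP.
+ */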
/* structure describing a PTP hardware clock */
-static const struct ptp_clock_info stmmac_ptp_clock_ops = {
+static struct ptp_clock_info stmmac_ptp_clock_ops = {
.owner = THIS_MODULE,
.name = "stmmac_ptp_clock",
.max_adj = 62500000,
.n_alarm = 0,
.n_ext_ts = 0,
- .n_per_out = 0,
+ .n_per_out = 0, /* will be overwritten in stmmac_ptp_register */
.n_pins = 0,
.pps = 0,
.adjfreq = stmmac_adjust_freq,
@@ -176,6 +194,16 @@ static const struct ptp_clock_info stmmac_ptp_clock_ops = {
*/
void stmmac_ptp_register(struct stmmac_priv *priv)
{
+ int i;
+
+ for (i = 0; i < priv->dma_cap.pps_out_num; i++) {
+ if (i >= STMMAC_PPS_MAX)
+ break;
+ priv->pps[i].available = true;
+ }
+
+ stmmac_ptp_clock_ops.n_per_out = priv->dma_cap.pps_out_num;
+
spin_lock_init(&priv->ptp_lock);
priv->ptp_clock_ops = stmmac_ptp_clock_ops;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
new file mode 100644
index 000000000000..881c94b73e2f
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -0,0 +1,295 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
+ * stmmac TC Handling (HW only)
+ */
+
+#include <net/pkt_cls.h>
+#include <net/tc_act/tc_gact.h>
+#include "common.h"
+#include "dwmac4.h"
+#include "dwmac5.h"
+#include "stmmac.h"
+
+static void tc_fill_all_pass_entry(struct stmmac_tc_entry *entry)
+{
+ memset(entry, 0, sizeof(*entry));
+ entry->in_use = true;
+ entry->is_last = true;
+ entry->is_frag = false;
+ entry->prio = ~0x0;
+ entry->handle = 0;
+ entry->val.match_data = 0x0;
+ entry->val.match_en = 0x0;
+ entry->val.af = 1;
+ entry->val.dma_ch_no = 0x0;
+}
+
+static struct stmmac_tc_entry *tc_find_entry(struct stmmac_priv *priv,
+ struct tc_cls_u32_offload *cls,
+ bool free)
+{
+ struct stmmac_tc_entry *entry, *first = NULL, *dup = NULL;
+ u32 loc = cls->knode.handle;
+ int i;
+
+ for (i = 0; i < priv->tc_entries_max; i++) {
+ entry = &priv->tc_entries[i];
+ if (!entry->in_use && !first && free)
+ first = entry;
+ if (entry->handle == loc && !free)
+ dup = entry;
+ }
+
+ if (dup)
+ return dup;
+ if (first) {
+ first->handle = loc;
+ first->in_use = true;
+
+ /* Reset HW values */
+ memset(&first->val, 0, sizeof(first->val));
+ }
+
+ return first;
+}
+
+static int tc_fill_actions(struct stmmac_tc_entry *entry,
+ struct stmmac_tc_entry *frag,
+ struct tc_cls_u32_offload *cls)
+{
+ struct stmmac_tc_entry *action_entry = entry;
+ const struct tc_action *act;
+ struct tcf_exts *exts;
+ LIST_HEAD(actions);
+
+ exts = cls->knode.exts;
+ if (!tcf_exts_has_actions(exts))
+ return -EINVAL;
+ if (frag)
+ action_entry = frag;
+
+ tcf_exts_to_list(exts, &actions);
+ list_for_each_entry(act, &actions, list) {
+ /* Accept */
+ if (is_tcf_gact_ok(act)) {
+ action_entry->val.af = 1;
+ break;
+ }
+ /* Drop */
+ if (is_tcf_gact_shot(act)) {
+ action_entry->val.rf = 1;
+ break;
+ }
+
+ /* Unsupported */
+ return -EINVAL;
+ }
+
+ return 0;
+}
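+ /* Only gact "ok" and "shot" are offloadable: they map to the entry's
+ * af/rf bits (presumably accept-frame/reject-frame flags of the parser
+ * entry); any other action fails with -EINVAL.
+ */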
+
+static int tc_fill_entry(struct stmmac_priv *priv,
+ struct tc_cls_u32_offload *cls)
+{
+ struct stmmac_tc_entry *entry, *frag = NULL;
+ struct tc_u32_sel *sel = cls->knode.sel;
+ u32 off, data, mask, real_off, rem;
+ u32 prio = cls->common.prio;
+ int ret;
+
+ /* Only 1 match per entry */
+ if (sel->nkeys <= 0 || sel->nkeys > 1)
+ return -EINVAL;
+
+ off = sel->keys[0].off << sel->offshift;
+ data = sel->keys[0].val;
+ mask = sel->keys[0].mask;
+
+ switch (ntohs(cls->common.protocol)) {
+ case ETH_P_ALL:
+ break;
+ case ETH_P_IP:
+ off += ETH_HLEN;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (off > priv->tc_off_max)
+ return -EINVAL;
+
+ real_off = off / 4;
+ rem = off % 4;
+
+ entry = tc_find_entry(priv, cls, true);
+ if (!entry)
+ return -EINVAL;
+
+ if (rem) {
+ frag = tc_find_entry(priv, cls, true);
+ if (!frag) {
+ ret = -EINVAL;
+ goto err_unuse;
+ }
+
+ entry->frag_ptr = frag;
+ entry->val.match_en = (mask << (rem * 8)) &
+ GENMASK(31, rem * 8);
+ entry->val.match_data = (data << (rem * 8)) &
+ GENMASK(31, rem * 8);
+ entry->val.frame_offset = real_off;
+ entry->prio = prio;
+
+ frag->val.match_en = (mask >> (rem * 8)) &
+ GENMASK(rem * 8 - 1, 0);
+ frag->val.match_data = (data >> (rem * 8)) &
+ GENMASK(rem * 8 - 1, 0);
+ frag->val.frame_offset = real_off + 1;
+ frag->prio = prio;
+ frag->is_frag = true;
+ } else {
+ entry->frag_ptr = NULL;
+ entry->val.match_en = mask;
+ entry->val.match_data = data;
+ entry->val.frame_offset = real_off;
+ entry->prio = prio;
+ }
+
+ ret = tc_fill_actions(entry, frag, cls);
+ if (ret)
+ goto err_unuse;
+
+ return 0;
+
+err_unuse:
+ if (frag)
+ frag->in_use = false;
+ entry->in_use = false;
+ return ret;
+}
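+ /* Worked example of the unaligned split: a key at byte offset off = 6
+ * gives real_off = 1 and rem = 2, so the base entry matches mask << 16
+ * within 32-bit word 1 and the fragment matches mask >> 16 within word
+ * 2, the two being chained via frag_ptr.
+ */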
+
+static void tc_unfill_entry(struct stmmac_priv *priv,
+ struct tc_cls_u32_offload *cls)
+{
+ struct stmmac_tc_entry *entry;
+
+ entry = tc_find_entry(priv, cls, false);
+ if (!entry)
+ return;
+
+ entry->in_use = false;
+ if (entry->frag_ptr) {
+ entry = entry->frag_ptr;
+ entry->is_frag = false;
+ entry->in_use = false;
+ }
+}
+
+static int tc_config_knode(struct stmmac_priv *priv,
+ struct tc_cls_u32_offload *cls)
+{
+ int ret;
+
+ ret = tc_fill_entry(priv, cls);
+ if (ret)
+ return ret;
+
+ ret = stmmac_rxp_config(priv, priv->hw->pcsr, priv->tc_entries,
+ priv->tc_entries_max);
+ if (ret)
+ goto err_unfill;
+
+ return 0;
+
+err_unfill:
+ tc_unfill_entry(priv, cls);
+ return ret;
+}
+
+static int tc_delete_knode(struct stmmac_priv *priv,
+ struct tc_cls_u32_offload *cls)
+{
+ int ret;
+
+ /* Set entry and fragments as not used */
+ tc_unfill_entry(priv, cls);
+
+ ret = stmmac_rxp_config(priv, priv->hw->pcsr, priv->tc_entries,
+ priv->tc_entries_max);
+ if (ret)
+ return ret;
+
+ return 0;
+}
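+ /* Both add and delete re-flash the complete entry table through
+ * stmmac_rxp_config(); there is no incremental per-rule update.
+ */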
+
+static int tc_setup_cls_u32(struct stmmac_priv *priv,
+ struct tc_cls_u32_offload *cls)
+{
+ switch (cls->command) {
+ case TC_CLSU32_REPLACE_KNODE:
+ tc_unfill_entry(priv, cls);
+ /* Fall through */
+ case TC_CLSU32_NEW_KNODE:
+ return tc_config_knode(priv, cls);
+ case TC_CLSU32_DELETE_KNODE:
+ return tc_delete_knode(priv, cls);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int tc_init(struct stmmac_priv *priv)
+{
+ struct dma_features *dma_cap = &priv->dma_cap;
+ unsigned int count;
+
+ if (!dma_cap->frpsel)
+ return -EINVAL;
+
+ switch (dma_cap->frpbs) {
+ case 0x0:
+ priv->tc_off_max = 64;
+ break;
+ case 0x1:
+ priv->tc_off_max = 128;
+ break;
+ case 0x2:
+ priv->tc_off_max = 256;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (dma_cap->frpes) {
+ case 0x0:
+ count = 64;
+ break;
+ case 0x1:
+ count = 128;
+ break;
+ case 0x2:
+ count = 256;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Reserve one last filter which lets all pass */
+ priv->tc_entries_max = count;
+ priv->tc_entries = devm_kzalloc(priv->device,
+ sizeof(*priv->tc_entries) * count, GFP_KERNEL);
+ if (!priv->tc_entries)
+ return -ENOMEM;
+
+ tc_fill_all_pass_entry(&priv->tc_entries[count - 1]);
+
+ dev_info(priv->device, "Enabling HW TC (entries=%d, max_off=%d)\n",
+ priv->tc_entries_max, priv->tc_off_max);
+ return 0;
+}
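+ /* frpbs/frpes come from the HW feature register and encode the parser
+ * buffer size and table depth; the last slot is pre-filled with a
+ * match-all accept entry so unmatched traffic keeps flowing.
+ */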
+
+const struct stmmac_tc_ops dwmac510_tc_ops = {
+ .init = tc_init,
+ .setup_cls_u32 = tc_setup_cls_u32,
+};
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index 48a541eb0af2..9263d638bd6d 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -18,7 +18,7 @@ if NET_VENDOR_TI
config TI_DAVINCI_EMAC
tristate "TI DaVinci EMAC Support"
- depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 )
+ depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 ) || COMPILE_TEST
select TI_DAVINCI_MDIO
select TI_DAVINCI_CPDMA
select PHYLIB
@@ -30,7 +30,7 @@ config TI_DAVINCI_EMAC
config TI_DAVINCI_MDIO
tristate "TI DaVinci MDIO Support"
- depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || ARCH_KEYSTONE
+ depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || ARCH_KEYSTONE || COMPILE_TEST
select PHYLIB
---help---
This driver supports TI's DaVinci MDIO module.
@@ -40,7 +40,7 @@ config TI_DAVINCI_MDIO
config TI_DAVINCI_CPDMA
tristate "TI DaVinci CPDMA Support"
- depends on ARCH_DAVINCI || ARCH_OMAP2PLUS
+ depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || COMPILE_TEST
---help---
This driver supports TI's DaVinci CPDMA dma engine.
@@ -60,7 +60,7 @@ config TI_CPSW_ALE
config TI_CPSW
tristate "TI CPSW Switch Support"
- depends on ARCH_DAVINCI || ARCH_OMAP2PLUS
+ depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || COMPILE_TEST
select TI_DAVINCI_CPDMA
select TI_DAVINCI_MDIO
select TI_CPSW_PHY_SEL
@@ -75,7 +75,7 @@ config TI_CPSW
config TI_CPTS
bool "TI Common Platform Time Sync (CPTS) Support"
- depends on TI_CPSW || TI_KEYSTONE_NETCP
+ depends on TI_CPSW || TI_KEYSTONE_NETCP || COMPILE_TEST
depends on POSIX_TIMERS
---help---
This driver supports the Common Platform Time Sync unit of
diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c
index 18013645e76c..0c1adad7415d 100644
--- a/drivers/net/ethernet/ti/cpsw-phy-sel.c
+++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c
@@ -177,12 +177,18 @@ void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave)
}
dev = bus_find_device(&platform_bus_type, NULL, node, match);
- of_node_put(node);
+ if (!dev) {
+ dev_err(dev, "unable to find platform device for %pOF\n", node);
+ goto out;
+ }
+
priv = dev_get_drvdata(dev);
priv->cpsw_phy_sel(priv, phy_mode, slave);
put_device(dev);
+out:
+ of_node_put(node);
}
EXPORT_SYMBOL_GPL(cpsw_phy_sel);
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 28d893b93d30..534596ce00d3 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -29,13 +29,14 @@
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_device.h>
#include <linux/if_vlan.h>
#include <linux/kmemleak.h>
+#include <linux/sys_soc.h>
#include <linux/pinctrl/consumer.h>
@@ -957,7 +958,7 @@ static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int cpsw_tx_poll(struct napi_struct *napi_tx, int budget)
+static int cpsw_tx_mq_poll(struct napi_struct *napi_tx, int budget)
{
u32 ch_map;
int num_tx, cur_budget, ch;
@@ -984,7 +985,21 @@ static int cpsw_tx_poll(struct napi_struct *napi_tx, int budget)
if (num_tx < budget) {
napi_complete(napi_tx);
writel(0xff, &cpsw->wr_regs->tx_en);
- if (cpsw->quirk_irq && cpsw->tx_irq_disabled) {
+ }
+
+ return num_tx;
+}
+
+static int cpsw_tx_poll(struct napi_struct *napi_tx, int budget)
+{
+ struct cpsw_common *cpsw = napi_to_cpsw(napi_tx);
+ int num_tx;
+
+ num_tx = cpdma_chan_process(cpsw->txv[0].ch, budget);
+ if (num_tx < budget) {
+ napi_complete(napi_tx);
+ writel(0xff, &cpsw->wr_regs->tx_en);
+ if (cpsw->tx_irq_disabled) {
cpsw->tx_irq_disabled = false;
enable_irq(cpsw->irqs_table[1]);
}
@@ -993,7 +1008,7 @@ static int cpsw_tx_poll(struct napi_struct *napi_tx, int budget)
return num_tx;
}
-static int cpsw_rx_poll(struct napi_struct *napi_rx, int budget)
+static int cpsw_rx_mq_poll(struct napi_struct *napi_rx, int budget)
{
u32 ch_map;
int num_rx, cur_budget, ch;
@@ -1020,7 +1035,21 @@ static int cpsw_rx_poll(struct napi_struct *napi_rx, int budget)
if (num_rx < budget) {
napi_complete_done(napi_rx, num_rx);
writel(0xff, &cpsw->wr_regs->rx_en);
- if (cpsw->quirk_irq && cpsw->rx_irq_disabled) {
+ }
+
+ return num_rx;
+}
+
+static int cpsw_rx_poll(struct napi_struct *napi_rx, int budget)
+{
+ struct cpsw_common *cpsw = napi_to_cpsw(napi_rx);
+ int num_rx;
+
+ num_rx = cpdma_chan_process(cpsw->rxv[0].ch, budget);
+ if (num_rx < budget) {
+ napi_complete_done(napi_rx, num_rx);
+ writel(0xff, &cpsw->wr_regs->rx_en);
+ if (cpsw->rx_irq_disabled) {
cpsw->rx_irq_disabled = false;
enable_irq(cpsw->irqs_table[0]);
}
@@ -1252,8 +1281,8 @@ static void cpsw_add_ch_strings(u8 **p, int ch_num, int rx_dir)
for (i = 0; i < ch_stats_len; i++) {
line = i % CPSW_STATS_CH_LEN;
snprintf(*p, ETH_GSTRING_LEN,
- "%s DMA chan %d: %s", rx_dir ? "Rx" : "Tx",
- i / CPSW_STATS_CH_LEN,
+ "%s DMA chan %ld: %s", rx_dir ? "Rx" : "Tx",
+ (long)(i / CPSW_STATS_CH_LEN),
cpsw_gstrings_ch_stats[line].stat_string);
*p += ETH_GSTRING_LEN;
}
@@ -2364,9 +2393,9 @@ static void cpsw_get_channels(struct net_device *ndev,
{
struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+ ch->max_rx = cpsw->quirk_irq ? 1 : CPSW_MAX_QUEUES;
+ ch->max_tx = cpsw->quirk_irq ? 1 : CPSW_MAX_QUEUES;
ch->max_combined = 0;
- ch->max_rx = CPSW_MAX_QUEUES;
- ch->max_tx = CPSW_MAX_QUEUES;
ch->max_other = 0;
ch->other_count = 0;
ch->rx_count = cpsw->rx_ch_num;
@@ -2377,6 +2406,11 @@ static void cpsw_get_channels(struct net_device *ndev,
static int cpsw_check_ch_settings(struct cpsw_common *cpsw,
struct ethtool_channels *ch)
{
+ if (cpsw->quirk_irq) {
+ dev_err(cpsw->dev, "Maximum one tx/rx queue is allowed\n");
+ return -EOPNOTSUPP;
+ }
+
if (ch->combined_count)
return -EINVAL;
@@ -2917,44 +2951,20 @@ static int cpsw_probe_dual_emac(struct cpsw_priv *priv)
return ret;
}
-#define CPSW_QUIRK_IRQ BIT(0)
-
-static const struct platform_device_id cpsw_devtype[] = {
- {
- /* keep it for existing compatibles */
- .name = "cpsw",
- .driver_data = CPSW_QUIRK_IRQ,
- }, {
- .name = "am335x-cpsw",
- .driver_data = CPSW_QUIRK_IRQ,
- }, {
- .name = "am4372-cpsw",
- .driver_data = 0,
- }, {
- .name = "dra7-cpsw",
- .driver_data = 0,
- }, {
- /* sentinel */
- }
-};
-MODULE_DEVICE_TABLE(platform, cpsw_devtype);
-
-enum ti_cpsw_type {
- CPSW = 0,
- AM335X_CPSW,
- AM4372_CPSW,
- DRA7_CPSW,
-};
-
static const struct of_device_id cpsw_of_mtable[] = {
- { .compatible = "ti,cpsw", .data = &cpsw_devtype[CPSW], },
- { .compatible = "ti,am335x-cpsw", .data = &cpsw_devtype[AM335X_CPSW], },
- { .compatible = "ti,am4372-cpsw", .data = &cpsw_devtype[AM4372_CPSW], },
- { .compatible = "ti,dra7-cpsw", .data = &cpsw_devtype[DRA7_CPSW], },
+ { .compatible = "ti,cpsw"},
+ { .compatible = "ti,am335x-cpsw"},
+ { .compatible = "ti,am4372-cpsw"},
+ { .compatible = "ti,dra7-cpsw"},
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, cpsw_of_mtable);
+static const struct soc_device_attribute cpsw_soc_devices[] = {
+ { .family = "AM33xx", .revision = "ES1.0"},
+ { /* sentinel */ }
+};
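+ /* AM33xx ES1.0 needs the IRQ disable/re-enable workaround (quirk_irq).
+ * Matching on SoC revision replaces the old platform_device_id
+ * driver_data, and the quirk also limits the driver to a single RX/TX
+ * channel pair (see cpsw_get_channels()/cpsw_check_ch_settings()).
+ */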
+
static int cpsw_probe(struct platform_device *pdev)
{
struct clk *clk;
@@ -2966,9 +2976,9 @@ static int cpsw_probe(struct platform_device *pdev)
void __iomem *ss_regs;
void __iomem *cpts_regs;
struct resource *res, *ss_res;
- const struct of_device_id *of_id;
struct gpio_descs *mode;
u32 slave_offset, sliver_offset, slave_size;
+ const struct soc_device_attribute *soc;
struct cpsw_common *cpsw;
int ret = 0, i;
int irq;
@@ -3141,6 +3151,10 @@ static int cpsw_probe(struct platform_device *pdev)
goto clean_dt_ret;
}
+ soc = soc_device_match(cpsw_soc_devices);
+ if (soc)
+ cpsw->quirk_irq = 1;
+
cpsw->txv[0].ch = cpdma_chan_create(cpsw->dma, 0, cpsw_tx_handler, 0);
if (IS_ERR(cpsw->txv[0].ch)) {
dev_err(priv->dev, "error initializing tx dma channel\n");
@@ -3180,19 +3194,16 @@ static int cpsw_probe(struct platform_device *pdev)
goto clean_dma_ret;
}
- of_id = of_match_device(cpsw_of_mtable, &pdev->dev);
- if (of_id) {
- pdev->id_entry = of_id->data;
- if (pdev->id_entry->driver_data)
- cpsw->quirk_irq = true;
- }
-
ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX;
ndev->netdev_ops = &cpsw_netdev_ops;
ndev->ethtool_ops = &cpsw_ethtool_ops;
- netif_napi_add(ndev, &cpsw->napi_rx, cpsw_rx_poll, CPSW_POLL_WEIGHT);
- netif_tx_napi_add(ndev, &cpsw->napi_tx, cpsw_tx_poll, CPSW_POLL_WEIGHT);
+ netif_napi_add(ndev, &cpsw->napi_rx,
+ cpsw->quirk_irq ? cpsw_rx_poll : cpsw_rx_mq_poll,
+ CPSW_POLL_WEIGHT);
+ netif_tx_napi_add(ndev, &cpsw->napi_tx,
+ cpsw->quirk_irq ? cpsw_tx_poll : cpsw_tx_mq_poll,
+ CPSW_POLL_WEIGHT);
cpsw_split_res(ndev);
/* register the network device */
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index e7b76f6b4f67..6f63c8729afc 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -294,7 +294,8 @@ static long cpts_overflow_check(struct ptp_clock_info *ptp)
delay = CPTS_SKB_TX_WORK_TIMEOUT;
spin_unlock_irqrestore(&cpts->lock, flags);
- pr_debug("cpts overflow check at %lld.%09lu\n", ts.tv_sec, ts.tv_nsec);
+ pr_debug("cpts overflow check at %lld.%09ld\n",
+ (long long)ts.tv_sec, ts.tv_nsec);
return (long)delay;
}
@@ -564,7 +565,7 @@ struct cpts *cpts_create(struct device *dev, void __iomem *regs,
cpts->refclk = devm_clk_get(dev, "cpts");
if (IS_ERR(cpts->refclk)) {
dev_err(dev, "Failed to get cpts refclk\n");
- return ERR_PTR(PTR_ERR(cpts->refclk));
+ return ERR_CAST(cpts->refclk);
}
clk_prepare(cpts->refclk);
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 31ae04117f0a..cdbddf16dd29 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -191,7 +191,7 @@ static void cpdma_desc_pool_destroy(struct cpdma_ctlr *ctlr)
return;
WARN(gen_pool_size(pool->gen_pool) != gen_pool_avail(pool->gen_pool),
- "cpdma_desc_pool size %d != avail %d",
+ "cpdma_desc_pool size %zd != avail %zd",
gen_pool_size(pool->gen_pool),
gen_pool_avail(pool->gen_pool));
if (pool->cpumap)
@@ -1080,7 +1080,7 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
writel_relaxed(buffer, &desc->hw_buffer);
writel_relaxed(len, &desc->hw_len);
writel_relaxed(mode | len, &desc->hw_mode);
- writel_relaxed(token, &desc->sw_token);
+ writel_relaxed((uintptr_t)token, &desc->sw_token);
writel_relaxed(buffer, &desc->sw_buffer);
writel_relaxed(len, &desc->sw_len);
desc_read(desc, sw_len);
@@ -1121,15 +1121,15 @@ static void __cpdma_chan_free(struct cpdma_chan *chan,
struct cpdma_desc_pool *pool = ctlr->pool;
dma_addr_t buff_dma;
int origlen;
- void *token;
+ uintptr_t token;
- token = (void *)desc_read(desc, sw_token);
+ token = desc_read(desc, sw_token);
buff_dma = desc_read(desc, sw_buffer);
origlen = desc_read(desc, sw_len);
dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir);
cpdma_desc_free(pool, desc, 1);
- (*chan->handler)(token, outlen, status);
+ (*chan->handler)((void *)token, outlen, status);
}
static int __cpdma_chan_process(struct cpdma_chan *chan)
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 38828ab77eb9..06d7c9e4dcda 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1930,8 +1930,8 @@ static int davinci_emac_probe(struct platform_device *pdev)
if (netif_msg_probe(priv)) {
dev_notice(&pdev->dev, "DaVinci EMAC Probe found device "
- "(regs: %p, irq: %d)\n",
- (void *)priv->emac_base_phys, ndev->irq);
+ "(regs: %pa, irq: %d)\n",
+ &priv->emac_base_phys, ndev->irq);
}
pm_runtime_put(&pdev->dev);
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index 3c33f4504d8e..a98aedae1b41 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -34,6 +34,7 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/pm_runtime.h>
#include <linux/davinci_emac.h>
#include <linux/of.h>
@@ -227,14 +228,14 @@ static inline int wait_for_user_access(struct davinci_mdio_data *data)
static inline int wait_for_idle(struct davinci_mdio_data *data)
{
struct davinci_mdio_regs __iomem *regs = data->regs;
- unsigned long timeout = jiffies + msecs_to_jiffies(MDIO_TIMEOUT);
+ u32 val;
+ int ret;
- while (time_after(timeout, jiffies)) {
- if (__raw_readl(&regs->control) & CONTROL_IDLE)
- return 0;
- }
- dev_err(data->dev, "timed out waiting for idle\n");
- return -ETIMEDOUT;
+ ret = readl_poll_timeout(&regs->control, val, val & CONTROL_IDLE,
+ 0, MDIO_TIMEOUT * 1000);
+ if (ret)
+ dev_err(data->dev, "timed out waiting for idle\n");
+
+ return ret;
}
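+ /* readl_poll_timeout() takes its timeout in microseconds, hence
+ * MDIO_TIMEOUT (milliseconds) * 1000; it replaces the open-coded
+ * jiffies polling loop.
+ */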
static int davinci_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg)
@@ -320,7 +321,6 @@ static int davinci_mdio_write(struct mii_bus *bus, int phy_id,
return ret;
}
-#if IS_ENABLED(CONFIG_OF)
static int davinci_mdio_probe_dt(struct mdio_platform_data *data,
struct platform_device *pdev)
{
@@ -338,7 +338,6 @@ static int davinci_mdio_probe_dt(struct mdio_platform_data *data,
return 0;
}
-#endif
#if IS_ENABLED(CONFIG_OF)
static const struct davinci_mdio_of_param of_cpsw_mdio_data = {
@@ -373,7 +372,7 @@ static int davinci_mdio_probe(struct platform_device *pdev)
return -ENOMEM;
}
- if (dev->of_node) {
+ if (IS_ENABLED(CONFIG_OF) && dev->of_node) {
const struct of_device_id *of_id;
ret = davinci_mdio_probe_dt(&data->pdata, pdev);
@@ -428,12 +427,10 @@ static int davinci_mdio_probe(struct platform_device *pdev)
* defined to support backward compatibility with DTs which assume that
* Davinci MDIO will always scan the bus for PHYs detection.
*/
- if (dev->of_node && of_get_child_count(dev->of_node)) {
+ if (dev->of_node && of_get_child_count(dev->of_node))
data->skip_scan = true;
- ret = of_mdiobus_register(data->bus, dev->of_node);
- } else {
- ret = mdiobus_register(data->bus);
- }
+
+ ret = of_mdiobus_register(data->bus, dev->of_node);
if (ret)
goto bail_out;
diff --git a/drivers/net/ethernet/ti/netcp.h b/drivers/net/ethernet/ti/netcp.h
index 8900a6fad318..c4ffdf47bad5 100644
--- a/drivers/net/ethernet/ti/netcp.h
+++ b/drivers/net/ethernet/ti/netcp.h
@@ -33,6 +33,8 @@
#define SGMII_LINK_MAC_MAC_FORCED 2
#define SGMII_LINK_MAC_FIBER 3
#define SGMII_LINK_MAC_PHY_NO_MDIO 4
+#define RGMII_LINK_MAC_PHY 5
+#define RGMII_LINK_MAC_PHY_NO_MDIO 7
#define XGMII_LINK_MAC_PHY 10
#define XGMII_LINK_MAC_MAC_FORCED 11
@@ -212,6 +214,7 @@ struct netcp_module {
int (*add_vid)(void *intf_priv, int vid);
int (*del_vid)(void *intf_priv, int vid);
int (*ioctl)(void *intf_priv, struct ifreq *req, int cmd);
+ int (*set_rx_mode)(void *intf_priv, bool promisc);
/* used internally */
struct list_head module_list;
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index f5a7eb22d0f5..e40aa3e31af2 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -1509,6 +1509,24 @@ static void netcp_addr_sweep_add(struct netcp_intf *netcp)
}
}
+static int netcp_set_promiscuous(struct netcp_intf *netcp, bool promisc)
+{
+ struct netcp_intf_modpriv *priv;
+ struct netcp_module *module;
+ int error;
+
+ for_each_module(netcp, priv) {
+ module = priv->netcp_module;
+ if (!module->set_rx_mode)
+ continue;
+
+ error = module->set_rx_mode(priv->module_priv, promisc);
+ if (error)
+ return error;
+ }
+ return 0;
+}
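+ /* Walks every module bound to the interface and forwards the promisc
+ * state through the new optional set_rx_mode hook, bailing out on the
+ * first error.
+ */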
+
static void netcp_set_rx_mode(struct net_device *ndev)
{
struct netcp_intf *netcp = netdev_priv(ndev);
@@ -1538,6 +1556,7 @@ static void netcp_set_rx_mode(struct net_device *ndev)
/* finally sweep and callout into modules */
netcp_addr_sweep_del(netcp);
netcp_addr_sweep_add(netcp);
+ netcp_set_promiscuous(netcp, promisc);
spin_unlock(&netcp->lock);
}
@@ -2155,8 +2174,13 @@ static int netcp_probe(struct platform_device *pdev)
struct device_node *child, *interfaces;
struct netcp_device *netcp_device;
struct device *dev = &pdev->dev;
+ struct netcp_module *module;
int ret;
+ if (!knav_dma_device_ready() ||
+ !knav_qmss_device_ready())
+ return -EPROBE_DEFER;
+
if (!node) {
dev_err(dev, "could not find device info\n");
return -ENODEV;
@@ -2203,6 +2227,14 @@ static int netcp_probe(struct platform_device *pdev)
/* Add the device instance to the list */
list_add_tail(&netcp_device->device_list, &netcp_devices);
+ /* Probe & attach any modules already registered */
+ mutex_lock(&netcp_modules_lock);
+ for_each_netcp_module(module) {
+ ret = netcp_module_probe(netcp_device, module);
+ if (ret < 0)
+ dev_err(dev, "module(%s) probe failed\n", module->name);
+ }
+ mutex_unlock(&netcp_modules_lock);
return 0;
probe_quit_interface:
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index 56dbc0b9fedc..6e455a27a8de 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -21,6 +21,7 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_mdio.h>
+#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/if_vlan.h>
#include <linux/ptp_classify.h>
@@ -42,7 +43,7 @@
/* 1G Ethernet SS defines */
#define GBE_MODULE_NAME "netcp-gbe"
-#define GBE_SS_VERSION_14 0x4ed21104
+#define GBE_SS_VERSION_14 0x4ed2
#define GBE_SS_REG_INDEX 0
#define GBE_SGMII34_REG_INDEX 1
@@ -72,6 +73,11 @@
#define IS_SS_ID_NU(d) \
(GBE_IDENT((d)->ss_version) == GBE_SS_ID_NU)
+#define IS_SS_ID_VER_14(d) \
+ (GBE_IDENT((d)->ss_version) == GBE_SS_VERSION_14)
+#define IS_SS_ID_2U(d) \
+ (GBE_IDENT((d)->ss_version) == GBE_SS_ID_2U)
+
#define GBENU_SS_REG_INDEX 0
#define GBENU_SM_REG_INDEX 1
#define GBENU_SGMII_MODULE_OFFSET 0x100
@@ -86,7 +92,7 @@
/* 10G Ethernet SS defines */
#define XGBE_MODULE_NAME "netcp-xgbe"
-#define XGBE_SS_VERSION_10 0x4ee42100
+#define XGBE_SS_VERSION_10 0x4ee4
#define XGBE_SS_REG_INDEX 0
#define XGBE_SM_REG_INDEX 1
@@ -166,6 +172,11 @@
#define GBE_RXHOOK_ORDER 0
#define GBE_DEFAULT_ALE_AGEOUT 30
#define SLAVE_LINK_IS_XGMII(s) ((s)->link_interface >= XGMII_LINK_MAC_PHY)
+#define SLAVE_LINK_IS_RGMII(s) \
+ (((s)->link_interface >= RGMII_LINK_MAC_PHY) && \
+ ((s)->link_interface <= RGMII_LINK_MAC_PHY_NO_MDIO))
+#define SLAVE_LINK_IS_SGMII(s) \
+ ((s)->link_interface <= SGMII_LINK_MAC_PHY_NO_MDIO)
#define NETCP_LINK_STATE_INVALID -1
#define GBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
@@ -549,6 +560,7 @@ struct gbe_ss_regs {
struct gbe_ss_regs_ofs {
u16 id_ver;
u16 control;
+ u16 rgmii_status; /* 2U */
};
struct gbe_switch_regs {
@@ -591,6 +603,7 @@ struct gbe_port_regs {
struct gbe_port_regs_ofs {
u16 port_vlan;
u16 tx_pri_map;
+ u16 rx_pri_map;
u16 sa_lo;
u16 sa_hi;
u16 ts_ctl;
@@ -695,6 +708,7 @@ struct gbe_slave {
u32 link_interface;
u32 mac_control;
u8 phy_port_t;
+ struct device_node *node;
struct device_node *phy_node;
struct ts_ctl ts_ctl;
struct list_head slave_list;
@@ -1915,7 +1929,7 @@ static void keystone_get_ethtool_stats(struct net_device *ndev,
gbe_dev = gbe_intf->gbe_dev;
spin_lock_bh(&gbe_dev->hw_stats_lock);
- if (gbe_dev->ss_version == GBE_SS_VERSION_14)
+ if (IS_SS_ID_VER_14(gbe_dev))
gbe_update_stats_ver14(gbe_dev, data);
else
gbe_update_stats(gbe_dev, data);
@@ -2091,8 +2105,9 @@ static void netcp_ethss_link_state_action(struct gbe_priv *gbe_dev,
ALE_PORT_STATE_FORWARD);
if (ndev && slave->open &&
- slave->link_interface != SGMII_LINK_MAC_PHY &&
- slave->link_interface != XGMII_LINK_MAC_PHY)
+ ((slave->link_interface != SGMII_LINK_MAC_PHY) &&
+ (slave->link_interface != RGMII_LINK_MAC_PHY) &&
+ (slave->link_interface != XGMII_LINK_MAC_PHY)))
netif_carrier_on(ndev);
} else {
writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
@@ -2101,8 +2116,9 @@ static void netcp_ethss_link_state_action(struct gbe_priv *gbe_dev,
ALE_PORT_STATE,
ALE_PORT_STATE_DISABLE);
if (ndev &&
- slave->link_interface != SGMII_LINK_MAC_PHY &&
- slave->link_interface != XGMII_LINK_MAC_PHY)
+ ((slave->link_interface != SGMII_LINK_MAC_PHY) &&
+ (slave->link_interface != RGMII_LINK_MAC_PHY) &&
+ (slave->link_interface != XGMII_LINK_MAC_PHY)))
netif_carrier_off(ndev);
}
@@ -2115,23 +2131,35 @@ static bool gbe_phy_link_status(struct gbe_slave *slave)
return !slave->phy || slave->phy->link;
}
+#define RGMII_REG_STATUS_LINK BIT(0)
+
+static void netcp_2u_rgmii_get_port_link(struct gbe_priv *gbe_dev, bool *status)
+{
+ u32 val = 0;
+
+ val = readl(GBE_REG_ADDR(gbe_dev, ss_regs, rgmii_status));
+ *status = !!(val & RGMII_REG_STATUS_LINK);
+}
+
static void netcp_ethss_update_link_state(struct gbe_priv *gbe_dev,
struct gbe_slave *slave,
struct net_device *ndev)
{
- int sp = slave->slave_num;
- int phy_link_state, sgmii_link_state = 1, link_state;
+ bool sw_link_state = true, phy_link_state;
+ int sp = slave->slave_num, link_state;
if (!slave->open)
return;
- if (!SLAVE_LINK_IS_XGMII(slave)) {
- sgmii_link_state =
- netcp_sgmii_get_port_link(SGMII_BASE(gbe_dev, sp), sp);
- }
+ if (SLAVE_LINK_IS_RGMII(slave))
+ netcp_2u_rgmii_get_port_link(gbe_dev,
+ &sw_link_state);
+ if (SLAVE_LINK_IS_SGMII(slave))
+ sw_link_state =
+ netcp_sgmii_get_port_link(SGMII_BASE(gbe_dev, sp), sp);
phy_link_state = gbe_phy_link_status(slave);
- link_state = phy_link_state & sgmii_link_state;
+ link_state = phy_link_state & sw_link_state;
if (atomic_xchg(&slave->link_state, link_state) != link_state)
netcp_ethss_link_state_action(gbe_dev, ndev, slave,
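Condensed, the new link resolution reads one switch-side status source per interface type and ANDs it with the PHY state. A sketch with illustrative names (in the driver, the RGMII status comes from bit 0 of the 2U subsystem's RGMII_STATUS register, the SGMII status from the per-port SGMII registers, and XGMII slaves keep the default of link-up):

#include <stdbool.h>

enum link_if { LINK_SGMII, LINK_RGMII, LINK_XGMII };

static bool slave_link_up(enum link_if type, bool phy_link,
			  bool sgmii_link, bool rgmii_link)
{
	bool sw_link = true;	/* XGMII: no switch-side status consulted */

	if (type == LINK_RGMII)
		sw_link = rgmii_link;
	else if (type == LINK_SGMII)
		sw_link = sgmii_link;

	/* the link is reported up only when both sides agree */
	return phy_link && sw_link;
}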
@@ -2205,7 +2233,7 @@ static void gbe_port_config(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
max_rx_len = NETCP_MAX_FRAME_SIZE;
/* Enable correct MII mode at SS level */
- if ((gbe_dev->ss_version == XGBE_SS_VERSION_10) &&
+ if (IS_SS_ID_XGBE(gbe_dev) &&
(slave->link_interface >= XGMII_LINK_MAC_PHY)) {
xgmii_mode = readl(GBE_REG_ADDR(gbe_dev, ss_regs, control));
xgmii_mode |= (1 << slave->slave_num);
@@ -2236,7 +2264,8 @@ static void gbe_slave_stop(struct gbe_intf *intf)
struct gbe_priv *gbe_dev = intf->gbe_dev;
struct gbe_slave *slave = intf->slave;
- gbe_sgmii_rtreset(gbe_dev, slave, true);
+ if (!IS_SS_ID_2U(gbe_dev))
+ gbe_sgmii_rtreset(gbe_dev, slave, true);
gbe_port_reset(slave);
/* Disable forwarding */
cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
@@ -2271,11 +2300,20 @@ static int gbe_slave_open(struct gbe_intf *gbe_intf)
void (*hndlr)(struct net_device *) = gbe_adjust_link;
- gbe_sgmii_config(priv, slave);
+ if (!IS_SS_ID_2U(priv))
+ gbe_sgmii_config(priv, slave);
gbe_port_reset(slave);
- gbe_sgmii_rtreset(priv, slave, false);
+ if (!IS_SS_ID_2U(priv))
+ gbe_sgmii_rtreset(priv, slave, false);
gbe_port_config(priv, slave, priv->rx_packet_max);
gbe_set_slave_mac(slave, gbe_intf);
+ /* For NU & 2U switch, map the vlan priorities to zero
+ * as we only configure to use priority 0
+ */
+ if (IS_SS_ID_MU(priv))
+ writel(HOST_TX_PRI_MAP_DEFAULT,
+ GBE_REG_ADDR(slave, port_regs, rx_pri_map));
+
/* enable forwarding */
cpsw_ale_control_set(priv->ale, slave->port_num,
ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
@@ -2286,6 +2324,21 @@ static int gbe_slave_open(struct gbe_intf *gbe_intf)
has_phy = true;
phy_mode = PHY_INTERFACE_MODE_SGMII;
slave->phy_port_t = PORT_MII;
+ } else if (slave->link_interface == RGMII_LINK_MAC_PHY) {
+ has_phy = true;
+ phy_mode = of_get_phy_mode(slave->node);
+ /* if phy-mode is not present, default to
+ * PHY_INTERFACE_MODE_RGMII
+ */
+ if (phy_mode < 0)
+ phy_mode = PHY_INTERFACE_MODE_RGMII;
+
+ if (!phy_interface_mode_is_rgmii(phy_mode)) {
+ dev_err(priv->dev,
+ "Unsupported phy mode %d\n", phy_mode);
+ return -EINVAL;
+ }
+ slave->phy_port_t = PORT_MII;
} else if (slave->link_interface == XGMII_LINK_MAC_PHY) {
has_phy = true;
phy_mode = PHY_INTERFACE_MODE_NA;
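Two details worth noting in the hunk above: of_get_phy_mode() returns a negative errno when the phy-mode property is absent, and phy_interface_mode_is_rgmii() accepts PHY_INTERFACE_MODE_RGMII together with its internal-delay variants (RGMII_ID, RGMII_RXID, RGMII_TXID), so boards that need delay tuning still pass the check. A condensed sketch of the fallback-then-validate step (pick_rgmii_mode() is hypothetical):

static int pick_rgmii_mode(int parsed)	/* parsed: of_get_phy_mode() result */
{
	if (parsed < 0)				/* property absent: default */
		parsed = PHY_INTERFACE_MODE_RGMII;

	if (!phy_interface_mode_is_rgmii(parsed))
		return -EINVAL;			/* present, but not an RGMII variant */

	return parsed;
}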
@@ -2293,7 +2346,7 @@ static int gbe_slave_open(struct gbe_intf *gbe_intf)
}
if (has_phy) {
- if (priv->ss_version == XGBE_SS_VERSION_10)
+ if (IS_SS_ID_XGBE(priv))
hndlr = xgbe_adjust_link;
slave->phy = of_phy_connect(gbe_intf->ndev,
@@ -2722,6 +2775,61 @@ static inline int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *req)
}
#endif /* CONFIG_TI_CPTS */
+static int gbe_set_rx_mode(void *intf_priv, bool promisc)
+{
+ struct gbe_intf *gbe_intf = intf_priv;
+ struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
+ struct cpsw_ale *ale = gbe_dev->ale;
+ unsigned long timeout;
+ int i, ret = -ETIMEDOUT;
+
+ /* Disable(1)/Enable(0) Learn for all ports (host is port 0 and
+ * slaves are port 1 and up)
+ */
+ for (i = 0; i <= gbe_dev->num_slaves; i++) {
+ cpsw_ale_control_set(ale, i,
+ ALE_PORT_NOLEARN, !!promisc);
+ cpsw_ale_control_set(ale, i,
+ ALE_PORT_NO_SA_UPDATE, !!promisc);
+ }
+
+ if (!promisc) {
+ /* Don't Flood All Unicast Packets to Host port */
+ cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0);
+ dev_vdbg(gbe_dev->dev, "promiscuous mode disabled\n");
+ return 0;
+ }
+
+ timeout = jiffies + HZ;
+
+ /* Clear All Untouched entries */
+ cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
+ do {
+ cpu_relax();
+ if (cpsw_ale_control_get(ale, 0, ALE_AGEOUT)) {
+ ret = 0;
+ break;
+ }
+
+ } while (time_after(timeout, jiffies));
+
+ /* Make sure it is not a false timeout */
+ if (ret && !cpsw_ale_control_get(ale, 0, ALE_AGEOUT))
+ return ret;
+
+ cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
+
+ /* Clear all mcast from ALE */
+ cpsw_ale_flush_multicast(ale,
+ GBE_PORT_MASK(gbe_dev->ale_ports),
+ -1);
+
+ /* Flood All Unicast Packets to Host port */
+ cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
+ dev_vdbg(gbe_dev->dev, "promiscuous mode enabled\n");
+ return 0;
+}
+
static int gbe_ioctl(void *intf_priv, struct ifreq *req, int cmd)
{
struct gbe_intf *gbe_intf = intf_priv;
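The ageout wait in gbe_set_rx_mode() is the usual busy-poll-with-deadline idiom, including a recheck to rule out a false timeout (the condition can become true between the loop's final poll and the deadline expiring), which is also why the function can report success once the guard passes. The bare shape of the pattern, with a hypothetical condition() predicate standing in for the ALE_AGEOUT read:

static int wait_for_condition(void)
{
	unsigned long deadline = jiffies + HZ;	/* ~1s budget */
	int ret = -ETIMEDOUT;

	do {
		cpu_relax();
		if (condition()) {	/* e.g. cpsw_ale_control_get(...) */
			ret = 0;
			break;
		}
	} while (time_after(deadline, jiffies));

	/* false-timeout guard: the condition may have just become true */
	if (ret && condition())
		ret = 0;

	return ret;
}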
@@ -2764,7 +2872,7 @@ static void netcp_ethss_timer(struct timer_list *t)
/* A timer runs as a BH, no need to block them */
spin_lock(&gbe_dev->hw_stats_lock);
- if (gbe_dev->ss_version == GBE_SS_VERSION_14)
+ if (IS_SS_ID_VER_14(gbe_dev))
gbe_update_stats_ver14(gbe_dev, NULL);
else
gbe_update_stats(gbe_dev, NULL);
@@ -2807,7 +2915,7 @@ static int gbe_open(void *intf_priv, struct net_device *ndev)
GBE_RTL_VERSION(reg), GBE_IDENT(reg));
/* For 10G and on NetCP 1.5, use directed to port */
- if ((gbe_dev->ss_version == XGBE_SS_VERSION_10) || IS_SS_ID_MU(gbe_dev))
+ if (IS_SS_ID_XGBE(gbe_dev) || IS_SS_ID_MU(gbe_dev))
gbe_intf->tx_pipe.flags = SWITCH_TO_PORT_IN_TAGINFO;
if (gbe_dev->enable_ale)
@@ -2911,8 +3019,10 @@ static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
slave->link_interface = SGMII_LINK_MAC_PHY;
}
+ slave->node = node;
slave->open = false;
if ((slave->link_interface == SGMII_LINK_MAC_PHY) ||
+ (slave->link_interface == RGMII_LINK_MAC_PHY) ||
(slave->link_interface == XGMII_LINK_MAC_PHY))
slave->phy_node = of_parse_phandle(node, "phy-handle", 0);
slave->port_num = gbe_get_slave_port(gbe_dev, slave->slave_num);
@@ -2924,7 +3034,7 @@ static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
/* Emac regs memmap are contiguous but port regs are not */
port_reg_num = slave->slave_num;
- if (gbe_dev->ss_version == GBE_SS_VERSION_14) {
+ if (IS_SS_ID_VER_14(gbe_dev)) {
if (slave->slave_num > 1) {
port_reg_ofs = GBE13_SLAVE_PORT2_OFFSET;
port_reg_num -= 2;
@@ -2939,7 +3049,7 @@ static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
emac_reg_ofs = GBENU_EMAC_OFFSET;
port_reg_blk_sz = 0x1000;
emac_reg_blk_sz = 0x1000;
- } else if (gbe_dev->ss_version == XGBE_SS_VERSION_10) {
+ } else if (IS_SS_ID_XGBE(gbe_dev)) {
port_reg_ofs = XGBE10_SLAVE_PORT_OFFSET;
emac_reg_ofs = XGBE10_EMAC_OFFSET;
port_reg_blk_sz = 0x30;
@@ -2955,7 +3065,7 @@ static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
slave->emac_regs = gbe_dev->switch_regs + emac_reg_ofs +
(emac_reg_blk_sz * slave->slave_num);
- if (gbe_dev->ss_version == GBE_SS_VERSION_14) {
+ if (IS_SS_ID_VER_14(gbe_dev)) {
/* Initialize slave port register offsets */
GBE_SET_REG_OFS(slave, port_regs, port_vlan);
GBE_SET_REG_OFS(slave, port_regs, tx_pri_map);
@@ -2976,6 +3086,7 @@ static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
/* Initialize slave port register offsets */
GBENU_SET_REG_OFS(slave, port_regs, port_vlan);
GBENU_SET_REG_OFS(slave, port_regs, tx_pri_map);
+ GBENU_SET_REG_OFS(slave, port_regs, rx_pri_map);
GBENU_SET_REG_OFS(slave, port_regs, sa_lo);
GBENU_SET_REG_OFS(slave, port_regs, sa_hi);
GBENU_SET_REG_OFS(slave, port_regs, ts_ctl);
@@ -2989,7 +3100,7 @@ static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
GBENU_SET_REG_OFS(slave, emac_regs, mac_control);
GBENU_SET_REG_OFS(slave, emac_regs, soft_reset);
- } else if (gbe_dev->ss_version == XGBE_SS_VERSION_10) {
+ } else if (IS_SS_ID_XGBE(gbe_dev)) {
/* Initialize slave port register offsets */
XGBE_SET_REG_OFS(slave, port_regs, port_vlan);
XGBE_SET_REG_OFS(slave, port_regs, tx_pri_map);
@@ -3039,7 +3150,8 @@ static void init_secondary_ports(struct gbe_priv *gbe_dev,
continue;
}
- gbe_sgmii_config(gbe_dev, slave);
+ if (!IS_SS_ID_2U(gbe_dev))
+ gbe_sgmii_config(gbe_dev, slave);
gbe_port_reset(slave);
gbe_port_config(gbe_dev, slave, gbe_dev->rx_packet_max);
list_add_tail(&slave->slave_list, &gbe_dev->secondary_slaves);
@@ -3073,6 +3185,9 @@ static void init_secondary_ports(struct gbe_priv *gbe_dev,
if (slave->link_interface == SGMII_LINK_MAC_PHY) {
phy_mode = PHY_INTERFACE_MODE_SGMII;
slave->phy_port_t = PORT_MII;
+ } else if (slave->link_interface == RGMII_LINK_MAC_PHY) {
+ phy_mode = PHY_INTERFACE_MODE_RGMII;
+ slave->phy_port_t = PORT_MII;
} else {
phy_mode = PHY_INTERFACE_MODE_NA;
slave->phy_port_t = PORT_FIBRE;
@@ -3080,6 +3195,7 @@ static void init_secondary_ports(struct gbe_priv *gbe_dev,
for_each_sec_slave(slave, gbe_dev) {
if ((slave->link_interface != SGMII_LINK_MAC_PHY) &&
+ (slave->link_interface != RGMII_LINK_MAC_PHY) &&
(slave->link_interface != XGMII_LINK_MAC_PHY))
continue;
slave->phy =
@@ -3090,7 +3206,6 @@ static void init_secondary_ports(struct gbe_priv *gbe_dev,
if (!slave->phy) {
dev_err(dev, "phy not found for slave %d\n",
slave->slave_num);
- slave->phy = NULL;
} else {
dev_dbg(dev, "phy found: id is: 0x%s\n",
phydev_name(slave->phy));
@@ -3355,7 +3470,7 @@ static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev,
gbe_dev->num_stats_mods = gbe_dev->max_num_ports;
gbe_dev->et_stats = gbenu_et_stats;
- if (IS_SS_ID_NU(gbe_dev))
+ if (IS_SS_ID_MU(gbe_dev))
gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
(gbe_dev->max_num_slaves * GBENU_ET_STATS_PORT_SIZE);
else
@@ -3396,7 +3511,9 @@ static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev,
}
gbe_dev->switch_regs = regs;
- gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBENU_SGMII_MODULE_OFFSET;
+ if (!IS_SS_ID_2U(gbe_dev))
+ gbe_dev->sgmii_port_regs =
+ gbe_dev->ss_regs + GBENU_SGMII_MODULE_OFFSET;
/* Although sgmii modules are mem mapped to one contiguous
* region on GBENU devices, setting sgmii_port34_regs allows
@@ -3419,6 +3536,8 @@ static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev,
/* Subsystem registers */
GBENU_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
+ /* ok to set for MU, but used by 2U only */
+ GBENU_SET_REG_OFS(gbe_dev, ss_regs, rgmii_status);
/* Switch module registers */
GBENU_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
@@ -3464,6 +3583,7 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
gbe_dev->max_num_slaves = 8;
} else if (of_device_is_compatible(node, "ti,netcp-gbe-2")) {
gbe_dev->max_num_slaves = 1;
+ gbe_module.set_rx_mode = gbe_set_rx_mode;
} else if (of_device_is_compatible(node, "ti,netcp-xgbe")) {
gbe_dev->max_num_slaves = 2;
} else {
@@ -3508,7 +3628,7 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
dev_dbg(dev, "ss_version: 0x%08x\n", gbe_dev->ss_version);
- if (gbe_dev->ss_version == GBE_SS_VERSION_14)
+ if (IS_SS_ID_VER_14(gbe_dev))
ret = set_gbe_ethss14_priv(gbe_dev, node);
else if (IS_SS_ID_MU(gbe_dev))
ret = set_gbenu_ethss_priv(gbe_dev, node);
@@ -3606,7 +3726,7 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
spin_lock_bh(&gbe_dev->hw_stats_lock);
for (i = 0; i < gbe_dev->num_stats_mods; i++) {
- if (gbe_dev->ss_version == GBE_SS_VERSION_14)
+ if (IS_SS_ID_VER_14(gbe_dev))
gbe_reset_mod_stats_ver14(gbe_dev, i);
else
gbe_reset_mod_stats(gbe_dev, i);
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index e74e1e897864..f24f48f33802 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -900,7 +900,6 @@ static void axienet_dma_err_handler(unsigned long data);
* @ndev: Pointer to net_device structure
*
* Return: 0, on success.
- * -ENODEV, if PHY cannot be connected to
* non-zero error value on failure
*
* This is the driver open routine. It calls phy_start to start the PHY device.