Diffstat (limited to 'drivers')
-rw-r--r--  drivers/accel/habanalabs/common/hwmon.c | 2
-rw-r--r--  drivers/accel/ivpu/ivpu_job.c | 18
-rw-r--r--  drivers/accel/ivpu/ivpu_pm.c | 26
-rw-r--r--  drivers/acpi/acpi_video.c | 15
-rw-r--r--  drivers/acpi/bus.c | 83
-rw-r--r--  drivers/acpi/resource.c | 7
-rw-r--r--  drivers/acpi/video_detect.c | 58
-rw-r--r--  drivers/acpi/x86/utils.c | 1
-rw-r--r--  drivers/base/cacheinfo.c | 16
-rw-r--r--  drivers/block/loop.c | 18
-rw-r--r--  drivers/block/ublk_drv.c | 26
-rw-r--r--  drivers/block/virtio_blk.c | 269
-rw-r--r--  drivers/bluetooth/btbcm.c | 2
-rw-r--r--  drivers/bluetooth/btsdio.c | 1
-rw-r--r--  drivers/bus/imx-weim.c | 6
-rw-r--r--  drivers/clk/clk-renesas-pcie.c | 3
-rw-r--r--  drivers/clk/imx/clk-imx6ul.c | 10
-rw-r--r--  drivers/clk/sprd/common.c | 9
-rw-r--r--  drivers/counter/104-quad-8.c | 31
-rw-r--r--  drivers/cpufreq/amd-pstate.c | 18
-rw-r--r--  drivers/cxl/core/hdm.c | 126
-rw-r--r--  drivers/cxl/core/pci.c | 38
-rw-r--r--  drivers/cxl/core/pmem.c | 6
-rw-r--r--  drivers/cxl/core/port.c | 38
-rw-r--r--  drivers/cxl/core/region.c | 33
-rw-r--r--  drivers/cxl/cxl.h | 8
-rw-r--r--  drivers/cxl/cxlpci.h | 14
-rw-r--r--  drivers/cxl/port.c | 4
-rw-r--r--  drivers/dma/apple-admac.c | 20
-rw-r--r--  drivers/dma/dmaengine.c | 2
-rw-r--r--  drivers/dma/xilinx/xdma.c | 2
-rw-r--r--  drivers/firmware/psci/psci.c | 3
-rw-r--r--  drivers/gpio/Kconfig | 2
-rw-r--r--  drivers/gpio/gpio-davinci.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 57
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h | 6
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c | 4
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c | 87
-rw-r--r--  drivers/gpu/drm/armada/armada_drv.c | 1
-rw-r--r--  drivers/gpu/drm/i915/display/icl_dsi.c | 20
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_mst.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_ttm.c | 5
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_execlists_submission.c | 12
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_huc.c | 7
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_huc.h | 7
-rw-r--r--  drivers/gpu/drm/i915/i915_perf.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/disp.c | 32
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dp.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf108.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk110.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c | 1
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_mmu.c | 1
-rw-r--r--  drivers/gpu/drm/scheduler/sched_entity.c | 11
-rw-r--r--  drivers/hid/Kconfig | 2
-rw-r--r--  drivers/hid/bpf/hid_bpf_dispatch.c | 3
-rw-r--r--  drivers/hid/hid-ids.h | 4
-rw-r--r--  drivers/hid/hid-input.c | 6
-rw-r--r--  drivers/hid/hid-sensor-custom.c | 2
-rw-r--r--  drivers/hid/hid-topre.c | 2
-rw-r--r--  drivers/hid/intel-ish-hid/ishtp/bus.c | 4
-rw-r--r--  drivers/hv/connection.c | 4
-rw-r--r--  drivers/hwmon/hwmon.c | 4
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm4x-core.c | 24
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm4x.h | 20
-rw-r--r--  drivers/i2c/busses/i2c-mchp-pci1xxxx.c | 60
-rw-r--r--  drivers/i2c/busses/i2c-ocores.c | 35
-rw-r--r--  drivers/i2c/i2c-core-of.c | 5
-rw-r--r--  drivers/iio/accel/kionix-kx022a.c | 2
-rw-r--r--  drivers/iio/adc/ad7791.c | 2
-rw-r--r--  drivers/iio/adc/ltc2497.c | 6
-rw-r--r--  drivers/iio/adc/max11410.c | 22
-rw-r--r--  drivers/iio/adc/palmas_gpadc.c | 2
-rw-r--r--  drivers/iio/adc/qcom-spmi-adc5.c | 10
-rw-r--r--  drivers/iio/adc/ti-ads7950.c | 1
-rw-r--r--  drivers/iio/dac/cio-dac.c | 4
-rw-r--r--  drivers/iio/imu/Kconfig | 1
-rw-r--r--  drivers/iio/industrialio-buffer.c | 21
-rw-r--r--  drivers/iio/light/cm32181.c | 12
-rw-r--r--  drivers/iio/light/vcnl4000.c | 3
-rw-r--r--  drivers/infiniband/core/cma.c | 60
-rw-r--r--  drivers/infiniband/core/verbs.c | 2
-rw-r--r--  drivers/infiniband/hw/erdma/erdma_cq.c | 2
-rw-r--r--  drivers/infiniband/hw/erdma/erdma_hw.h | 4
-rw-r--r--  drivers/infiniband/hw/erdma/erdma_main.c | 2
-rw-r--r--  drivers/infiniband/hw/erdma/erdma_qp.c | 4
-rw-r--r--  drivers/infiniband/hw/erdma/erdma_verbs.h | 2
-rw-r--r--  drivers/infiniband/hw/irdma/cm.c | 16
-rw-r--r--  drivers/infiniband/hw/irdma/cm.h | 2
-rw-r--r--  drivers/infiniband/hw/irdma/hw.c | 3
-rw-r--r--  drivers/infiniband/hw/irdma/utils.c | 5
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c | 4
-rw-r--r--  drivers/input/joystick/xpad.c | 7
-rw-r--r--  drivers/input/mouse/alps.c | 16
-rw-r--r--  drivers/input/mouse/focaltech.c | 8
-rw-r--r--  drivers/input/serio/i8042-acpipnpio.h | 36
-rw-r--r--  drivers/input/touchscreen/goodix.c | 14
-rw-r--r--  drivers/iommu/exynos-iommu.c | 17
-rw-r--r--  drivers/iommu/intel/dmar.c | 3
-rw-r--r--  drivers/iommu/intel/iommu.h | 2
-rw-r--r--  drivers/iommu/intel/irq_remapping.c | 6
-rw-r--r--  drivers/iommu/intel/perfmon.c | 68
-rw-r--r--  drivers/iommu/iommufd/pages.c | 16
-rw-r--r--  drivers/md/md.c | 3
-rw-r--r--  drivers/media/i2c/imx290.c | 6
-rw-r--r--  drivers/media/platform/qcom/venus/firmware.c | 4
-rw-r--r--  drivers/memstick/core/memstick.c | 5
-rw-r--r--  drivers/mmc/host/sdhci_am654.c | 2
-rw-r--r--  drivers/mtd/mtdblock.c | 12
-rw-r--r--  drivers/mtd/nand/raw/meson_nand.c | 6
-rw-r--r--  drivers/mtd/nand/raw/stm32_fmc2_nand.c | 3
-rw-r--r--  drivers/mtd/spi-nor/core.c | 14
-rw-r--r--  drivers/mtd/spi-nor/core.h | 2
-rw-r--r--  drivers/mtd/spi-nor/debugfs.c | 11
-rw-r--r--  drivers/mtd/ubi/build.c | 21
-rw-r--r--  drivers/mtd/ubi/wl.c | 4
-rw-r--r--  drivers/net/bonding/bond_main.c | 42
-rw-r--r--  drivers/net/dsa/microchip/ksz8.h | 8
-rw-r--r--  drivers/net/dsa/microchip/ksz8795.c | 181
-rw-r--r--  drivers/net/dsa/microchip/ksz_common.c | 2
-rw-r--r--  drivers/net/dsa/mt7530-mdio.c | 16
-rw-r--r--  drivers/net/dsa/mt7530.c | 6
-rw-r--r--  drivers/net/dsa/mt7530.h | 4
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.c | 6
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global2.c | 20
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global2.h | 1
-rw-r--r--  drivers/net/dsa/ocelot/felix.c | 5
-rw-r--r--  drivers/net/dsa/ocelot/felix_vsc9959.c | 43
-rw-r--r--  drivers/net/dsa/qca/Kconfig | 8
-rw-r--r--  drivers/net/dsa/qca/Makefile | 3
-rw-r--r--  drivers/net/dsa/qca/qca8k-8xxx.c | 20
-rw-r--r--  drivers/net/dsa/qca/qca8k-leds.c | 277
-rw-r--r--  drivers/net/dsa/qca/qca8k.h | 74
-rw-r--r--  drivers/net/dsa/qca/qca8k_leds.h | 16
-rw-r--r--  drivers/net/ethernet/amd/Kconfig | 12
-rw-r--r--  drivers/net/ethernet/amd/Makefile | 1
-rw-r--r--  drivers/net/ethernet/amd/pds_core/Makefile | 14
-rw-r--r--  drivers/net/ethernet/amd/pds_core/adminq.c | 290
-rw-r--r--  drivers/net/ethernet/amd/pds_core/auxbus.c | 264
-rw-r--r--  drivers/net/ethernet/amd/pds_core/core.c | 597
-rw-r--r--  drivers/net/ethernet/amd/pds_core/core.h | 312
-rw-r--r--  drivers/net/ethernet/amd/pds_core/debugfs.c | 170
-rw-r--r--  drivers/net/ethernet/amd/pds_core/dev.c | 351
-rw-r--r--  drivers/net/ethernet/amd/pds_core/devlink.c | 183
-rw-r--r--  drivers/net/ethernet/amd/pds_core/fw.c | 194
-rw-r--r--  drivers/net/ethernet/amd/pds_core/main.c | 475
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 53
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h | 9
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 16
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c | 19
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c | 29
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h | 6
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c | 6
-rw-r--r--  drivers/net/ethernet/cadence/macb.h | 6
-rw-r--r--  drivers/net/ethernet/cadence/macb_main.c | 21
-rw-r--r--  drivers/net/ethernet/cadence/macb_ptp.c | 4
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/freescale/enetc/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc.c | 20
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc.h | 4
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_ethtool.c | 110
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_hw.h | 7
-rw-r--r--  drivers/net/ethernet/freescale/fec.h | 5
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 32
-rw-r--r--  drivers/net/ethernet/google/gve/gve.h | 2
-rw-r--r--  drivers/net/ethernet/google/gve/gve_tx.c | 16
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 51
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 9
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf.h | 20
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_main.c | 44
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_virtchnl.c | 68
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c | 23
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 42
-rw-r--r--  drivers/net/ethernet/marvell/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.c | 4
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.h | 3
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_ppe.c | 14
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c | 2
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_ppe_offload.c | 40
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_wed.c | 101
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c | 22
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_tx.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Makefile | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/dev.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ecpf.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h | 46
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/params.c | 32
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/params.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c | 22
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/accept.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c | 20
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c | 66
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/drop.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ptype.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/sample.c | 20
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/tun.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan_mangle.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c | 169
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h | 31
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c | 368
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h | 55
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c | 522
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h | 60
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c | 286
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c | 204
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c | 42
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 93
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 215
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 279
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eq.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c | 287
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h | 17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_mcast.c | 1126
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h | 181
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/diag/bridge_tracepoint.h | 35
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 48
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c | 92
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_arg.c | 273
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c | 60
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c | 30
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c | 58
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c | 41
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ptrn.c | 241
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c | 270
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c | 57
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c | 120
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v2.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h | 73
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr_ste_v1.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/pci_hw.h | 2
-rw-r--r--  drivers/net/ethernet/micrel/ksz884x.c | 294
-rw-r--r--  drivers/net/ethernet/microchip/lan966x/lan966x_main.c | 2
-rw-r--r--  drivers/net/ethernet/microsoft/mana/mana_bpf.c | 22
-rw-r--r--  drivers/net/ethernet/microsoft/mana/mana_en.c | 383
-rw-r--r--  drivers/net/ethernet/mscc/ocelot.c | 60
-rw-r--r--  drivers/net/ethernet/mscc/ocelot.h | 15
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_io.c | 50
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_mm.c | 107
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_stats.c | 42
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_hwmon.c | 2
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_port.c | 1
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_phc.c | 5
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c | 8
-rw-r--r--  drivers/net/ethernet/qualcomm/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/realtek/r8169_main.c | 51
-rw-r--r--  drivers/net/ethernet/sfc/efx.c | 1
-rw-r--r--  drivers/net/ethernet/sfc/efx_common.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/Kconfig | 12
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/Makefile | 1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/chain_mode.c | 10
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/common.h | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c | 14
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c | 1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c | 8
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c | 178
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c | 197
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c | 171
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c | 60
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c | 36
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c | 3
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c | 19
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c | 14
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4.h | 101
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | 50
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c | 8
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c | 201
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h | 92
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c | 105
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h | 22
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c | 18
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c | 9
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c | 6
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c | 71
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/enh_desc.c | 11
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/hwif.h | 176
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/norm_desc.c | 8
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/ring_mode.c | 10
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac.h | 7
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 9
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 119
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | 3
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 3
-rw-r--r--  drivers/net/ethernet/sun/niu.c | 2
-rw-r--r--  drivers/net/ethernet/sun/sunhme.c | 7
-rw-r--r--  drivers/net/ethernet/ti/am65-cpsw-nuss.c | 48
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 2
-rw-r--r--  drivers/net/ethernet/ti/cpsw_new.c | 3
-rw-r--r--  drivers/net/ethernet/wangxun/libwx/wx_lib.c | 5
-rw-r--r--  drivers/net/fddi/skfp/rmt.c | 6
-rw-r--r--  drivers/net/hamradio/Kconfig | 2
-rw-r--r--  drivers/net/macsec.c | 14
-rw-r--r--  drivers/net/mdio/Kconfig | 3
-rw-r--r--  drivers/net/phy/Kconfig | 8
-rw-r--r--  drivers/net/phy/aquantia_hwmon.c | 2
-rw-r--r--  drivers/net/phy/bcm54140.c | 2
-rw-r--r--  drivers/net/phy/marvell.c | 83
-rw-r--r--  drivers/net/phy/marvell10g.c | 2
-rw-r--r--  drivers/net/phy/micrel.c | 6
-rw-r--r--  drivers/net/phy/mxl-gpy.c | 2
-rw-r--r--  drivers/net/phy/nxp-c45-tja11xx.c | 14
-rw-r--r--  drivers/net/phy/nxp-tja11xx.c | 2
-rw-r--r--  drivers/net/phy/phy_device.c | 104
-rw-r--r--  drivers/net/phy/phylink.c | 19
-rw-r--r--  drivers/net/phy/sfp.c | 25
-rw-r--r--  drivers/net/thunderbolt/main.c | 25
-rw-r--r--  drivers/net/usb/r8152.c | 2
-rw-r--r--  drivers/net/veth.c | 27
-rw-r--r--  drivers/net/virtio_net.c | 8
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_drv.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath11k/mhi.c | 2
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c | 36
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h | 2
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7603/main.c | 10
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7615/mac.c | 70
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7615/main.c | 15
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h | 6
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x02_util.c | 18
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7915/main.c | 13
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7921/init.c | 7
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7921/main.c | 13
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7921/pci.c | 2
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7996/main.c | 13
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_pcie.c | 3
-rw-r--r--  drivers/net/wwan/rpmsg_wwan_ctrl.c | 1
-rw-r--r--  drivers/net/wwan/t7xx/Makefile | 2
-rw-r--r--  drivers/net/wwan/wwan_core.c | 3
-rw-r--r--  drivers/nvme/host/core.c | 6
-rw-r--r--  drivers/nvme/host/pci.c | 3
-rw-r--r--  drivers/nvme/host/tcp.c | 46
-rw-r--r--  drivers/of/dynamic.c | 1
-rw-r--r--  drivers/of/platform.c | 5
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware.c | 10
-rw-r--r--  drivers/pci/doe.c | 30
-rw-r--r--  drivers/pci/remove.c | 5
-rw-r--r--  drivers/perf/amlogic/meson_g12_ddr_pmu.c | 34
-rw-r--r--  drivers/pinctrl/mediatek/Kconfig | 44
-rw-r--r--  drivers/pinctrl/pinctrl-at91-pio4.c | 1
-rw-r--r--  drivers/pinctrl/pinctrl-ocelot.c | 2
-rw-r--r--  drivers/pinctrl/stm32/pinctrl-stm32.c | 2
-rw-r--r--  drivers/platform/x86/asus-nb-wmi.c | 3
-rw-r--r--  drivers/platform/x86/gigabyte-wmi.c | 2
-rw-r--r--  drivers/platform/x86/ideapad-laptop.c | 23
-rw-r--r--  drivers/platform/x86/intel/pmc/core.c | 13
-rw-r--r--  drivers/platform/x86/think-lmi.c | 20
-rw-r--r--  drivers/platform/x86/thinkpad_acpi.c | 8
-rw-r--r--  drivers/pwm/core.c | 12
-rw-r--r--  drivers/pwm/pwm-cros-ec.c | 1
-rw-r--r--  drivers/pwm/pwm-hibvt.c | 1
-rw-r--r--  drivers/pwm/pwm-iqs620a.c | 1
-rw-r--r--  drivers/pwm/pwm-meson.c | 8
-rw-r--r--  drivers/pwm/pwm-sprd.c | 1
-rw-r--r--  drivers/regulator/fan53555.c | 13
-rw-r--r--  drivers/regulator/fixed.c | 2
-rw-r--r--  drivers/regulator/sm5703-regulator.c | 2
-rw-r--r--  drivers/scsi/iscsi_tcp.c | 3
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_base.c | 2
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fusion.c | 4
-rw-r--r--  drivers/scsi/mpi3mr/mpi3mr_fw.c | 2
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_base.c | 5
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 1
-rw-r--r--  drivers/scsi/scsi.c | 11
-rw-r--r--  drivers/scsi/ses.c | 20
-rw-r--r--  drivers/spi/spi-rockchip-sfc.c | 2
-rw-r--r--  drivers/spi/spi.c | 5
-rw-r--r--  drivers/tee/optee/call.c | 2
-rw-r--r--  drivers/tee/tee_shm.c | 2
-rw-r--r--  drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c | 1
-rw-r--r--  drivers/thermal/intel/intel_powerclamp.c | 9
-rw-r--r--  drivers/thermal/intel/therm_throt.c | 73
-rw-r--r--  drivers/thermal/thermal_sysfs.c | 6
-rw-r--r--  drivers/tty/serial/8250/8250_port.c | 11
-rw-r--r--  drivers/tty/serial/fsl_lpuart.c | 10
-rw-r--r--  drivers/tty/serial/sh-sci.c | 10
-rw-r--r--  drivers/ufs/core/ufshcd.c | 47
-rw-r--r--  drivers/usb/cdns3/cdnsp-ep0.c | 3
-rw-r--r--  drivers/usb/dwc3/dwc3-pci.c | 4
-rw-r--r--  drivers/usb/gadget/function/f_fs.c | 2
-rw-r--r--  drivers/usb/gadget/legacy/inode.c | 2
-rw-r--r--  drivers/usb/host/xhci-pci.c | 7
-rw-r--r--  drivers/usb/host/xhci-tegra.c | 6
-rw-r--r--  drivers/usb/host/xhci.c | 7
-rw-r--r--  drivers/usb/serial/cp210x.c | 1
-rw-r--r--  drivers/usb/serial/option.c | 10
-rw-r--r--  drivers/usb/typec/altmodes/displayport.c | 6
-rw-r--r--  drivers/vdpa/mlx5/net/mlx5_vnet.c | 8
-rw-r--r--  drivers/vdpa/vdpa_sim/vdpa_sim_net.c | 13
-rw-r--r--  drivers/vhost/scsi.c | 39
-rw-r--r--  drivers/video/fbdev/core/fbcon.c | 18
-rw-r--r--  drivers/video/fbdev/core/fbmem.c | 2
414 files changed, 12324 insertions, 3824 deletions
diff --git a/drivers/accel/habanalabs/common/hwmon.c b/drivers/accel/habanalabs/common/hwmon.c
index 55eb0203817f..8598056216e7 100644
--- a/drivers/accel/habanalabs/common/hwmon.c
+++ b/drivers/accel/habanalabs/common/hwmon.c
@@ -914,7 +914,7 @@ void hl_hwmon_fini(struct hl_device *hdev)
void hl_hwmon_release_resources(struct hl_device *hdev)
{
- const struct hwmon_channel_info **channel_info_arr;
+ const struct hwmon_channel_info * const *channel_info_arr;
int i = 0;
if (!hdev->hl_chip_info->info)
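The widened qualifier above lets the local walker point at hwmon's now doubly-const channel-info tables. A minimal sketch of the distinction (illustrative only; walk_channel_info() is hypothetical, not part of the patch):

static void walk_channel_info(const struct hwmon_channel_info * const *arr)
{
	int i;

	/* Both the info structs and the pointer slots are read-only here;
	 * without the middle "const" a const-slot table could not be
	 * passed in without a cast. */
	for (i = 0; arr[i]; i++)
		pr_debug("channel %d: type %d\n", i, arr[i]->type);
}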
diff --git a/drivers/accel/ivpu/ivpu_job.c b/drivers/accel/ivpu/ivpu_job.c
index 910301fae435..3c6f1e16cf2f 100644
--- a/drivers/accel/ivpu/ivpu_job.c
+++ b/drivers/accel/ivpu/ivpu_job.c
@@ -461,26 +461,22 @@ ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32
job->cmd_buf_vpu_addr = bo->vpu_addr + commands_offset;
- ret = drm_gem_lock_reservations((struct drm_gem_object **)job->bos, buf_count,
- &acquire_ctx);
+ ret = drm_gem_lock_reservations((struct drm_gem_object **)job->bos, 1, &acquire_ctx);
if (ret) {
ivpu_warn(vdev, "Failed to lock reservations: %d\n", ret);
return ret;
}
- for (i = 0; i < buf_count; i++) {
- ret = dma_resv_reserve_fences(job->bos[i]->base.resv, 1);
- if (ret) {
- ivpu_warn(vdev, "Failed to reserve fences: %d\n", ret);
- goto unlock_reservations;
- }
+ ret = dma_resv_reserve_fences(bo->base.resv, 1);
+ if (ret) {
+ ivpu_warn(vdev, "Failed to reserve fences: %d\n", ret);
+ goto unlock_reservations;
}
- for (i = 0; i < buf_count; i++)
- dma_resv_add_fence(job->bos[i]->base.resv, job->done_fence, DMA_RESV_USAGE_WRITE);
+ dma_resv_add_fence(bo->base.resv, job->done_fence, DMA_RESV_USAGE_WRITE);
unlock_reservations:
- drm_gem_unlock_reservations((struct drm_gem_object **)job->bos, buf_count, &acquire_ctx);
+ drm_gem_unlock_reservations((struct drm_gem_object **)job->bos, 1, &acquire_ctx);
wmb(); /* Flush write combining buffers */
diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c
index 7df72fa8100f..bde42d6383da 100644
--- a/drivers/accel/ivpu/ivpu_pm.c
+++ b/drivers/accel/ivpu/ivpu_pm.c
@@ -140,32 +140,28 @@ int ivpu_pm_suspend_cb(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
struct ivpu_device *vdev = to_ivpu_device(drm);
- int ret;
+ unsigned long timeout;
ivpu_dbg(vdev, PM, "Suspend..\n");
- ret = ivpu_suspend(vdev);
- if (ret && vdev->pm->suspend_reschedule_counter) {
- ivpu_dbg(vdev, PM, "Failed to enter idle, rescheduling suspend, retries left %d\n",
- vdev->pm->suspend_reschedule_counter);
- pm_schedule_suspend(dev, vdev->timeout.reschedule_suspend);
- vdev->pm->suspend_reschedule_counter--;
- return -EBUSY;
- } else if (!vdev->pm->suspend_reschedule_counter) {
- ivpu_warn(vdev, "Failed to enter idle, force suspend\n");
- ivpu_pm_prepare_cold_boot(vdev);
- } else {
- ivpu_pm_prepare_warm_boot(vdev);
+ timeout = jiffies + msecs_to_jiffies(vdev->timeout.tdr);
+ while (!ivpu_hw_is_idle(vdev)) {
+ cond_resched();
+ if (time_after_eq(jiffies, timeout)) {
+ ivpu_err(vdev, "Failed to enter idle on system suspend\n");
+ return -EBUSY;
+ }
}
- vdev->pm->suspend_reschedule_counter = PM_RESCHEDULE_LIMIT;
+ ivpu_suspend(vdev);
+ ivpu_pm_prepare_warm_boot(vdev);
pci_save_state(to_pci_dev(dev));
pci_set_power_state(to_pci_dev(dev), PCI_D3hot);
ivpu_dbg(vdev, PM, "Suspend done.\n");
- return ret;
+ return 0;
}
int ivpu_pm_resume_cb(struct device *dev)
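The reworked callback above replaces the retry/reschedule dance with a bounded wait for idleness. The core of that pattern, condensed (a sketch under assumed names; device_is_idle() and timeout_ms are placeholders):

/* Poll with a jiffies deadline, yielding the CPU between checks. */
unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);

while (!device_is_idle(dev)) {
	cond_resched();
	if (time_after_eq(jiffies, deadline))
		return -EBUSY;	/* give up and abort the system suspend */
}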
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 97b711e57bff..c7a6d0b69dab 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -1984,6 +1984,7 @@ static int instance;
static int acpi_video_bus_add(struct acpi_device *device)
{
struct acpi_video_bus *video;
+ bool auto_detect;
int error;
acpi_status status;
@@ -2045,10 +2046,20 @@ static int acpi_video_bus_add(struct acpi_device *device)
mutex_unlock(&video_list_lock);
/*
- * The userspace visible backlight_device gets registered separately
- * from acpi_video_register_backlight().
+ * If backlight-type auto-detection is used then a native backlight may
+ * show up later and this may change the result from video to native.
+ * Therefore normally the userspace visible /sys/class/backlight device

+ * gets registered separately by the GPU driver calling
+ * acpi_video_register_backlight() when an internal panel is detected.
+ * Register the backlight now when not using auto-detection, so that
+ * when the kernel cmdline or DMI-quirks are used the backlight will
+ * get registered even if acpi_video_register_backlight() is not called.
*/
acpi_video_run_bcl_for_osi(video);
+ if (__acpi_video_get_backlight_type(false, &auto_detect) == acpi_backlight_video &&
+ !auto_detect)
+ acpi_video_bus_register_backlight(video);
+
acpi_video_bus_add_notify_handler(video);
return 0;
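For the auto-detect case described in the comment above, registration is deferred to the GPU driver. Roughly, the other side of that handshake looks like this (a sketch; panel_is_internal() is a hypothetical stand-in for the driver's eDP/LVDS panel check):

/* GPU-driver side: hand backlight control to acpi_video once an
 * internal panel has actually been detected. */
if (panel_is_internal(connector))
	acpi_video_register_backlight();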
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 9531dd0fef50..a96da65057b1 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -459,85 +459,67 @@ out_free:
Notification Handling
-------------------------------------------------------------------------- */
-/*
- * acpi_bus_notify
- * ---------------
- * Callback for all 'system-level' device notifications (values 0x00-0x7F).
+/**
+ * acpi_bus_notify - Global system-level (0x00-0x7F) notifications handler
+ * @handle: Target ACPI object.
+ * @type: Notification type.
+ * @data: Ignored.
+ *
+ * This only handles notifications related to device hotplug.
*/
static void acpi_bus_notify(acpi_handle handle, u32 type, void *data)
{
struct acpi_device *adev;
- u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
- bool hotplug_event = false;
switch (type) {
case ACPI_NOTIFY_BUS_CHECK:
acpi_handle_debug(handle, "ACPI_NOTIFY_BUS_CHECK event\n");
- hotplug_event = true;
break;
case ACPI_NOTIFY_DEVICE_CHECK:
acpi_handle_debug(handle, "ACPI_NOTIFY_DEVICE_CHECK event\n");
- hotplug_event = true;
break;
case ACPI_NOTIFY_DEVICE_WAKE:
acpi_handle_debug(handle, "ACPI_NOTIFY_DEVICE_WAKE event\n");
- break;
+ return;
case ACPI_NOTIFY_EJECT_REQUEST:
acpi_handle_debug(handle, "ACPI_NOTIFY_EJECT_REQUEST event\n");
- hotplug_event = true;
break;
case ACPI_NOTIFY_DEVICE_CHECK_LIGHT:
acpi_handle_debug(handle, "ACPI_NOTIFY_DEVICE_CHECK_LIGHT event\n");
/* TBD: Exactly what does 'light' mean? */
- break;
+ return;
case ACPI_NOTIFY_FREQUENCY_MISMATCH:
acpi_handle_err(handle, "Device cannot be configured due "
"to a frequency mismatch\n");
- break;
+ return;
case ACPI_NOTIFY_BUS_MODE_MISMATCH:
acpi_handle_err(handle, "Device cannot be configured due "
"to a bus mode mismatch\n");
- break;
+ return;
case ACPI_NOTIFY_POWER_FAULT:
acpi_handle_err(handle, "Device has suffered a power fault\n");
- break;
+ return;
default:
acpi_handle_debug(handle, "Unknown event type 0x%x\n", type);
- break;
+ return;
}
adev = acpi_get_acpi_dev(handle);
- if (!adev)
- goto err;
-
- if (adev->dev.driver) {
- struct acpi_driver *driver = to_acpi_driver(adev->dev.driver);
-
- if (driver && driver->ops.notify &&
- (driver->flags & ACPI_DRIVER_ALL_NOTIFY_EVENTS))
- driver->ops.notify(adev, type);
- }
-
- if (!hotplug_event) {
- acpi_put_acpi_dev(adev);
- return;
- }
- if (ACPI_SUCCESS(acpi_hotplug_schedule(adev, type)))
+ if (adev && ACPI_SUCCESS(acpi_hotplug_schedule(adev, type)))
return;
acpi_put_acpi_dev(adev);
- err:
- acpi_evaluate_ost(handle, type, ost_code, NULL);
+ acpi_evaluate_ost(handle, type, ACPI_OST_SC_NON_SPECIFIC_FAILURE, NULL);
}
static void acpi_notify_device(acpi_handle handle, u32 event, void *data)
@@ -562,42 +544,51 @@ static u32 acpi_device_fixed_event(void *data)
return ACPI_INTERRUPT_HANDLED;
}
-static int acpi_device_install_notify_handler(struct acpi_device *device)
+static int acpi_device_install_notify_handler(struct acpi_device *device,
+ struct acpi_driver *acpi_drv)
{
acpi_status status;
- if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON)
+ if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON) {
status =
acpi_install_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
acpi_device_fixed_event,
device);
- else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON)
+ } else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON) {
status =
acpi_install_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON,
acpi_device_fixed_event,
device);
- else
- status = acpi_install_notify_handler(device->handle,
- ACPI_DEVICE_NOTIFY,
+ } else {
+ u32 type = acpi_drv->flags & ACPI_DRIVER_ALL_NOTIFY_EVENTS ?
+ ACPI_ALL_NOTIFY : ACPI_DEVICE_NOTIFY;
+
+ status = acpi_install_notify_handler(device->handle, type,
acpi_notify_device,
device);
+ }
if (ACPI_FAILURE(status))
return -EINVAL;
return 0;
}
-static void acpi_device_remove_notify_handler(struct acpi_device *device)
+static void acpi_device_remove_notify_handler(struct acpi_device *device,
+ struct acpi_driver *acpi_drv)
{
- if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON)
+ if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON) {
acpi_remove_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
acpi_device_fixed_event);
- else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON)
+ } else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON) {
acpi_remove_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON,
acpi_device_fixed_event);
- else
- acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
+ } else {
+ u32 type = acpi_drv->flags & ACPI_DRIVER_ALL_NOTIFY_EVENTS ?
+ ACPI_ALL_NOTIFY : ACPI_DEVICE_NOTIFY;
+
+ acpi_remove_notify_handler(device->handle, type,
acpi_notify_device);
+ }
}
/* Handle events targeting \_SB device (at present only graceful shutdown) */
@@ -1039,7 +1030,7 @@ static int acpi_device_probe(struct device *dev)
acpi_drv->name, acpi_dev->pnp.bus_id);
if (acpi_drv->ops.notify) {
- ret = acpi_device_install_notify_handler(acpi_dev);
+ ret = acpi_device_install_notify_handler(acpi_dev, acpi_drv);
if (ret) {
if (acpi_drv->ops.remove)
acpi_drv->ops.remove(acpi_dev);
@@ -1062,7 +1053,7 @@ static void acpi_device_remove(struct device *dev)
struct acpi_driver *acpi_drv = to_acpi_driver(dev->driver);
if (acpi_drv->ops.notify)
- acpi_device_remove_notify_handler(acpi_dev);
+ acpi_device_remove_notify_handler(acpi_dev, acpi_drv);
if (acpi_drv->ops.remove)
acpi_drv->ops.remove(acpi_dev);
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index 7b4801ce62d6..e8492b3a393a 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -440,6 +440,13 @@ static const struct dmi_system_id asus_laptop[] = {
},
},
{
+ .ident = "Asus ExpertBook B1502CBA",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "B1502CBA"),
+ },
+ },
+ {
.ident = "Asus ExpertBook B2402CBA",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index fd7cbce8076e..e85729fc481f 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -277,6 +277,43 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
},
/*
+ * Models which need acpi_video backlight control where the GPU drivers
+ * do not call acpi_video_register_backlight() because no internal panel
+ * is detected. Typically these are all-in-ones (monitors with builtin
+ * PC) where the panel connection shows up as regular DP instead of eDP.
+ */
+ {
+ .callback = video_detect_force_video,
+ /* Apple iMac14,1 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "iMac14,1"),
+ },
+ },
+ {
+ .callback = video_detect_force_video,
+ /* Apple iMac14,2 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "iMac14,2"),
+ },
+ },
+
+ /*
+ * Older models with nvidia GPU which need acpi_video backlight
+ * control and where the old nvidia binary driver series does not
+ * call acpi_video_register_backlight().
+ */
+ {
+ .callback = video_detect_force_video,
+ /* ThinkPad W530 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W530"),
+ },
+ },
+
+ /*
* These models have a working acpi_video backlight control, and using
* native backlight causes a regression where backlight does not work
* when userspace is not handling brightness key events. Disable
@@ -782,7 +819,7 @@ static bool prefer_native_over_acpi_video(void)
* Determine which type of backlight interface to use on this system,
* First check cmdline, then dmi quirks, then do autodetect.
*/
-static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native)
+enum acpi_backlight_type __acpi_video_get_backlight_type(bool native, bool *auto_detect)
{
static DEFINE_MUTEX(init_mutex);
static bool nvidia_wmi_ec_present;
@@ -807,6 +844,9 @@ static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native)
native_available = true;
mutex_unlock(&init_mutex);
+ if (auto_detect)
+ *auto_detect = false;
+
/*
* The below heuristics / detection steps are in order of descending
* precedence. The commandline takes precedence over anything else.
@@ -818,6 +858,9 @@ static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native)
if (acpi_backlight_dmi != acpi_backlight_undef)
return acpi_backlight_dmi;
+ if (auto_detect)
+ *auto_detect = true;
+
/* Special cases such as nvidia_wmi_ec and apple gmux. */
if (nvidia_wmi_ec_present)
return acpi_backlight_nvidia_wmi_ec;
@@ -837,15 +880,4 @@ static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native)
/* No ACPI video/native (old hw), use vendor specific fw methods. */
return acpi_backlight_vendor;
}
-
-enum acpi_backlight_type acpi_video_get_backlight_type(void)
-{
- return __acpi_video_get_backlight_type(false);
-}
-EXPORT_SYMBOL(acpi_video_get_backlight_type);
-
-bool acpi_video_backlight_use_native(void)
-{
- return __acpi_video_get_backlight_type(true) == acpi_backlight_native;
-}
-EXPORT_SYMBOL(acpi_video_backlight_use_native);
+EXPORT_SYMBOL(__acpi_video_get_backlight_type);
diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c
index da5727069d85..ba420a28a4aa 100644
--- a/drivers/acpi/x86/utils.c
+++ b/drivers/acpi/x86/utils.c
@@ -213,6 +213,7 @@ bool acpi_device_override_status(struct acpi_device *adev, unsigned long long *s
disk in the system.
*/
static const struct x86_cpu_id storage_d3_cpu_ids[] = {
+ X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 24, NULL), /* Picasso */
X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 96, NULL), /* Renoir */
X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 104, NULL), /* Lucienne */
X86_MATCH_VENDOR_FAM_MODEL(AMD, 25, 80, NULL), /* Cezanne */
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index f6573c335f4c..f3903d002819 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -474,12 +474,18 @@ int detect_cache_attributes(unsigned int cpu)
populate_leaves:
/*
- * populate_cache_leaves() may completely setup the cache leaves and
- * shared_cpu_map or it may leave it partially setup.
+ * If LLC is valid the cache leaves were already populated so just go to
+ * update the cpu map.
*/
- ret = populate_cache_leaves(cpu);
- if (ret)
- goto free_ci;
+ if (!last_level_cache_is_valid(cpu)) {
+ /*
+ * populate_cache_leaves() may completely setup the cache leaves and
+ * shared_cpu_map or it may leave it partially setup.
+ */
+ ret = populate_cache_leaves(cpu);
+ if (ret)
+ goto free_ci;
+ }
/*
* For systems using DT for cache hierarchy, fw_token
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 28eb59fd71ca..bc31bb7072a2 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1010,9 +1010,6 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
/* This is safe, since we have a reference from open(). */
__module_get(THIS_MODULE);
- /* suppress uevents while reconfiguring the device */
- dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 1);
-
/*
* If we don't hold exclusive handle for the device, upgrade to it
* here to avoid changing device under exclusive owner.
@@ -1067,6 +1064,9 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
}
}
+ /* suppress uevents while reconfiguring the device */
+ dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 1);
+
disk_force_media_change(lo->lo_disk, DISK_EVENT_MEDIA_CHANGE);
set_disk_ro(lo->lo_disk, (lo->lo_flags & LO_FLAGS_READ_ONLY) != 0);
@@ -1109,17 +1109,17 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
if (partscan)
clear_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state);
+ /* enable and uncork uevent now that we are done */
+ dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0);
+
loop_global_unlock(lo, is_loop);
if (partscan)
loop_reread_partitions(lo);
+
if (!(mode & FMODE_EXCL))
bd_abort_claiming(bdev, loop_configure);
- error = 0;
-done:
- /* enable and uncork uevent now that we are done */
- dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0);
- return error;
+ return 0;
out_unlock:
loop_global_unlock(lo, is_loop);
@@ -1130,7 +1130,7 @@ out_putf:
fput(file);
/* This is safe: open() is still holding a reference. */
module_put(THIS_MODULE);
- goto done;
+ return error;
}
static void __loop_clr_fd(struct loop_device *lo, bool release)
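The moves above shrink the uevent-suppression window so that it opens only once reconfiguration is committed and always closes again before the global lock is dropped. The bracket, reduced to its skeleton (illustrative):

dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 1);
/* ... attach the backing file, update flags, set capacity ... */
dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0);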
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index c73cc57ec547..604c1a13c76e 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -246,7 +246,7 @@ static int ublk_validate_params(const struct ublk_device *ub)
if (ub->params.types & UBLK_PARAM_TYPE_BASIC) {
const struct ublk_param_basic *p = &ub->params.basic;
- if (p->logical_bs_shift > PAGE_SHIFT)
+ if (p->logical_bs_shift > PAGE_SHIFT || p->logical_bs_shift < 9)
return -EINVAL;
if (p->logical_bs_shift > p->physical_bs_shift)
@@ -1261,9 +1261,10 @@ static void ublk_handle_need_get_data(struct ublk_device *ub, int q_id,
ublk_queue_cmd(ubq, req);
}
-static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
+static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
+ unsigned int issue_flags,
+ struct ublksrv_io_cmd *ub_cmd)
{
- struct ublksrv_io_cmd *ub_cmd = (struct ublksrv_io_cmd *)cmd->cmd;
struct ublk_device *ub = cmd->file->private_data;
struct ublk_queue *ubq;
struct ublk_io *io;
@@ -1362,6 +1363,23 @@ static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
return -EIOCBQUEUED;
}
+static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
+{
+ struct ublksrv_io_cmd *ub_src = (struct ublksrv_io_cmd *) cmd->cmd;
+ struct ublksrv_io_cmd ub_cmd;
+
+ /*
+ * Not necessary for async retry, but let's keep it simple and always
+ * copy the values to avoid any potential reuse.
+ */
+ ub_cmd.q_id = READ_ONCE(ub_src->q_id);
+ ub_cmd.tag = READ_ONCE(ub_src->tag);
+ ub_cmd.result = READ_ONCE(ub_src->result);
+ ub_cmd.addr = READ_ONCE(ub_src->addr);
+
+ return __ublk_ch_uring_cmd(cmd, issue_flags, &ub_cmd);
+}
+
static const struct file_operations ublk_ch_fops = {
.owner = THIS_MODULE,
.open = ublk_ch_open,
@@ -1952,6 +1970,8 @@ static int ublk_ctrl_set_params(struct ublk_device *ub,
/* clear all we don't support yet */
ub->params.types &= UBLK_PARAM_TYPE_ALL;
ret = ublk_validate_params(ub);
+ if (ret)
+ ub->params.types = 0;
}
mutex_unlock(&ub->mutex);
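The new wrapper copies the payload because cmd->cmd aliases memory that userspace still owns, so each field must be read exactly once. The idea in isolation (field names as in the patch):

/* Snapshot shared fields with READ_ONCE() and use only the snapshot;
 * a later re-read of *src could observe values userspace changed. */
const struct ublksrv_io_cmd *src = (struct ublksrv_io_cmd *)cmd->cmd;
struct ublksrv_io_cmd snap = {
	.q_id	= READ_ONCE(src->q_id),
	.tag	= READ_ONCE(src->tag),
	.result	= READ_ONCE(src->result),
	.addr	= READ_ONCE(src->addr),
};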
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 2723eede6f21..2b918e28acaa 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -96,16 +96,14 @@ struct virtblk_req {
/*
* The zone append command has an extended in header.
- * The status field in zone_append_in_hdr must have
- * the same offset in virtblk_req as the non-zoned
- * status field above.
+ * The status field in zone_append_in_hdr must always
+ * be the last byte.
*/
struct {
+ __virtio64 sector;
u8 status;
- u8 reserved[7];
- __le64 append_sector;
- } zone_append_in_hdr;
- };
+ } zone_append;
+ } in_hdr;
size_t in_hdr_len;
@@ -154,7 +152,7 @@ static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr)
sgs[num_out + num_in++] = vbr->sg_table.sgl;
}
- sg_init_one(&in_hdr, &vbr->status, vbr->in_hdr_len);
+ sg_init_one(&in_hdr, &vbr->in_hdr.status, vbr->in_hdr_len);
sgs[num_out + num_in++] = &in_hdr;
return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
@@ -242,11 +240,14 @@ static blk_status_t virtblk_setup_cmd(struct virtio_device *vdev,
struct request *req,
struct virtblk_req *vbr)
{
- size_t in_hdr_len = sizeof(vbr->status);
+ size_t in_hdr_len = sizeof(vbr->in_hdr.status);
bool unmap = false;
u32 type;
u64 sector = 0;
+ if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) && op_is_zone_mgmt(req_op(req)))
+ return BLK_STS_NOTSUPP;
+
/* Set fields for all request types */
vbr->out_hdr.ioprio = cpu_to_virtio32(vdev, req_get_ioprio(req));
@@ -287,7 +288,7 @@ static blk_status_t virtblk_setup_cmd(struct virtio_device *vdev,
case REQ_OP_ZONE_APPEND:
type = VIRTIO_BLK_T_ZONE_APPEND;
sector = blk_rq_pos(req);
- in_hdr_len = sizeof(vbr->zone_append_in_hdr);
+ in_hdr_len = sizeof(vbr->in_hdr.zone_append);
break;
case REQ_OP_ZONE_RESET:
type = VIRTIO_BLK_T_ZONE_RESET;
@@ -297,7 +298,10 @@ static blk_status_t virtblk_setup_cmd(struct virtio_device *vdev,
type = VIRTIO_BLK_T_ZONE_RESET_ALL;
break;
case REQ_OP_DRV_IN:
- /* Out header already filled in, nothing to do */
+ /*
+ * Out header has already been prepared by the caller (virtblk_get_id()
+ * or virtblk_submit_zone_report()), nothing to do here.
+ */
return 0;
default:
WARN_ON_ONCE(1);
@@ -318,16 +322,28 @@ static blk_status_t virtblk_setup_cmd(struct virtio_device *vdev,
return 0;
}
+/*
+ * The status byte is always the last byte of the virtblk request
+ * in-header. This helper fetches its value for all in-header formats
+ * that are currently defined.
+ */
+static inline u8 virtblk_vbr_status(struct virtblk_req *vbr)
+{
+ return *((u8 *)&vbr->in_hdr + vbr->in_hdr_len - 1);
+}
+
static inline void virtblk_request_done(struct request *req)
{
struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
- blk_status_t status = virtblk_result(vbr->status);
+ blk_status_t status = virtblk_result(virtblk_vbr_status(vbr));
+ struct virtio_blk *vblk = req->mq_hctx->queue->queuedata;
virtblk_unmap_data(req, vbr);
virtblk_cleanup_cmd(req);
if (req_op(req) == REQ_OP_ZONE_APPEND)
- req->__sector = le64_to_cpu(vbr->zone_append_in_hdr.append_sector);
+ req->__sector = virtio64_to_cpu(vblk->vdev,
+ vbr->in_hdr.zone_append.sector);
blk_mq_end_request(req, status);
}
@@ -355,7 +371,7 @@ static int virtblk_handle_req(struct virtio_blk_vq *vq,
if (likely(!blk_should_fake_timeout(req->q)) &&
!blk_mq_complete_request_remote(req) &&
- !blk_mq_add_to_batch(req, iob, vbr->status,
+ !blk_mq_add_to_batch(req, iob, virtblk_vbr_status(vbr),
virtblk_complete_batch))
virtblk_request_done(req);
req_done++;
@@ -550,7 +566,6 @@ static void virtio_queue_rqs(struct request **rqlist)
#ifdef CONFIG_BLK_DEV_ZONED
static void *virtblk_alloc_report_buffer(struct virtio_blk *vblk,
unsigned int nr_zones,
- unsigned int zone_sectors,
size_t *buflen)
{
struct request_queue *q = vblk->disk->queue;
@@ -558,7 +573,7 @@ static void *virtblk_alloc_report_buffer(struct virtio_blk *vblk,
void *buf;
nr_zones = min_t(unsigned int, nr_zones,
- get_capacity(vblk->disk) >> ilog2(zone_sectors));
+ get_capacity(vblk->disk) >> ilog2(vblk->zone_sectors));
bufsize = sizeof(struct virtio_blk_zone_report) +
nr_zones * sizeof(struct virtio_blk_zone_descriptor);
@@ -592,7 +607,7 @@ static int virtblk_submit_zone_report(struct virtio_blk *vblk,
return PTR_ERR(req);
vbr = blk_mq_rq_to_pdu(req);
- vbr->in_hdr_len = sizeof(vbr->status);
+ vbr->in_hdr_len = sizeof(vbr->in_hdr.status);
vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_ZONE_REPORT);
vbr->out_hdr.sector = cpu_to_virtio64(vblk->vdev, sector);
@@ -601,7 +616,7 @@ static int virtblk_submit_zone_report(struct virtio_blk *vblk,
goto out;
blk_execute_rq(req, false);
- err = blk_status_to_errno(virtblk_result(vbr->status));
+ err = blk_status_to_errno(virtblk_result(vbr->in_hdr.status));
out:
blk_mq_free_request(req);
return err;
@@ -609,29 +624,72 @@ out:
static int virtblk_parse_zone(struct virtio_blk *vblk,
struct virtio_blk_zone_descriptor *entry,
- unsigned int idx, unsigned int zone_sectors,
- report_zones_cb cb, void *data)
+ unsigned int idx, report_zones_cb cb, void *data)
{
struct blk_zone zone = { };
- if (entry->z_type != VIRTIO_BLK_ZT_SWR &&
- entry->z_type != VIRTIO_BLK_ZT_SWP &&
- entry->z_type != VIRTIO_BLK_ZT_CONV) {
- dev_err(&vblk->vdev->dev, "invalid zone type %#x\n",
- entry->z_type);
- return -EINVAL;
+ zone.start = virtio64_to_cpu(vblk->vdev, entry->z_start);
+ if (zone.start + vblk->zone_sectors <= get_capacity(vblk->disk))
+ zone.len = vblk->zone_sectors;
+ else
+ zone.len = get_capacity(vblk->disk) - zone.start;
+ zone.capacity = virtio64_to_cpu(vblk->vdev, entry->z_cap);
+ zone.wp = virtio64_to_cpu(vblk->vdev, entry->z_wp);
+
+ switch (entry->z_type) {
+ case VIRTIO_BLK_ZT_SWR:
+ zone.type = BLK_ZONE_TYPE_SEQWRITE_REQ;
+ break;
+ case VIRTIO_BLK_ZT_SWP:
+ zone.type = BLK_ZONE_TYPE_SEQWRITE_PREF;
+ break;
+ case VIRTIO_BLK_ZT_CONV:
+ zone.type = BLK_ZONE_TYPE_CONVENTIONAL;
+ break;
+ default:
+ dev_err(&vblk->vdev->dev, "zone %llu: invalid type %#x\n",
+ zone.start, entry->z_type);
+ return -EIO;
}
- zone.type = entry->z_type;
- zone.cond = entry->z_state;
- zone.len = zone_sectors;
- zone.capacity = le64_to_cpu(entry->z_cap);
- zone.start = le64_to_cpu(entry->z_start);
- if (zone.cond == BLK_ZONE_COND_FULL)
+ switch (entry->z_state) {
+ case VIRTIO_BLK_ZS_EMPTY:
+ zone.cond = BLK_ZONE_COND_EMPTY;
+ break;
+ case VIRTIO_BLK_ZS_CLOSED:
+ zone.cond = BLK_ZONE_COND_CLOSED;
+ break;
+ case VIRTIO_BLK_ZS_FULL:
+ zone.cond = BLK_ZONE_COND_FULL;
zone.wp = zone.start + zone.len;
- else
- zone.wp = le64_to_cpu(entry->z_wp);
+ break;
+ case VIRTIO_BLK_ZS_EOPEN:
+ zone.cond = BLK_ZONE_COND_EXP_OPEN;
+ break;
+ case VIRTIO_BLK_ZS_IOPEN:
+ zone.cond = BLK_ZONE_COND_IMP_OPEN;
+ break;
+ case VIRTIO_BLK_ZS_NOT_WP:
+ zone.cond = BLK_ZONE_COND_NOT_WP;
+ break;
+ case VIRTIO_BLK_ZS_RDONLY:
+ zone.cond = BLK_ZONE_COND_READONLY;
+ zone.wp = ULONG_MAX;
+ break;
+ case VIRTIO_BLK_ZS_OFFLINE:
+ zone.cond = BLK_ZONE_COND_OFFLINE;
+ zone.wp = ULONG_MAX;
+ break;
+ default:
+ dev_err(&vblk->vdev->dev, "zone %llu: invalid condition %#x\n",
+ zone.start, entry->z_state);
+ return -EIO;
+ }
+ /*
+ * The callback below checks the validity of the reported
+ * entry data, no need to further validate it here.
+ */
return cb(&zone, idx, data);
}
@@ -641,39 +699,47 @@ static int virtblk_report_zones(struct gendisk *disk, sector_t sector,
{
struct virtio_blk *vblk = disk->private_data;
struct virtio_blk_zone_report *report;
- unsigned int zone_sectors = vblk->zone_sectors;
- unsigned int nz, i;
- int ret, zone_idx = 0;
+ unsigned long long nz, i;
size_t buflen;
+ unsigned int zone_idx = 0;
+ int ret;
if (WARN_ON_ONCE(!vblk->zone_sectors))
return -EOPNOTSUPP;
- report = virtblk_alloc_report_buffer(vblk, nr_zones,
- zone_sectors, &buflen);
+ report = virtblk_alloc_report_buffer(vblk, nr_zones, &buflen);
if (!report)
return -ENOMEM;
+ mutex_lock(&vblk->vdev_mutex);
+
+ if (!vblk->vdev) {
+ ret = -ENXIO;
+ goto fail_report;
+ }
+
while (zone_idx < nr_zones && sector < get_capacity(vblk->disk)) {
memset(report, 0, buflen);
ret = virtblk_submit_zone_report(vblk, (char *)report,
buflen, sector);
- if (ret) {
- if (ret > 0)
- ret = -EIO;
- goto out_free;
- }
- nz = min((unsigned int)le64_to_cpu(report->nr_zones), nr_zones);
+ if (ret)
+ goto fail_report;
+
+ nz = min_t(u64, virtio64_to_cpu(vblk->vdev, report->nr_zones),
+ nr_zones);
if (!nz)
break;
for (i = 0; i < nz && zone_idx < nr_zones; i++) {
ret = virtblk_parse_zone(vblk, &report->zones[i],
- zone_idx, zone_sectors, cb, data);
+ zone_idx, cb, data);
if (ret)
- goto out_free;
- sector = le64_to_cpu(report->zones[i].z_start) + zone_sectors;
+ goto fail_report;
+
+ sector = virtio64_to_cpu(vblk->vdev,
+ report->zones[i].z_start) +
+ vblk->zone_sectors;
zone_idx++;
}
}
@@ -682,7 +748,8 @@ static int virtblk_report_zones(struct gendisk *disk, sector_t sector,
ret = zone_idx;
else
ret = -EINVAL;
-out_free:
+fail_report:
+ mutex_unlock(&vblk->vdev_mutex);
kvfree(report);
return ret;
}
@@ -691,20 +758,28 @@ static void virtblk_revalidate_zones(struct virtio_blk *vblk)
{
u8 model;
- if (!vblk->zone_sectors)
- return;
-
virtio_cread(vblk->vdev, struct virtio_blk_config,
zoned.model, &model);
- if (!blk_revalidate_disk_zones(vblk->disk, NULL))
- set_capacity_and_notify(vblk->disk, 0);
+ switch (model) {
+ default:
+ dev_err(&vblk->vdev->dev, "unknown zone model %d\n", model);
+ fallthrough;
+ case VIRTIO_BLK_Z_NONE:
+ case VIRTIO_BLK_Z_HA:
+ disk_set_zoned(vblk->disk, BLK_ZONED_NONE);
+ return;
+ case VIRTIO_BLK_Z_HM:
+ WARN_ON_ONCE(!vblk->zone_sectors);
+ if (!blk_revalidate_disk_zones(vblk->disk, NULL))
+ set_capacity_and_notify(vblk->disk, 0);
+ }
}
static int virtblk_probe_zoned_device(struct virtio_device *vdev,
struct virtio_blk *vblk,
struct request_queue *q)
{
- u32 v;
+ u32 v, wg;
u8 model;
int ret;
@@ -713,16 +788,11 @@ static int virtblk_probe_zoned_device(struct virtio_device *vdev,
switch (model) {
case VIRTIO_BLK_Z_NONE:
+ case VIRTIO_BLK_Z_HA:
+ /* Present the host-aware device as non-zoned */
return 0;
case VIRTIO_BLK_Z_HM:
break;
- case VIRTIO_BLK_Z_HA:
- /*
- * Present the host-aware device as a regular drive.
- * TODO It is possible to add an option to make it appear
- * in the system as a zoned drive.
- */
- return 0;
default:
dev_err(&vdev->dev, "unsupported zone model %d\n", model);
return -EINVAL;
@@ -735,32 +805,31 @@ static int virtblk_probe_zoned_device(struct virtio_device *vdev,
virtio_cread(vdev, struct virtio_blk_config,
zoned.max_open_zones, &v);
- disk_set_max_open_zones(vblk->disk, le32_to_cpu(v));
-
- dev_dbg(&vdev->dev, "max open zones = %u\n", le32_to_cpu(v));
+ disk_set_max_open_zones(vblk->disk, v);
+ dev_dbg(&vdev->dev, "max open zones = %u\n", v);
virtio_cread(vdev, struct virtio_blk_config,
zoned.max_active_zones, &v);
- disk_set_max_active_zones(vblk->disk, le32_to_cpu(v));
- dev_dbg(&vdev->dev, "max active zones = %u\n", le32_to_cpu(v));
+ disk_set_max_active_zones(vblk->disk, v);
+ dev_dbg(&vdev->dev, "max active zones = %u\n", v);
virtio_cread(vdev, struct virtio_blk_config,
- zoned.write_granularity, &v);
- if (!v) {
+ zoned.write_granularity, &wg);
+ if (!wg) {
dev_warn(&vdev->dev, "zero write granularity reported\n");
return -ENODEV;
}
- blk_queue_physical_block_size(q, le32_to_cpu(v));
- blk_queue_io_min(q, le32_to_cpu(v));
+ blk_queue_physical_block_size(q, wg);
+ blk_queue_io_min(q, wg);
- dev_dbg(&vdev->dev, "write granularity = %u\n", le32_to_cpu(v));
+ dev_dbg(&vdev->dev, "write granularity = %u\n", wg);
/*
* virtio ZBD specification doesn't require zones to be a power of
* two sectors in size, but the code in this driver expects that.
*/
- virtio_cread(vdev, struct virtio_blk_config, zoned.zone_sectors, &v);
- vblk->zone_sectors = le32_to_cpu(v);
+ virtio_cread(vdev, struct virtio_blk_config, zoned.zone_sectors,
+ &vblk->zone_sectors);
if (vblk->zone_sectors == 0 || !is_power_of_2(vblk->zone_sectors)) {
dev_err(&vdev->dev,
"zoned device with non power of two zone size %u\n",
@@ -783,36 +852,46 @@ static int virtblk_probe_zoned_device(struct virtio_device *vdev,
dev_warn(&vdev->dev, "zero max_append_sectors reported\n");
return -ENODEV;
}
- blk_queue_max_zone_append_sectors(q, le32_to_cpu(v));
- dev_dbg(&vdev->dev, "max append sectors = %u\n", le32_to_cpu(v));
+ if ((v << SECTOR_SHIFT) < wg) {
+ dev_err(&vdev->dev,
+ "write granularity %u exceeds max_append_sectors %u limit\n",
+ wg, v);
+ return -ENODEV;
+ }
+
+ blk_queue_max_zone_append_sectors(q, v);
+ dev_dbg(&vdev->dev, "max append sectors = %u\n", v);
}
return ret;
}
-static inline bool virtblk_has_zoned_feature(struct virtio_device *vdev)
-{
- return virtio_has_feature(vdev, VIRTIO_BLK_F_ZONED);
-}
#else
/*
* Zoned block device support is not configured in this kernel.
- * We only need to define a few symbols to avoid compilation errors.
+ * Host-managed zoned devices can't be supported, but others are
+ * good to go as regular block devices.
*/
#define virtblk_report_zones NULL
+
static inline void virtblk_revalidate_zones(struct virtio_blk *vblk)
{
}
+
static inline int virtblk_probe_zoned_device(struct virtio_device *vdev,
struct virtio_blk *vblk, struct request_queue *q)
{
- return -EOPNOTSUPP;
-}
+ u8 model;
-static inline bool virtblk_has_zoned_feature(struct virtio_device *vdev)
-{
- return false;
+ virtio_cread(vdev, struct virtio_blk_config, zoned.model, &model);
+ if (model == VIRTIO_BLK_Z_HM) {
+ dev_err(&vdev->dev,
+ "virtio_blk: zoned devices are not supported");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
}
#endif /* CONFIG_BLK_DEV_ZONED */
@@ -831,7 +910,7 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str)
return PTR_ERR(req);
vbr = blk_mq_rq_to_pdu(req);
- vbr->in_hdr_len = sizeof(vbr->status);
+ vbr->in_hdr_len = sizeof(vbr->in_hdr.status);
vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_GET_ID);
vbr->out_hdr.sector = 0;
@@ -840,7 +919,7 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str)
goto out;
blk_execute_rq(req, false);
- err = blk_status_to_errno(virtblk_result(vbr->status));
+ err = blk_status_to_errno(virtblk_result(vbr->in_hdr.status));
out:
blk_mq_free_request(req);
return err;
@@ -1498,15 +1577,16 @@ static int virtblk_probe(struct virtio_device *vdev)
virtblk_update_capacity(vblk, false);
virtio_device_ready(vdev);
- if (virtblk_has_zoned_feature(vdev)) {
+ /*
+ * All steps that follow use the VQs therefore they need to be
+ * placed after the virtio_device_ready() call above.
+ */
+ if (virtio_has_feature(vdev, VIRTIO_BLK_F_ZONED)) {
err = virtblk_probe_zoned_device(vdev, vblk, q);
if (err)
goto out_cleanup_disk;
}
- dev_info(&vdev->dev, "blk config size: %zu\n",
- sizeof(struct virtio_blk_config));
-
err = device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups);
if (err)
goto out_cleanup_disk;
@@ -1607,10 +1687,7 @@ static unsigned int features[] = {
VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES,
- VIRTIO_BLK_F_SECURE_ERASE,
-#ifdef CONFIG_BLK_DEV_ZONED
- VIRTIO_BLK_F_ZONED,
-#endif /* CONFIG_BLK_DEV_ZONED */
+ VIRTIO_BLK_F_SECURE_ERASE, VIRTIO_BLK_F_ZONED,
};
static struct virtio_driver virtio_blk = {
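One unit detail in the zoned probe above is easy to miss: max_append_sectors (v) is counted in 512-byte sectors while write_granularity (wg) is in bytes, hence the shift before comparing. A worked instance (illustrative numbers):

/* wg = 4096 bytes, v = 8 sectors: v << SECTOR_SHIFT = 8 << 9 = 4096,
 * which is not < wg, so the device is accepted.  A device reporting
 * v = 4 would yield 2048 < 4096 and probing fails with -ENODEV. */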
diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
index 3006e2a0f37e..43e98a598bd9 100644
--- a/drivers/bluetooth/btbcm.c
+++ b/drivers/bluetooth/btbcm.c
@@ -511,7 +511,7 @@ static const char *btbcm_get_board_name(struct device *dev)
len = strlen(tmp) + 1;
board_type = devm_kzalloc(dev, len, GFP_KERNEL);
strscpy(board_type, tmp, len);
- for (i = 0; i < board_type[i]; i++) {
+ for (i = 0; i < len; i++) {
if (board_type[i] == '/')
board_type[i] = '-';
}
diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c
index 02893600db39..51000320e1ea 100644
--- a/drivers/bluetooth/btsdio.c
+++ b/drivers/bluetooth/btsdio.c
@@ -358,6 +358,7 @@ static void btsdio_remove(struct sdio_func *func)
if (!data)
return;
+ cancel_work_sync(&data->work);
hdev = data->hdev;
sdio_set_drvdata(func, NULL);
diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c
index 36d42484142a..cf463c1d2102 100644
--- a/drivers/bus/imx-weim.c
+++ b/drivers/bus/imx-weim.c
@@ -329,6 +329,12 @@ static int of_weim_notify(struct notifier_block *nb, unsigned long action,
"Failed to setup timing for '%pOF'\n", rd->dn);
if (!of_node_check_flag(rd->dn, OF_POPULATED)) {
+ /*
+ * Clear the flag before adding the device so that
+ * fw_devlink doesn't skip adding consumers to this
+ * device.
+ */
+ rd->dn->fwnode.flags &= ~FWNODE_FLAG_NOT_DEVICE;
if (!of_platform_device_create(rd->dn, NULL, &pdev->dev)) {
dev_err(&pdev->dev,
"Failed to create child device '%pOF'\n",
diff --git a/drivers/clk/clk-renesas-pcie.c b/drivers/clk/clk-renesas-pcie.c
index f91f30560820..ff3a52d48479 100644
--- a/drivers/clk/clk-renesas-pcie.c
+++ b/drivers/clk/clk-renesas-pcie.c
@@ -143,8 +143,9 @@ static int rs9_regmap_i2c_read(void *context,
static const struct regmap_config rs9_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
- .cache_type = REGCACHE_NONE,
+ .cache_type = REGCACHE_FLAT,
.max_register = RS9_REG_BCP,
+ .num_reg_defaults_raw = 0x8,
.rd_table = &rs9_readable_table,
.wr_table = &rs9_writeable_table,
.reg_write = rs9_regmap_i2c_write,
diff --git a/drivers/clk/imx/clk-imx6ul.c b/drivers/clk/imx/clk-imx6ul.c
index 2836adb817b7..e3696a88b5a3 100644
--- a/drivers/clk/imx/clk-imx6ul.c
+++ b/drivers/clk/imx/clk-imx6ul.c
@@ -95,14 +95,16 @@ static const struct clk_div_table video_div_table[] = {
{ }
};
-static const char * enet1_ref_sels[] = { "enet1_ref_125m", "enet1_ref_pad", };
+static const char * enet1_ref_sels[] = { "enet1_ref_125m", "enet1_ref_pad", "dummy", "dummy"};
static const u32 enet1_ref_sels_table[] = { IMX6UL_GPR1_ENET1_TX_CLK_DIR,
- IMX6UL_GPR1_ENET1_CLK_SEL };
+ IMX6UL_GPR1_ENET1_CLK_SEL, 0,
+ IMX6UL_GPR1_ENET1_TX_CLK_DIR | IMX6UL_GPR1_ENET1_CLK_SEL };
static const u32 enet1_ref_sels_table_mask = IMX6UL_GPR1_ENET1_TX_CLK_DIR |
IMX6UL_GPR1_ENET1_CLK_SEL;
-static const char * enet2_ref_sels[] = { "enet2_ref_125m", "enet2_ref_pad", };
+static const char * enet2_ref_sels[] = { "enet2_ref_125m", "enet2_ref_pad", "dummy", "dummy"};
static const u32 enet2_ref_sels_table[] = { IMX6UL_GPR1_ENET2_TX_CLK_DIR,
- IMX6UL_GPR1_ENET2_CLK_SEL };
+ IMX6UL_GPR1_ENET2_CLK_SEL, 0,
+ IMX6UL_GPR1_ENET2_TX_CLK_DIR | IMX6UL_GPR1_ENET2_CLK_SEL };
static const u32 enet2_ref_sels_table_mask = IMX6UL_GPR1_ENET2_TX_CLK_DIR |
IMX6UL_GPR1_ENET2_CLK_SEL;
diff --git a/drivers/clk/sprd/common.c b/drivers/clk/sprd/common.c
index ce81e4087a8f..2bfbab8db94b 100644
--- a/drivers/clk/sprd/common.c
+++ b/drivers/clk/sprd/common.c
@@ -17,7 +17,6 @@ static const struct regmap_config sprdclk_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
- .max_register = 0xffff,
.fast_io = true,
};
@@ -43,6 +42,8 @@ int sprd_clk_regmap_init(struct platform_device *pdev,
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node, *np;
struct regmap *regmap;
+ struct resource *res;
+ struct regmap_config reg_config = sprdclk_regmap_config;
if (of_find_property(node, "sprd,syscon", NULL)) {
regmap = syscon_regmap_lookup_by_phandle(node, "sprd,syscon");
@@ -59,12 +60,14 @@ int sprd_clk_regmap_init(struct platform_device *pdev,
return PTR_ERR(regmap);
}
} else {
- base = devm_platform_ioremap_resource(pdev, 0);
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(base))
return PTR_ERR(base);
+ reg_config.max_register = resource_size(res) - reg_config.reg_stride;
+
regmap = devm_regmap_init_mmio(&pdev->dev, base,
- &sprdclk_regmap_config);
+ &reg_config);
if (IS_ERR(regmap)) {
pr_err("failed to init regmap\n");
return PTR_ERR(regmap);
diff --git a/drivers/counter/104-quad-8.c b/drivers/counter/104-quad-8.c
index deed4afadb29..d9cb937665cf 100644
--- a/drivers/counter/104-quad-8.c
+++ b/drivers/counter/104-quad-8.c
@@ -97,10 +97,6 @@ struct quad8 {
struct quad8_reg __iomem *reg;
};
-/* Borrow Toggle flip-flop */
-#define QUAD8_FLAG_BT BIT(0)
-/* Carry Toggle flip-flop */
-#define QUAD8_FLAG_CT BIT(1)
/* Error flag */
#define QUAD8_FLAG_E BIT(4)
/* Up/Down flag */
@@ -133,6 +129,9 @@ struct quad8 {
#define QUAD8_CMR_QUADRATURE_X2 0x10
#define QUAD8_CMR_QUADRATURE_X4 0x18
+/* Each Counter is 24 bits wide */
+#define LS7267_CNTR_MAX GENMASK(23, 0)
+
static int quad8_signal_read(struct counter_device *counter,
struct counter_signal *signal,
enum counter_signal_level *level)
@@ -156,18 +155,10 @@ static int quad8_count_read(struct counter_device *counter,
{
struct quad8 *const priv = counter_priv(counter);
struct channel_reg __iomem *const chan = priv->reg->channel + count->id;
- unsigned int flags;
- unsigned int borrow;
- unsigned int carry;
unsigned long irqflags;
int i;
- flags = ioread8(&chan->control);
- borrow = flags & QUAD8_FLAG_BT;
- carry = !!(flags & QUAD8_FLAG_CT);
-
- /* Borrow XOR Carry effectively doubles count range */
- *val = (unsigned long)(borrow ^ carry) << 24;
+ *val = 0;
spin_lock_irqsave(&priv->lock, irqflags);
@@ -191,8 +182,7 @@ static int quad8_count_write(struct counter_device *counter,
unsigned long irqflags;
int i;
- /* Only 24-bit values are supported */
- if (val > 0xFFFFFF)
+ if (val > LS7267_CNTR_MAX)
return -ERANGE;
spin_lock_irqsave(&priv->lock, irqflags);
@@ -378,7 +368,7 @@ static int quad8_action_read(struct counter_device *counter,
/* Handle Index signals */
if (synapse->signal->id >= 16) {
- if (priv->preset_enable[count->id])
+ if (!priv->preset_enable[count->id])
*action = COUNTER_SYNAPSE_ACTION_RISING_EDGE;
else
*action = COUNTER_SYNAPSE_ACTION_NONE;
@@ -806,8 +796,7 @@ static int quad8_count_preset_write(struct counter_device *counter,
struct quad8 *const priv = counter_priv(counter);
unsigned long irqflags;
- /* Only 24-bit values are supported */
- if (preset > 0xFFFFFF)
+ if (preset > LS7267_CNTR_MAX)
return -ERANGE;
spin_lock_irqsave(&priv->lock, irqflags);
@@ -834,8 +823,7 @@ static int quad8_count_ceiling_read(struct counter_device *counter,
*ceiling = priv->preset[count->id];
break;
default:
- /* By default 0x1FFFFFF (25 bits unsigned) is maximum count */
- *ceiling = 0x1FFFFFF;
+ *ceiling = LS7267_CNTR_MAX;
break;
}
@@ -850,8 +838,7 @@ static int quad8_count_ceiling_write(struct counter_device *counter,
struct quad8 *const priv = counter_priv(counter);
unsigned long irqflags;
- /* Only 24-bit values are supported */
- if (ceiling > 0xFFFFFF)
+ if (ceiling > LS7267_CNTR_MAX)
return -ERANGE;
spin_lock_irqsave(&priv->lock, irqflags);
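LS7267_CNTR_MAX replaces the scattered 0xFFFFFF literals with GENMASK(23, 0), making the 24-bit counter width explicit. A standalone sketch of the mask and the -ERANGE check it backs (GENMASK reimplemented here for userspace; not the kernel header):

#include <stdio.h>

/* Userspace stand-in for the kernel's GENMASK(): bits h..l set. */
#define GENMASK(h, l) \
	(((~0UL) << (l)) & (~0UL >> (sizeof(unsigned long) * 8 - 1 - (h))))

#define LS7267_CNTR_MAX GENMASK(23, 0)	/* 0xFFFFFF: 24-bit counter */

static int check_count(unsigned long val)
{
	if (val > LS7267_CNTR_MAX)
		return -34;	/* -ERANGE */
	return 0;
}

int main(void)
{
	printf("max = %#lx\n", (unsigned long)LS7267_CNTR_MAX);
	printf("0xFFFFFF -> %d, 0x1000000 -> %d\n",
	       check_count(0xFFFFFF), check_count(0x1000000));
	return 0;
}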
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index 73c7643b2697..8dd46fad151e 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -840,22 +840,20 @@ static int amd_pstate_update_status(const char *buf, size_t size)
switch(mode_idx) {
case AMD_PSTATE_DISABLE:
- if (!current_pstate_driver)
- return -EINVAL;
- if (cppc_state == AMD_PSTATE_ACTIVE)
- return -EBUSY;
- cpufreq_unregister_driver(current_pstate_driver);
- amd_pstate_driver_cleanup();
+ if (current_pstate_driver) {
+ cpufreq_unregister_driver(current_pstate_driver);
+ amd_pstate_driver_cleanup();
+ }
break;
case AMD_PSTATE_PASSIVE:
if (current_pstate_driver) {
if (current_pstate_driver == &amd_pstate_driver)
return 0;
cpufreq_unregister_driver(current_pstate_driver);
- cppc_state = AMD_PSTATE_PASSIVE;
- current_pstate_driver = &amd_pstate_driver;
}
+ current_pstate_driver = &amd_pstate_driver;
+ cppc_state = AMD_PSTATE_PASSIVE;
ret = cpufreq_register_driver(current_pstate_driver);
break;
case AMD_PSTATE_ACTIVE:
@@ -863,10 +861,10 @@ static int amd_pstate_update_status(const char *buf, size_t size)
if (current_pstate_driver == &amd_pstate_epp_driver)
return 0;
cpufreq_unregister_driver(current_pstate_driver);
- current_pstate_driver = &amd_pstate_epp_driver;
- cppc_state = AMD_PSTATE_ACTIVE;
}
+ current_pstate_driver = &amd_pstate_epp_driver;
+ cppc_state = AMD_PSTATE_ACTIVE;
ret = cpufreq_register_driver(current_pstate_driver);
break;
default:
diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c
index 45deda18ed32..02cc2c38b44b 100644
--- a/drivers/cxl/core/hdm.c
+++ b/drivers/cxl/core/hdm.c
@@ -101,25 +101,40 @@ static int map_hdm_decoder_regs(struct cxl_port *port, void __iomem *crb,
BIT(CXL_CM_CAP_CAP_ID_HDM));
}
-static struct cxl_hdm *devm_cxl_setup_emulated_hdm(struct cxl_port *port,
- struct cxl_endpoint_dvsec_info *info)
+static bool should_emulate_decoders(struct cxl_endpoint_dvsec_info *info)
{
- struct device *dev = &port->dev;
struct cxl_hdm *cxlhdm;
+ void __iomem *hdm;
+ u32 ctrl;
+ int i;
- if (!info->mem_enabled)
- return ERR_PTR(-ENODEV);
+ if (!info)
+ return false;
- cxlhdm = devm_kzalloc(dev, sizeof(*cxlhdm), GFP_KERNEL);
- if (!cxlhdm)
- return ERR_PTR(-ENOMEM);
+ cxlhdm = dev_get_drvdata(&info->port->dev);
+ hdm = cxlhdm->regs.hdm_decoder;
- cxlhdm->port = port;
- cxlhdm->decoder_count = info->ranges;
- cxlhdm->target_count = info->ranges;
- dev_set_drvdata(&port->dev, cxlhdm);
+ if (!hdm)
+ return true;
- return cxlhdm;
+ /*
+ * If HDM decoders are present and the driver is in control of
+ * Mem_Enable, skip DVSEC-based emulation.
+ */
+ if (!info->mem_enabled)
+ return false;
+
+ /*
+ * If any decoders are committed already, there should not be any
+ * emulated DVSEC decoders.
+ */
+ for (i = 0; i < cxlhdm->decoder_count; i++) {
+ ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(i));
+ if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl))
+ return false;
+ }
+
+ return true;
}
/**
@@ -138,13 +153,14 @@ struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
cxlhdm = devm_kzalloc(dev, sizeof(*cxlhdm), GFP_KERNEL);
if (!cxlhdm)
return ERR_PTR(-ENOMEM);
-
cxlhdm->port = port;
- crb = ioremap(port->component_reg_phys, CXL_COMPONENT_REG_BLOCK_SIZE);
- if (!crb) {
- if (info && info->mem_enabled)
- return devm_cxl_setup_emulated_hdm(port, info);
+ dev_set_drvdata(dev, cxlhdm);
+ crb = ioremap(port->component_reg_phys, CXL_COMPONENT_REG_BLOCK_SIZE);
+ if (!crb && info && info->mem_enabled) {
+ cxlhdm->decoder_count = info->ranges;
+ return cxlhdm;
+ } else if (!crb) {
dev_err(dev, "No component registers mapped\n");
return ERR_PTR(-ENXIO);
}
@@ -160,7 +176,15 @@ struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
return ERR_PTR(-ENXIO);
}
- dev_set_drvdata(dev, cxlhdm);
+ /*
+ * Now that the hdm capability is parsed, decide if range
+ * register emulation is needed and fixup cxlhdm accordingly.
+ */
+ if (should_emulate_decoders(info)) {
+ dev_dbg(dev, "Fallback map %d range register%s\n", info->ranges,
+ info->ranges > 1 ? "s" : "");
+ cxlhdm->decoder_count = info->ranges;
+ }
return cxlhdm;
}
@@ -714,14 +738,20 @@ static int cxl_decoder_reset(struct cxl_decoder *cxld)
return 0;
}
-static int cxl_setup_hdm_decoder_from_dvsec(struct cxl_port *port,
- struct cxl_decoder *cxld, int which,
- struct cxl_endpoint_dvsec_info *info)
+static int cxl_setup_hdm_decoder_from_dvsec(
+ struct cxl_port *port, struct cxl_decoder *cxld, u64 *dpa_base,
+ int which, struct cxl_endpoint_dvsec_info *info)
{
+ struct cxl_endpoint_decoder *cxled;
+ u64 len;
+ int rc;
+
if (!is_cxl_endpoint(port))
return -EOPNOTSUPP;
- if (!range_len(&info->dvsec_range[which]))
+ cxled = to_cxl_endpoint_decoder(&cxld->dev);
+ len = range_len(&info->dvsec_range[which]);
+ if (!len)
return -ENOENT;
cxld->target_type = CXL_DECODER_EXPANDER;
@@ -736,40 +766,24 @@ static int cxl_setup_hdm_decoder_from_dvsec(struct cxl_port *port,
cxld->flags |= CXL_DECODER_F_ENABLE | CXL_DECODER_F_LOCK;
port->commit_end = cxld->id;
- return 0;
-}
-
-static bool should_emulate_decoders(struct cxl_port *port)
-{
- struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
- void __iomem *hdm = cxlhdm->regs.hdm_decoder;
- u32 ctrl;
- int i;
-
- if (!is_cxl_endpoint(cxlhdm->port))
- return false;
-
- if (!hdm)
- return true;
-
- /*
- * If any decoders are committed already, there should not be any
- * emulated DVSEC decoders.
- */
- for (i = 0; i < cxlhdm->decoder_count; i++) {
- ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(i));
- if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl))
- return false;
+ rc = devm_cxl_dpa_reserve(cxled, *dpa_base, len, 0);
+ if (rc) {
+ dev_err(&port->dev,
+ "decoder%d.%d: Failed to reserve DPA range %#llx - %#llx\n (%d)",
+ port->id, cxld->id, *dpa_base, *dpa_base + len - 1, rc);
+ return rc;
}
+ *dpa_base += len;
+ cxled->state = CXL_DECODER_STATE_AUTO;
- return true;
+ return 0;
}
static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
int *target_map, void __iomem *hdm, int which,
u64 *dpa_base, struct cxl_endpoint_dvsec_info *info)
{
- struct cxl_endpoint_decoder *cxled = NULL;
+ struct cxl_endpoint_decoder *cxled;
u64 size, base, skip, dpa_size;
bool committed;
u32 remainder;
@@ -780,11 +794,9 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
unsigned char target_id[8];
} target_list;
- if (should_emulate_decoders(port))
- return cxl_setup_hdm_decoder_from_dvsec(port, cxld, which, info);
-
- if (is_endpoint_decoder(&cxld->dev))
- cxled = to_cxl_endpoint_decoder(&cxld->dev);
+ if (should_emulate_decoders(info))
+ return cxl_setup_hdm_decoder_from_dvsec(port, cxld, dpa_base,
+ which, info);
ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which));
base = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(which));
@@ -806,9 +818,6 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
.end = base + size - 1,
};
- if (cxled && !committed && range_len(&info->dvsec_range[which]))
- return cxl_setup_hdm_decoder_from_dvsec(port, cxld, which, info);
-
/* decoders are enabled if committed */
if (committed) {
cxld->flags |= CXL_DECODER_F_ENABLE;
@@ -846,7 +855,7 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
if (rc)
return rc;
- if (!cxled) {
+ if (!info) {
target_list.value =
ioread64_hi_lo(hdm + CXL_HDM_DECODER0_TL_LOW(which));
for (i = 0; i < cxld->interleave_ways; i++)
@@ -866,6 +875,7 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
return -ENXIO;
}
skip = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_SKIP_LOW(which));
+ cxled = to_cxl_endpoint_decoder(&cxld->dev);
rc = devm_cxl_dpa_reserve(cxled, *dpa_base + skip, dpa_size, skip);
if (rc) {
dev_err(&port->dev,
diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c
index 7328a2552411..523d5b9fd7fc 100644
--- a/drivers/cxl/core/pci.c
+++ b/drivers/cxl/core/pci.c
@@ -462,7 +462,7 @@ static struct pci_doe_mb *find_cdat_doe(struct device *uport)
return NULL;
}
-#define CDAT_DOE_REQ(entry_handle) \
+#define CDAT_DOE_REQ(entry_handle) cpu_to_le32 \
(FIELD_PREP(CXL_DOE_TABLE_ACCESS_REQ_CODE, \
CXL_DOE_TABLE_ACCESS_REQ_CODE_READ) | \
FIELD_PREP(CXL_DOE_TABLE_ACCESS_TABLE_TYPE, \
@@ -475,8 +475,8 @@ static void cxl_doe_task_complete(struct pci_doe_task *task)
}
struct cdat_doe_task {
- u32 request_pl;
- u32 response_pl[32];
+ __le32 request_pl;
+ __le32 response_pl[32];
struct completion c;
struct pci_doe_task task;
};
@@ -510,10 +510,10 @@ static int cxl_cdat_get_length(struct device *dev,
return rc;
}
wait_for_completion(&t.c);
- if (t.task.rv < sizeof(u32))
+ if (t.task.rv < 2 * sizeof(__le32))
return -EIO;
- *length = t.response_pl[1];
+ *length = le32_to_cpu(t.response_pl[1]);
dev_dbg(dev, "CDAT length %zu\n", *length);
return 0;
@@ -524,13 +524,13 @@ static int cxl_cdat_read_table(struct device *dev,
struct cxl_cdat *cdat)
{
size_t length = cdat->length;
- u32 *data = cdat->table;
+ __le32 *data = cdat->table;
int entry_handle = 0;
do {
DECLARE_CDAT_DOE_TASK(CDAT_DOE_REQ(entry_handle), t);
+ struct cdat_entry_header *entry;
size_t entry_dw;
- u32 *entry;
int rc;
rc = pci_doe_submit_task(cdat_doe, &t.task);
@@ -539,26 +539,34 @@ static int cxl_cdat_read_table(struct device *dev,
return rc;
}
wait_for_completion(&t.c);
- /* 1 DW header + 1 DW data min */
- if (t.task.rv < (2 * sizeof(u32)))
+
+ /* 1 DW Table Access Response Header + CDAT entry */
+ entry = (struct cdat_entry_header *)(t.response_pl + 1);
+ if ((entry_handle == 0 &&
+ t.task.rv != sizeof(__le32) + sizeof(struct cdat_header)) ||
+ (entry_handle > 0 &&
+ (t.task.rv < sizeof(__le32) + sizeof(*entry) ||
+ t.task.rv != sizeof(__le32) + le16_to_cpu(entry->length))))
return -EIO;
/* Get the CXL table access header entry handle */
entry_handle = FIELD_GET(CXL_DOE_TABLE_ACCESS_ENTRY_HANDLE,
- t.response_pl[0]);
- entry = t.response_pl + 1;
- entry_dw = t.task.rv / sizeof(u32);
+ le32_to_cpu(t.response_pl[0]));
+ entry_dw = t.task.rv / sizeof(__le32);
/* Skip Header */
entry_dw -= 1;
- entry_dw = min(length / sizeof(u32), entry_dw);
+ entry_dw = min(length / sizeof(__le32), entry_dw);
/* Prevent length < 1 DW from causing a buffer overflow */
if (entry_dw) {
- memcpy(data, entry, entry_dw * sizeof(u32));
- length -= entry_dw * sizeof(u32);
+ memcpy(data, entry, entry_dw * sizeof(__le32));
+ length -= entry_dw * sizeof(__le32);
data += entry_dw;
}
} while (entry_handle != CXL_DOE_TABLE_ACCESS_LAST_ENTRY);
+ /* Length in CDAT header may exceed concatenation of CDAT entries */
+ cdat->length -= length;
+
return 0;
}
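The payloads are now carried as __le32 and converted with le32_to_cpu() before use, so the parse is also correct on big-endian hosts. A standalone sketch of the equivalent portable load (response bytes hypothetical):

#include <stdint.h>
#include <stdio.h>

/* Portable little-endian 32-bit load: correct on both LE and BE hosts,
 * which is what le32_to_cpu() guarantees in the kernel. */
static uint32_t le32_load(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
	/* Hypothetical DOE response dword: length field 0x00000150 */
	const uint8_t resp[4] = { 0x50, 0x01, 0x00, 0x00 };

	printf("CDAT length = %u\n", le32_load(resp));	/* 336 */
	return 0;
}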
diff --git a/drivers/cxl/core/pmem.c b/drivers/cxl/core/pmem.c
index c2e4b1093788..f8c38d997252 100644
--- a/drivers/cxl/core/pmem.c
+++ b/drivers/cxl/core/pmem.c
@@ -62,9 +62,9 @@ static int match_nvdimm_bridge(struct device *dev, void *data)
return is_cxl_nvdimm_bridge(dev);
}
-struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct device *start)
+struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_memdev *cxlmd)
{
- struct cxl_port *port = find_cxl_root(start);
+ struct cxl_port *port = find_cxl_root(dev_get_drvdata(&cxlmd->dev));
struct device *dev;
if (!port)
@@ -253,7 +253,7 @@ int devm_cxl_add_nvdimm(struct cxl_memdev *cxlmd)
struct device *dev;
int rc;
- cxl_nvb = cxl_find_nvdimm_bridge(&cxlmd->dev);
+ cxl_nvb = cxl_find_nvdimm_bridge(cxlmd);
if (!cxl_nvb)
return -ENODEV;
diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c
index 8ee6b6e2e2a4..4d1f9c5b5029 100644
--- a/drivers/cxl/core/port.c
+++ b/drivers/cxl/core/port.c
@@ -823,41 +823,17 @@ static bool dev_is_cxl_root_child(struct device *dev)
return false;
}
-/* Find a 2nd level CXL port that has a dport that is an ancestor of @match */
-static int match_root_child(struct device *dev, const void *match)
+struct cxl_port *find_cxl_root(struct cxl_port *port)
{
- const struct device *iter = NULL;
- struct cxl_dport *dport;
- struct cxl_port *port;
-
- if (!dev_is_cxl_root_child(dev))
- return 0;
-
- port = to_cxl_port(dev);
- iter = match;
- while (iter) {
- dport = cxl_find_dport_by_dev(port, iter);
- if (dport)
- break;
- iter = iter->parent;
- }
-
- return !!iter;
-}
+ struct cxl_port *iter = port;
-struct cxl_port *find_cxl_root(struct device *dev)
-{
- struct device *port_dev;
- struct cxl_port *root;
+ while (iter && !is_cxl_root(iter))
+ iter = to_cxl_port(iter->dev.parent);
- port_dev = bus_find_device(&cxl_bus_type, NULL, dev, match_root_child);
- if (!port_dev)
+ if (!iter)
return NULL;
-
- root = to_cxl_port(port_dev->parent);
- get_device(&root->dev);
- put_device(port_dev);
- return root;
+ get_device(&iter->dev);
+ return iter;
}
EXPORT_SYMBOL_NS_GPL(find_cxl_root, CXL);
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index f29028148806..b2fd67fcebfb 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -134,9 +134,13 @@ static int cxl_region_decode_reset(struct cxl_region *cxlr, int count)
struct cxl_endpoint_decoder *cxled = p->targets[i];
struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
struct cxl_port *iter = cxled_to_port(cxled);
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct cxl_ep *ep;
int rc = 0;
+ if (cxlds->rcd)
+ goto endpoint_reset;
+
while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
iter = to_cxl_port(iter->dev.parent);
@@ -153,6 +157,7 @@ static int cxl_region_decode_reset(struct cxl_region *cxlr, int count)
return rc;
}
+endpoint_reset:
rc = cxled->cxld.reset(&cxled->cxld);
if (rc)
return rc;
@@ -1199,6 +1204,7 @@ static void cxl_region_teardown_targets(struct cxl_region *cxlr)
{
struct cxl_region_params *p = &cxlr->params;
struct cxl_endpoint_decoder *cxled;
+ struct cxl_dev_state *cxlds;
struct cxl_memdev *cxlmd;
struct cxl_port *iter;
struct cxl_ep *ep;
@@ -1214,6 +1220,10 @@ static void cxl_region_teardown_targets(struct cxl_region *cxlr)
for (i = 0; i < p->nr_targets; i++) {
cxled = p->targets[i];
cxlmd = cxled_to_memdev(cxled);
+ cxlds = cxlmd->cxlds;
+
+ if (cxlds->rcd)
+ continue;
iter = cxled_to_port(cxled);
while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
@@ -1229,14 +1239,24 @@ static int cxl_region_setup_targets(struct cxl_region *cxlr)
{
struct cxl_region_params *p = &cxlr->params;
struct cxl_endpoint_decoder *cxled;
+ struct cxl_dev_state *cxlds;
+ int i, rc, rch = 0, vh = 0;
struct cxl_memdev *cxlmd;
struct cxl_port *iter;
struct cxl_ep *ep;
- int i, rc;
for (i = 0; i < p->nr_targets; i++) {
cxled = p->targets[i];
cxlmd = cxled_to_memdev(cxled);
+ cxlds = cxlmd->cxlds;
+
+ /* validate that all targets agree on topology */
+ if (!cxlds->rcd) {
+ vh++;
+ } else {
+ rch++;
+ continue;
+ }
iter = cxled_to_port(cxled);
while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
@@ -1256,6 +1276,12 @@ static int cxl_region_setup_targets(struct cxl_region *cxlr)
}
}
+ if (rch && vh) {
+ dev_err(&cxlr->dev, "mismatched CXL topologies detected\n");
+ cxl_region_teardown_targets(cxlr);
+ return -ENXIO;
+ }
+
return 0;
}
@@ -1648,6 +1674,7 @@ static int cxl_region_attach(struct cxl_region *cxlr,
if (rc)
goto err_decrement;
p->state = CXL_CONFIG_ACTIVE;
+ set_bit(CXL_REGION_F_INCOHERENT, &cxlr->flags);
}
cxled->cxld.interleave_ways = p->interleave_ways;
@@ -1749,8 +1776,6 @@ static int attach_target(struct cxl_region *cxlr,
down_read(&cxl_dpa_rwsem);
rc = cxl_region_attach(cxlr, cxled, pos);
- if (rc == 0)
- set_bit(CXL_REGION_F_INCOHERENT, &cxlr->flags);
up_read(&cxl_dpa_rwsem);
up_write(&cxl_region_rwsem);
return rc;
@@ -2251,7 +2276,7 @@ static struct cxl_pmem_region *cxl_pmem_region_alloc(struct cxl_region *cxlr)
* bridge for one device is the same for all.
*/
if (i == 0) {
- cxl_nvb = cxl_find_nvdimm_bridge(&cxlmd->dev);
+ cxl_nvb = cxl_find_nvdimm_bridge(cxlmd);
if (!cxl_nvb) {
cxlr_pmem = ERR_PTR(-ENODEV);
goto out;
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index f2b0962a552d..044a92d9813e 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -658,7 +658,7 @@ struct pci_bus *cxl_port_to_pci_bus(struct cxl_port *port);
struct cxl_port *devm_cxl_add_port(struct device *host, struct device *uport,
resource_size_t component_reg_phys,
struct cxl_dport *parent_dport);
-struct cxl_port *find_cxl_root(struct device *dev);
+struct cxl_port *find_cxl_root(struct cxl_port *port);
int devm_cxl_enumerate_ports(struct cxl_memdev *cxlmd);
void cxl_bus_rescan(void);
void cxl_bus_drain(void);
@@ -695,13 +695,15 @@ int cxl_endpoint_autoremove(struct cxl_memdev *cxlmd, struct cxl_port *endpoint)
/**
* struct cxl_endpoint_dvsec_info - Cached DVSEC info
- * @mem_enabled: cached value of mem_enabled in the DVSEC, PCIE_DEVICE
+ * @mem_enabled: cached value of mem_enabled in the DVSEC at init time
* @ranges: Number of active HDM ranges this device uses.
+ * @port: endpoint port associated with this info instance
* @dvsec_range: cached attributes of the ranges in the DVSEC, PCIE_DEVICE
*/
struct cxl_endpoint_dvsec_info {
bool mem_enabled;
int ranges;
+ struct cxl_port *port;
struct range dvsec_range[2];
};
@@ -758,7 +760,7 @@ struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev);
bool is_cxl_nvdimm(struct device *dev);
bool is_cxl_nvdimm_bridge(struct device *dev);
int devm_cxl_add_nvdimm(struct cxl_memdev *cxlmd);
-struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct device *dev);
+struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_memdev *cxlmd);
#ifdef CONFIG_CXL_REGION
bool is_cxl_pmem_region(struct device *dev);
diff --git a/drivers/cxl/cxlpci.h b/drivers/cxl/cxlpci.h
index be6a2ef3cce3..0465ef963cd6 100644
--- a/drivers/cxl/cxlpci.h
+++ b/drivers/cxl/cxlpci.h
@@ -68,6 +68,20 @@ enum cxl_regloc_type {
CXL_REGLOC_RBI_TYPES
};
+struct cdat_header {
+ __le32 length;
+ u8 revision;
+ u8 checksum;
+ u8 reserved[6];
+ __le32 sequence;
+} __packed;
+
+struct cdat_entry_header {
+ u8 type;
+ u8 reserved;
+ __le16 length;
+} __packed;
+
int devm_cxl_port_enumerate_dports(struct cxl_port *port);
struct cxl_dev_state;
int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm,
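The packed layouts above let the DOE read path validate responses by exact size: 16 bytes for the CDAT header and 4 bytes per entry header, each preceded by one 4-byte Table Access response dword. A standalone sketch checking those sizes (userspace stand-ins for the __le types assumed):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Userspace stand-ins for the kernel's little-endian types. */
typedef uint16_t le16;
typedef uint32_t le32;

struct cdat_header {
	le32 length;
	uint8_t revision;
	uint8_t checksum;
	uint8_t reserved[6];
	le32 sequence;
} __attribute__((packed));

struct cdat_entry_header {
	uint8_t type;
	uint8_t reserved;
	le16 length;
} __attribute__((packed));

int main(void)
{
	/* The size checks the read path relies on. */
	assert(sizeof(struct cdat_header) == 16);
	assert(sizeof(struct cdat_entry_header) == 4);
	printf("header=%zu entry=%zu\n",
	       sizeof(struct cdat_header), sizeof(struct cdat_entry_header));
	return 0;
}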
diff --git a/drivers/cxl/port.c b/drivers/cxl/port.c
index 1049bb5ea496..22a7ab2bae7c 100644
--- a/drivers/cxl/port.c
+++ b/drivers/cxl/port.c
@@ -78,8 +78,8 @@ static int cxl_switch_port_probe(struct cxl_port *port)
static int cxl_endpoint_port_probe(struct cxl_port *port)
{
+ struct cxl_endpoint_dvsec_info info = { .port = port };
struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport);
- struct cxl_endpoint_dvsec_info info = { 0 };
struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct cxl_hdm *cxlhdm;
struct cxl_port *root;
@@ -119,7 +119,7 @@ static int cxl_endpoint_port_probe(struct cxl_port *port)
* This can't fail in practice as CXL root exit unregisters all
* descendant ports and that in turn synchronizes with cxl_port_probe()
*/
- root = find_cxl_root(&cxlmd->dev);
+ root = find_cxl_root(port);
/*
* Now that all endpoint decoders are successfully enumerated, try to
diff --git a/drivers/dma/apple-admac.c b/drivers/dma/apple-admac.c
index 90f28bda29c8..4cf8da77bdd9 100644
--- a/drivers/dma/apple-admac.c
+++ b/drivers/dma/apple-admac.c
@@ -75,6 +75,7 @@
#define REG_TX_INTSTATE(idx) (0x0030 + (idx) * 4)
#define REG_RX_INTSTATE(idx) (0x0040 + (idx) * 4)
+#define REG_GLOBAL_INTSTATE(idx) (0x0050 + (idx) * 4)
#define REG_CHAN_INTSTATUS(ch, idx) (0x8010 + (ch) * 0x200 + (idx) * 4)
#define REG_CHAN_INTMASK(ch, idx) (0x8020 + (ch) * 0x200 + (idx) * 4)
@@ -511,7 +512,10 @@ static int admac_terminate_all(struct dma_chan *chan)
admac_stop_chan(adchan);
admac_reset_rings(adchan);
- adchan->current_tx = NULL;
+ if (adchan->current_tx) {
+ list_add_tail(&adchan->current_tx->node, &adchan->to_free);
+ adchan->current_tx = NULL;
+ }
/*
* Descriptors can only be freed after the tasklet
* has been killed (in admac_synchronize).
@@ -672,13 +676,14 @@ static void admac_handle_chan_int(struct admac_data *ad, int no)
static irqreturn_t admac_interrupt(int irq, void *devid)
{
struct admac_data *ad = devid;
- u32 rx_intstate, tx_intstate;
+ u32 rx_intstate, tx_intstate, global_intstate;
int i;
rx_intstate = readl_relaxed(ad->base + REG_RX_INTSTATE(ad->irq_index));
tx_intstate = readl_relaxed(ad->base + REG_TX_INTSTATE(ad->irq_index));
+ global_intstate = readl_relaxed(ad->base + REG_GLOBAL_INTSTATE(ad->irq_index));
- if (!tx_intstate && !rx_intstate)
+ if (!tx_intstate && !rx_intstate && !global_intstate)
return IRQ_NONE;
for (i = 0; i < ad->nchannels; i += 2) {
@@ -693,6 +698,12 @@ static irqreturn_t admac_interrupt(int irq, void *devid)
rx_intstate >>= 1;
}
+ if (global_intstate) {
+ dev_warn(ad->dev, "clearing unknown global interrupt flag: %x\n",
+ global_intstate);
+ writel_relaxed(~(u32) 0, ad->base + REG_GLOBAL_INTSTATE(ad->irq_index));
+ }
+
return IRQ_HANDLED;
}
@@ -850,6 +861,9 @@ static int admac_probe(struct platform_device *pdev)
dma->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
dma->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+ dma->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
dma->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index c24bca210104..826b98284fa1 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -1342,7 +1342,7 @@ int dmaenginem_async_device_register(struct dma_device *device)
if (ret)
return ret;
- return devm_add_action(device->dev, dmaenginem_async_device_unregister, device);
+ return devm_add_action_or_reset(device->dev, dmaenginem_async_device_unregister, device);
}
EXPORT_SYMBOL(dmaenginem_async_device_register);
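The switch to devm_add_action_or_reset() matters on the failure path: if recording the action itself fails, the action runs immediately instead of the unregister being silently lost. A minimal userspace model of that contract (standalone C; a sketch of the semantics, not the kernel API):

#include <stdio.h>

/* Model: recording a cleanup action can itself fail (say, -ENOMEM).
 * The "_or_reset" variant runs the action on that failure path, so the
 * caller never leaks the resource the action was meant to tear down. */
static int add_action(void (*action)(void *), void *data, int fail)
{
	(void)action;
	(void)data;
	if (fail)
		return -12;	/* -ENOMEM: the action was NOT recorded */
	/* real code would append (action, data) to a devres list */
	return 0;
}

static int add_action_or_reset(void (*action)(void *), void *data, int fail)
{
	int ret = add_action(action, data, fail);

	if (ret)
		action(data);	/* undo immediately instead of leaking */
	return ret;
}

static void unregister_device(void *data)
{
	printf("unregistering %s\n", (const char *)data);
}

int main(void)
{
	/* Simulate the failure path: the cleanup still runs exactly once. */
	int ret = add_action_or_reset(unregister_device, "dma0", 1);

	printf("ret = %d\n", ret);
	return 0;
}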
diff --git a/drivers/dma/xilinx/xdma.c b/drivers/dma/xilinx/xdma.c
index 462109c61653..93ee298d52b8 100644
--- a/drivers/dma/xilinx/xdma.c
+++ b/drivers/dma/xilinx/xdma.c
@@ -277,7 +277,7 @@ failed:
/**
* xdma_xfer_start - Start DMA transfer
- * @xdma_chan: DMA channel pointer
+ * @xchan: DMA channel pointer
*/
static int xdma_xfer_start(struct xdma_chan *xchan)
{
diff --git a/drivers/firmware/psci/psci.c b/drivers/firmware/psci/psci.c
index 29619f49873a..d9629ff87861 100644
--- a/drivers/firmware/psci/psci.c
+++ b/drivers/firmware/psci/psci.c
@@ -167,7 +167,8 @@ int psci_set_osi_mode(bool enable)
err = invoke_psci_fn(PSCI_1_0_FN_SET_SUSPEND_MODE, suspend_mode, 0, 0);
if (err < 0)
- pr_warn("failed to set %s mode: %d\n", enable ? "OSI" : "PC", err);
+ pr_info(FW_BUG "failed to set %s mode: %d\n",
+ enable ? "OSI" : "PC", err);
return psci_to_linux_errno(err);
}
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 13be729710f2..badbe0582318 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -100,7 +100,7 @@ config GPIO_GENERIC
tristate
config GPIO_REGMAP
- depends on REGMAP
+ select REGMAP
tristate
# put drivers in the right section, in alphabetical order
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index 26b1f7465e09..43b2dc8821e6 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -324,7 +324,7 @@ static struct irq_chip gpio_irqchip = {
.irq_enable = gpio_irq_enable,
.irq_disable = gpio_irq_disable,
.irq_set_type = gpio_irq_type,
- .flags = IRQCHIP_SET_TYPE_MASKED,
+ .flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_SKIP_SET_WAKE,
};
static void gpio_irq_handler(struct irq_desc *desc)
@@ -641,9 +641,6 @@ static void davinci_gpio_save_context(struct davinci_gpio_controller *chips,
context->set_falling = readl_relaxed(&g->set_falling);
}
- /* Clear Bank interrupt enable bit */
- writel_relaxed(0, base + BINTEN);
-
/* Clear all interrupt status registers */
writel_relaxed(GENMASK(31, 0), &g->intstat);
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index 1583157da355..efd025d8961e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -177,6 +177,40 @@ void dm_helpers_dp_update_branch_info(
const struct dc_link *link)
{}
+static void dm_helpers_construct_old_payload(
+ struct dc_link *link,
+ int pbn_per_slot,
+ struct drm_dp_mst_atomic_payload *new_payload,
+ struct drm_dp_mst_atomic_payload *old_payload)
+{
+ struct link_mst_stream_allocation_table current_link_table =
+ link->mst_stream_alloc_table;
+ struct link_mst_stream_allocation *dc_alloc;
+ int i;
+
+ *old_payload = *new_payload;
+
+ /* Set the correct time_slots/PBN on the old payload.
+ * The other fields (delete & dsc_enabled) in
+ * struct drm_dp_mst_atomic_payload are don't-care fields
+ * when calling drm_dp_remove_payload().
+ */
+ for (i = 0; i < current_link_table.stream_count; i++) {
+ dc_alloc =
+ &current_link_table.stream_allocations[i];
+
+ if (dc_alloc->vcp_id == new_payload->vcpi) {
+ old_payload->time_slots = dc_alloc->slot_count;
+ old_payload->pbn = dc_alloc->slot_count * pbn_per_slot;
+ break;
+ }
+ }
+
+ /* make sure there is an old payload */
+ ASSERT(i != current_link_table.stream_count);
+
+}
+
/*
* Writes payload allocation table in immediate downstream device.
*/
@@ -188,7 +222,7 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
{
struct amdgpu_dm_connector *aconnector;
struct drm_dp_mst_topology_state *mst_state;
- struct drm_dp_mst_atomic_payload *payload;
+ struct drm_dp_mst_atomic_payload *target_payload, *new_payload, old_payload;
struct drm_dp_mst_topology_mgr *mst_mgr;
aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
@@ -204,17 +238,26 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state);
/* It's OK for this to fail */
- payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->mst_output_port);
- if (enable)
- drm_dp_add_payload_part1(mst_mgr, mst_state, payload);
- else
- drm_dp_remove_payload(mst_mgr, mst_state, payload, payload);
+ new_payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->mst_output_port);
+
+ if (enable) {
+ target_payload = new_payload;
+
+ drm_dp_add_payload_part1(mst_mgr, mst_state, new_payload);
+ } else {
+ /* construct old payload by VCPI */
+ dm_helpers_construct_old_payload(stream->link, mst_state->pbn_div,
+ new_payload, &old_payload);
+ target_payload = &old_payload;
+
+ drm_dp_remove_payload(mst_mgr, mst_state, &old_payload, new_payload);
+ }
/* mst_mgr->payloads are VC payloads used to notify the MST branch via
* DPCD or AUX messages. Slots 1-63 are allocated sequentially for each
* stream, and the AMD ASIC stream slot allocation should follow the same
* sequence, so copy the DRM MST allocation to dc. */
- fill_dc_mst_payload_table_from_drm(stream->link, enable, payload, proposed_table);
+ fill_dc_mst_payload_table_from_drm(stream->link, enable, target_payload, proposed_table);
return true;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
index f085cb97a620..85a090b9e3d9 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
@@ -61,6 +61,12 @@
#define CTF_OFFSET_HOTSPOT 5
#define CTF_OFFSET_MEM 5
+static const int pmfw_decoded_link_speed[5] = {1, 2, 3, 4, 5};
+static const int pmfw_decoded_link_width[7] = {0, 1, 2, 4, 8, 12, 16};
+
+#define DECODE_GEN_SPEED(gen_speed_idx) (pmfw_decoded_link_speed[gen_speed_idx])
+#define DECODE_LANE_WIDTH(lane_width_idx) (pmfw_decoded_link_width[lane_width_idx])
+
struct smu_13_0_max_sustainable_clocks {
uint32_t display_clock;
uint32_t phy_clock;
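The two tables above decode PMFW's index-encoded link speed and width into the values the clock-level sysfs output compares against. A standalone sketch of the decode step (index values hypothetical):

#include <stdio.h>

static const int pmfw_decoded_link_speed[5] = { 1, 2, 3, 4, 5 };
static const int pmfw_decoded_link_width[7] = { 0, 1, 2, 4, 8, 12, 16 };

int main(void)
{
	int gen_idx = 3, width_idx = 6;	/* hypothetical PMFW-encoded values */

	/* index 3 -> Gen4, index 6 -> x16 */
	printf("pcie gen%d x%d\n",
	       pmfw_decoded_link_speed[gen_idx],
	       pmfw_decoded_link_width[width_idx]);
	return 0;
}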
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 27448ffe60a4..a5c97d61e92a 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -1144,8 +1144,8 @@ static int smu_v13_0_0_print_clk_levels(struct smu_context *smu,
(pcie_table->pcie_lane[i] == 5) ? "x12" :
(pcie_table->pcie_lane[i] == 6) ? "x16" : "",
pcie_table->clk_freq[i],
- ((gen_speed - 1) == pcie_table->pcie_gen[i]) &&
- (lane_width == link_width[pcie_table->pcie_lane[i]]) ?
+ (gen_speed == DECODE_GEN_SPEED(pcie_table->pcie_gen[i])) &&
+ (lane_width == DECODE_LANE_WIDTH(link_width[pcie_table->pcie_lane[i]])) ?
"*" : "");
break;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index 9e1967d8049e..4399416dd9b8 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -575,6 +575,14 @@ static int smu_v13_0_7_set_default_dpm_table(struct smu_context *smu)
dpm_table);
if (ret)
return ret;
+
+ if (skutable->DriverReportedClocks.GameClockAc &&
+ (dpm_table->dpm_levels[dpm_table->count - 1].value >
+ skutable->DriverReportedClocks.GameClockAc)) {
+ dpm_table->dpm_levels[dpm_table->count - 1].value =
+ skutable->DriverReportedClocks.GameClockAc;
+ dpm_table->max = skutable->DriverReportedClocks.GameClockAc;
+ }
} else {
dpm_table->count = 1;
dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100;
@@ -828,6 +836,57 @@ static int smu_v13_0_7_get_smu_metrics_data(struct smu_context *smu,
return ret;
}
+static int smu_v13_0_7_get_dpm_ultimate_freq(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t *min,
+ uint32_t *max)
+{
+ struct smu_13_0_dpm_context *dpm_context =
+ smu->smu_dpm.dpm_context;
+ struct smu_13_0_dpm_table *dpm_table;
+
+ switch (clk_type) {
+ case SMU_MCLK:
+ case SMU_UCLK:
+ /* uclk dpm table */
+ dpm_table = &dpm_context->dpm_tables.uclk_table;
+ break;
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ /* gfxclk dpm table */
+ dpm_table = &dpm_context->dpm_tables.gfx_table;
+ break;
+ case SMU_SOCCLK:
+ /* socclk dpm table */
+ dpm_table = &dpm_context->dpm_tables.soc_table;
+ break;
+ case SMU_FCLK:
+ /* fclk dpm table */
+ dpm_table = &dpm_context->dpm_tables.fclk_table;
+ break;
+ case SMU_VCLK:
+ case SMU_VCLK1:
+ /* vclk dpm table */
+ dpm_table = &dpm_context->dpm_tables.vclk_table;
+ break;
+ case SMU_DCLK:
+ case SMU_DCLK1:
+ /* dclk dpm table */
+ dpm_table = &dpm_context->dpm_tables.dclk_table;
+ break;
+ default:
+ dev_err(smu->adev->dev, "Unsupported clock type!\n");
+ return -EINVAL;
+ }
+
+ if (min)
+ *min = dpm_table->min;
+ if (max)
+ *max = dpm_table->max;
+
+ return 0;
+}
+
static int smu_v13_0_7_read_sensor(struct smu_context *smu,
enum amd_pp_sensors sensor,
void *data,
@@ -1074,8 +1133,8 @@ static int smu_v13_0_7_print_clk_levels(struct smu_context *smu,
(pcie_table->pcie_lane[i] == 5) ? "x12" :
(pcie_table->pcie_lane[i] == 6) ? "x16" : "",
pcie_table->clk_freq[i],
- (gen_speed == pcie_table->pcie_gen[i]) &&
- (lane_width == pcie_table->pcie_lane[i]) ?
+ (gen_speed == DECODE_GEN_SPEED(pcie_table->pcie_gen[i])) &&
+ (lane_width == DECODE_LANE_WIDTH(pcie_table->pcie_lane[i])) ?
"*" : "");
break;
@@ -1329,9 +1388,17 @@ static int smu_v13_0_7_populate_umd_state_clk(struct smu_context *smu)
&dpm_context->dpm_tables.fclk_table;
struct smu_umd_pstate_table *pstate_table =
&smu->pstate_table;
+ struct smu_table_context *table_context = &smu->smu_table;
+ PPTable_t *pptable = table_context->driver_pptable;
+ DriverReportedClocks_t driver_clocks =
+ pptable->SkuTable.DriverReportedClocks;
pstate_table->gfxclk_pstate.min = gfx_table->min;
- pstate_table->gfxclk_pstate.peak = gfx_table->max;
+ if (driver_clocks.GameClockAc &&
+ (driver_clocks.GameClockAc < gfx_table->max))
+ pstate_table->gfxclk_pstate.peak = driver_clocks.GameClockAc;
+ else
+ pstate_table->gfxclk_pstate.peak = gfx_table->max;
pstate_table->uclk_pstate.min = mem_table->min;
pstate_table->uclk_pstate.peak = mem_table->max;
@@ -1348,12 +1415,12 @@ static int smu_v13_0_7_populate_umd_state_clk(struct smu_context *smu)
pstate_table->fclk_pstate.min = fclk_table->min;
pstate_table->fclk_pstate.peak = fclk_table->max;
- /*
- * For now, just use the mininum clock frequency.
- * TODO: update them when the real pstate settings available
- */
- pstate_table->gfxclk_pstate.standard = gfx_table->min;
- pstate_table->uclk_pstate.standard = mem_table->min;
+ if (driver_clocks.BaseClockAc &&
+ driver_clocks.BaseClockAc < gfx_table->max)
+ pstate_table->gfxclk_pstate.standard = driver_clocks.BaseClockAc;
+ else
+ pstate_table->gfxclk_pstate.standard = gfx_table->max;
+ pstate_table->uclk_pstate.standard = mem_table->max;
pstate_table->socclk_pstate.standard = soc_table->min;
pstate_table->vclk_pstate.standard = vclk_table->min;
pstate_table->dclk_pstate.standard = dclk_table->min;
@@ -1676,7 +1743,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.dpm_set_jpeg_enable = smu_v13_0_set_jpeg_enable,
.init_pptable_microcode = smu_v13_0_init_pptable_microcode,
.populate_umd_state_clk = smu_v13_0_7_populate_umd_state_clk,
- .get_dpm_ultimate_freq = smu_v13_0_get_dpm_ultimate_freq,
+ .get_dpm_ultimate_freq = smu_v13_0_7_get_dpm_ultimate_freq,
.get_vbios_bootup_values = smu_v13_0_get_vbios_bootup_values,
.read_sensor = smu_v13_0_7_read_sensor,
.feature_is_enabled = smu_cmn_feature_is_enabled,
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index 0643887800b4..142668cd6d7c 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -99,7 +99,6 @@ static int armada_drm_bind(struct device *dev)
if (ret) {
dev_err(dev, "[" DRM_NAME ":%s] can't kick out simple-fb: %d\n",
__func__, ret);
- kfree(priv);
return ret;
}
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index 468a792e6a40..fc0eaf40dc94 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -300,9 +300,21 @@ static void configure_dual_link_mode(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ i915_reg_t dss_ctl1_reg, dss_ctl2_reg;
u32 dss_ctl1;
- dss_ctl1 = intel_de_read(dev_priv, DSS_CTL1);
+ /* FIXME: Move all DSS handling to intel_vdsc.c */
+ if (DISPLAY_VER(dev_priv) >= 12) {
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+
+ dss_ctl1_reg = ICL_PIPE_DSS_CTL1(crtc->pipe);
+ dss_ctl2_reg = ICL_PIPE_DSS_CTL2(crtc->pipe);
+ } else {
+ dss_ctl1_reg = DSS_CTL1;
+ dss_ctl2_reg = DSS_CTL2;
+ }
+
+ dss_ctl1 = intel_de_read(dev_priv, dss_ctl1_reg);
dss_ctl1 |= SPLITTER_ENABLE;
dss_ctl1 &= ~OVERLAP_PIXELS_MASK;
dss_ctl1 |= OVERLAP_PIXELS(intel_dsi->pixel_overlap);
@@ -323,16 +335,16 @@ static void configure_dual_link_mode(struct intel_encoder *encoder,
dss_ctl1 &= ~LEFT_DL_BUF_TARGET_DEPTH_MASK;
dss_ctl1 |= LEFT_DL_BUF_TARGET_DEPTH(dl_buffer_depth);
- dss_ctl2 = intel_de_read(dev_priv, DSS_CTL2);
+ dss_ctl2 = intel_de_read(dev_priv, dss_ctl2_reg);
dss_ctl2 &= ~RIGHT_DL_BUF_TARGET_DEPTH_MASK;
dss_ctl2 |= RIGHT_DL_BUF_TARGET_DEPTH(dl_buffer_depth);
- intel_de_write(dev_priv, DSS_CTL2, dss_ctl2);
+ intel_de_write(dev_priv, dss_ctl2_reg, dss_ctl2);
} else {
/* Interleave */
dss_ctl1 |= DUAL_LINK_MODE_INTERLEAVE;
}
- intel_de_write(dev_priv, DSS_CTL1, dss_ctl1);
+ intel_de_write(dev_priv, dss_ctl1_reg, dss_ctl1);
}
/* aka DSI 8X clock */
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 2106b3de225a..7c9b328bc2d7 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -232,7 +232,7 @@ static int intel_dp_dsc_mst_compute_link_config(struct intel_encoder *encoder,
return slots;
}
- intel_link_compute_m_n(crtc_state->pipe_bpp,
+ intel_link_compute_m_n(crtc_state->dsc.compressed_bpp,
crtc_state->lane_count,
adjusted_mode->crtc_clock,
crtc_state->port_clock,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index 7420276827a5..4758f21c91e1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -1067,11 +1067,12 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
.interruptible = true,
.no_wait_gpu = true, /* should be idle already */
};
+ int err;
GEM_BUG_ON(!bo->ttm || !(bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED));
- ret = ttm_bo_validate(bo, i915_ttm_sys_placement(), &ctx);
- if (ret) {
+ err = ttm_bo_validate(bo, i915_ttm_sys_placement(), &ctx);
+ if (err) {
dma_resv_unlock(bo->base.resv);
return VM_FAULT_SIGBUS;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 1bbe6708d0a7..750326434677 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -2018,6 +2018,8 @@ process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
* inspecting the queue to see if we need to resubmit.
*/
if (*prev != *execlists->active) { /* elide lite-restores */
+ struct intel_context *prev_ce = NULL, *active_ce = NULL;
+
/*
* Note the inherent discrepancy between the HW runtime,
* recorded as part of the context switch, and the CPU
@@ -2029,9 +2031,15 @@ process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
* and correct ourselves later when updating from HW.
*/
if (*prev)
- lrc_runtime_stop((*prev)->context);
+ prev_ce = (*prev)->context;
if (*execlists->active)
- lrc_runtime_start((*execlists->active)->context);
+ active_ce = (*execlists->active)->context;
+ if (prev_ce != active_ce) {
+ if (prev_ce)
+ lrc_runtime_stop(prev_ce);
+ if (active_ce)
+ lrc_runtime_start(active_ce);
+ }
new_timeslice(execlists);
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
index 410905da8e97..0c103ca160d1 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
@@ -235,6 +235,13 @@ static void delayed_huc_load_fini(struct intel_huc *huc)
i915_sw_fence_fini(&huc->delayed_load.fence);
}
+int intel_huc_sanitize(struct intel_huc *huc)
+{
+ delayed_huc_load_complete(huc);
+ intel_uc_fw_sanitize(&huc->fw);
+ return 0;
+}
+
static bool vcs_supported(struct intel_gt *gt)
{
intel_engine_mask_t mask = gt->info.engine_mask;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.h b/drivers/gpu/drm/i915/gt/uc/intel_huc.h
index 52db03620c60..db555b3c1f56 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.h
@@ -41,6 +41,7 @@ struct intel_huc {
} delayed_load;
};
+int intel_huc_sanitize(struct intel_huc *huc);
void intel_huc_init_early(struct intel_huc *huc);
int intel_huc_init(struct intel_huc *huc);
void intel_huc_fini(struct intel_huc *huc);
@@ -54,12 +55,6 @@ bool intel_huc_is_authenticated(struct intel_huc *huc);
void intel_huc_register_gsc_notifier(struct intel_huc *huc, struct bus_type *bus);
void intel_huc_unregister_gsc_notifier(struct intel_huc *huc, struct bus_type *bus);
-static inline int intel_huc_sanitize(struct intel_huc *huc)
-{
- intel_uc_fw_sanitize(&huc->fw);
- return 0;
-}
-
static inline bool intel_huc_is_supported(struct intel_huc *huc)
{
return intel_uc_fw_is_supported(&huc->fw);
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 283a4a3c6862..004074936300 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -4638,13 +4638,13 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
err = oa_config->id;
goto sysfs_err;
}
-
- mutex_unlock(&perf->metrics_lock);
+ id = oa_config->id;
drm_dbg(&perf->i915->drm,
"Added config %s id=%i\n", oa_config->uuid, oa_config->id);
+ mutex_unlock(&perf->metrics_lock);
- return oa_config->id;
+ return id;
sysfs_err:
mutex_unlock(&perf->metrics_lock);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index ed9d374147b8..5bb777ff1313 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -363,6 +363,35 @@ nv50_outp_atomic_check_view(struct drm_encoder *encoder,
return 0;
}
+static void
+nv50_outp_atomic_fix_depth(struct drm_encoder *encoder, struct drm_crtc_state *crtc_state)
+{
+ struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct drm_display_mode *mode = &asyh->state.adjusted_mode;
+ unsigned int max_rate, mode_rate;
+
+ switch (nv_encoder->dcb->type) {
+ case DCB_OUTPUT_DP:
+ max_rate = nv_encoder->dp.link_nr * nv_encoder->dp.link_bw;
+
+ /* we don't support more than 10 anyway */
+ asyh->or.bpc = min_t(u8, asyh->or.bpc, 10);
+
+ /* reduce the bpc until it works out */
+ while (asyh->or.bpc > 6) {
+ mode_rate = DIV_ROUND_UP(mode->clock * asyh->or.bpc * 3, 8);
+ if (mode_rate <= max_rate)
+ break;
+
+ asyh->or.bpc -= 2;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
static int
nv50_outp_atomic_check(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
@@ -381,6 +410,9 @@ nv50_outp_atomic_check(struct drm_encoder *encoder,
if (crtc_state->mode_changed || crtc_state->connectors_changed)
asyh->or.bpc = connector->display_info.bpc;
+ /* We might have to reduce the bpc */
+ nv50_outp_atomic_fix_depth(encoder, crtc_state);
+
return 0;
}
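nv50_outp_atomic_fix_depth() walks the bpc down in steps of 2 until the mode's payload rate fits the DP link. A standalone sketch of that loop with hypothetical numbers (the rate units only need to be consistent between mode_rate and max_rate):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int max_rate = 1620000;	/* hypothetical link capacity */
	unsigned int clock = 533250;		/* hypothetical mode clock, kHz */
	unsigned int bpc = 10;			/* start from the capped maximum */
	unsigned int mode_rate;

	/* Reduce bpc (10 -> 8 -> 6) until the mode rate fits the link;
	 * 6 bpc is the floor, mirroring the driver's while (bpc > 6). */
	while (bpc > 6) {
		mode_rate = DIV_ROUND_UP(clock * bpc * 3, 8);
		if (mode_rate <= max_rate)
			break;
		bpc -= 2;
	}
	printf("selected bpc = %u\n", bpc);	/* prints 8 for these numbers */
	return 0;
}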
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index e00876f92aee..d49b4875fc3c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -263,8 +263,6 @@ nouveau_dp_irq(struct work_struct *work)
}
/* TODO:
- * - Use the minimum possible BPC here, once we add support for the max bpc
- * property.
* - Validate against the DP caps advertised by the GPU (we don't check these
* yet)
*/
@@ -276,7 +274,11 @@ nv50_dp_mode_valid(struct drm_connector *connector,
{
const unsigned int min_clock = 25000;
unsigned int max_rate, mode_rate, ds_max_dotclock, clock = mode->clock;
- const u8 bpp = connector->display_info.bpc * 3;
+ /* Check with the minimum bpc always, so we can advertise better modes.
+ * In particular, not doing this causes modes to be dropped on HDR
+ * displays, as we might otherwise check with a bpc as high as 16.
+ */
+ const u8 bpp = 6 * 3;
if (mode->flags & DRM_MODE_FLAG_INTERLACE && !outp->caps.dp_interlace)
return MODE_NO_INTERLACE;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf108.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf108.c
index 76678dd60f93..c4c6f67af7cc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf108.c
@@ -31,6 +31,7 @@ gf108_fb = {
.init = gf100_fb_init,
.init_page = gf100_fb_init_page,
.intr = gf100_fb_intr,
+ .sysmem.flush_page_init = gf100_fb_sysmem_flush_page_init,
.ram_new = gf108_ram_new,
.default_bigpage = 17,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c
index f73442ccb424..433fa966ba23 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c
@@ -77,6 +77,7 @@ gk104_fb = {
.init = gf100_fb_init,
.init_page = gf100_fb_init_page,
.intr = gf100_fb_intr,
+ .sysmem.flush_page_init = gf100_fb_sysmem_flush_page_init,
.ram_new = gk104_ram_new,
.default_bigpage = 17,
.clkgate_pack = gk104_fb_clkgate_pack,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk110.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk110.c
index 45d6cdffafee..4dc283dedf8b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk110.c
@@ -59,6 +59,7 @@ gk110_fb = {
.init = gf100_fb_init,
.init_page = gf100_fb_init_page,
.intr = gf100_fb_intr,
+ .sysmem.flush_page_init = gf100_fb_sysmem_flush_page_init,
.ram_new = gk104_ram_new,
.default_bigpage = 17,
.clkgate_pack = gk110_fb_clkgate_pack,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c
index de52462a92bf..90bfff616d35 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c
@@ -31,6 +31,7 @@ gm107_fb = {
.init = gf100_fb_init,
.init_page = gf100_fb_init_page,
.intr = gf100_fb_intr,
+ .sysmem.flush_page_init = gf100_fb_sysmem_flush_page_init,
.ram_new = gm107_ram_new,
.default_bigpage = 17,
};
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index 666a5e53fe19..e961fa27702c 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -504,6 +504,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
if (IS_ERR(pages[i])) {
mutex_unlock(&bo->base.pages_lock);
ret = PTR_ERR(pages[i]);
+ pages[i] = NULL;
goto err_pages;
}
}
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 15d04a0ec623..e0a8890a62e2 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -507,12 +507,19 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
{
struct drm_sched_entity *entity = sched_job->entity;
bool first;
+ ktime_t submit_ts;
trace_drm_sched_job(sched_job, entity);
atomic_inc(entity->rq->sched->score);
WRITE_ONCE(entity->last_user, current->group_leader);
+
+ /*
+ * After the sched_job is pushed into the entity queue, it may be
+ * completed and freed up at any time. We can no longer access it.
+ * Make sure to set the submit_ts first, to avoid a race.
+ */
+ sched_job->submit_ts = submit_ts = ktime_get();
first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
- sched_job->submit_ts = ktime_get();
/* first job wakes up scheduler */
if (first) {
@@ -529,7 +536,7 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
spin_unlock(&entity->rq_lock);
if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
- drm_sched_rq_update_fifo(entity, sched_job->submit_ts);
+ drm_sched_rq_update_fifo(entity, submit_ts);
drm_sched_wakeup(entity->rq->sched);
}
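The scheduler fix stamps the job and stashes the timestamp in a local before the push, because the job may be completed and freed by another thread the moment it is queued. A minimal standalone model of that publish-then-hands-off pattern (the free inside queue_push() models the worst-case race eagerly; names hypothetical):

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

struct job {
	long long submit_ts;
};

/* Model of spsc_queue_push(): after this returns, another thread may
 * pop and free the job at any moment. */
static void queue_push(struct job *job)
{
	free(job);	/* worst case modeled eagerly: the job is gone */
}

int main(void)
{
	struct job *job = malloc(sizeof(*job));
	long long submit_ts;

	if (!job)
		return 1;
	/* Stamp first, keep a local copy, then publish; never read
	 * job->submit_ts after the push. */
	job->submit_ts = submit_ts = (long long)time(NULL);
	queue_push(job);
	printf("submitted at %lld\n", submit_ts);
	return 0;
}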
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 82f64fb31fda..4ce012f83253 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -1122,7 +1122,7 @@ config HID_TOPRE
tristate "Topre REALFORCE keyboards"
depends on HID
help
- Say Y for N-key rollover support on Topre REALFORCE R2 108 key keyboards.
+ Say Y for N-key rollover support on Topre REALFORCE R2 108/87-key keyboards.
config HID_THINGM
tristate "ThingM blink(1) USB RGB LED"
diff --git a/drivers/hid/bpf/hid_bpf_dispatch.c b/drivers/hid/bpf/hid_bpf_dispatch.c
index 8a034a555d4c..d9ef45fcaeab 100644
--- a/drivers/hid/bpf/hid_bpf_dispatch.c
+++ b/drivers/hid/bpf/hid_bpf_dispatch.c
@@ -342,9 +342,6 @@ hid_bpf_release_context(struct hid_bpf_ctx *ctx)
{
struct hid_bpf_ctx_kern *ctx_kern;
- if (!ctx)
- return;
-
ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx);
kfree(ctx_kern);
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 63545cd307e5..c2e9b6d1fd7d 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -420,6 +420,9 @@
#define I2C_DEVICE_ID_SURFACE_GO_TOUCHSCREEN 0x261A
#define I2C_DEVICE_ID_SURFACE_GO2_TOUCHSCREEN 0x2A1C
#define I2C_DEVICE_ID_LENOVO_YOGA_C630_TOUCHSCREEN 0x279F
+#define I2C_DEVICE_ID_HP_SPECTRE_X360_13T_AW100 0x29F5
+#define I2C_DEVICE_ID_HP_SPECTRE_X360_14T_EA100_V1 0x2BED
+#define I2C_DEVICE_ID_HP_SPECTRE_X360_14T_EA100_V2 0x2BEE
#define USB_VENDOR_ID_ELECOM 0x056e
#define USB_DEVICE_ID_ELECOM_BM084 0x0061
@@ -1249,6 +1252,7 @@
#define USB_VENDOR_ID_TOPRE 0x0853
#define USB_DEVICE_ID_TOPRE_REALFORCE_R2_108 0x0148
+#define USB_DEVICE_ID_TOPRE_REALFORCE_R2_87 0x0146
#define USB_VENDOR_ID_TOPSEED 0x0766
#define USB_DEVICE_ID_TOPSEED_CYBERLINK 0x0204
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 7fc967964dd8..5c65a584b3fa 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -398,6 +398,12 @@ static const struct hid_device_id hid_battery_quirks[] = {
HID_BATTERY_QUIRK_IGNORE },
{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_LENOVO_YOGA_C630_TOUCHSCREEN),
HID_BATTERY_QUIRK_IGNORE },
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_SPECTRE_X360_13T_AW100),
+ HID_BATTERY_QUIRK_IGNORE },
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_SPECTRE_X360_14T_EA100_V1),
+ HID_BATTERY_QUIRK_IGNORE },
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_SPECTRE_X360_14T_EA100_V2),
+ HID_BATTERY_QUIRK_IGNORE },
{}
};
diff --git a/drivers/hid/hid-sensor-custom.c b/drivers/hid/hid-sensor-custom.c
index 3e3f89e01d81..d85398721659 100644
--- a/drivers/hid/hid-sensor-custom.c
+++ b/drivers/hid/hid-sensor-custom.c
@@ -940,7 +940,7 @@ hid_sensor_register_platform_device(struct platform_device *pdev,
struct hid_sensor_hub_device *hsdev,
const struct hid_sensor_custom_match *match)
{
- char real_usage[HID_SENSOR_USAGE_LENGTH];
+ char real_usage[HID_SENSOR_USAGE_LENGTH] = { 0 };
struct platform_device *custom_pdev;
const char *dev_name;
char *c;
diff --git a/drivers/hid/hid-topre.c b/drivers/hid/hid-topre.c
index 88a91cdad5f8..d1d5ca310ead 100644
--- a/drivers/hid/hid-topre.c
+++ b/drivers/hid/hid-topre.c
@@ -36,6 +36,8 @@ static __u8 *topre_report_fixup(struct hid_device *hdev, __u8 *rdesc,
static const struct hid_device_id topre_id_table[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_TOPRE,
USB_DEVICE_ID_TOPRE_REALFORCE_R2_108) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_TOPRE,
+ USB_DEVICE_ID_TOPRE_REALFORCE_R2_87) },
{ }
};
MODULE_DEVICE_TABLE(hid, topre_id_table);
diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.c b/drivers/hid/intel-ish-hid/ishtp/bus.c
index 81385ab37fa9..7fc738a22375 100644
--- a/drivers/hid/intel-ish-hid/ishtp/bus.c
+++ b/drivers/hid/intel-ish-hid/ishtp/bus.c
@@ -241,8 +241,8 @@ static int ishtp_cl_bus_match(struct device *dev, struct device_driver *drv)
struct ishtp_cl_device *device = to_ishtp_cl_device(dev);
struct ishtp_cl_driver *driver = to_ishtp_cl_driver(drv);
- return guid_equal(&driver->id[0].guid,
- &device->fw_client->props.protocol_name);
+ return device->fw_client ? guid_equal(&driver->id[0].guid,
+ &device->fw_client->props.protocol_name) : 0;
}
/**
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index 9dc27e5d367a..da51b50787df 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -409,6 +409,10 @@ void vmbus_disconnect(void)
*/
struct vmbus_channel *relid2channel(u32 relid)
{
+ if (vmbus_connection.channels == NULL) {
+ pr_warn_once("relid2channel: relid=%d: No channels mapped!\n", relid);
+ return NULL;
+ }
if (WARN_ON(relid >= MAX_CHANNEL_RELIDS))
return NULL;
return READ_ONCE(vmbus_connection.channels[relid]);
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index d193ed3cb35e..c7ebef1990c8 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -174,7 +174,7 @@ static int hwmon_thermal_set_trips(struct thermal_zone_device *tz, int low, int
struct hwmon_thermal_data *tdata = tz->devdata;
struct hwmon_device *hwdev = to_hwmon_device(tdata->dev);
const struct hwmon_chip_info *chip = hwdev->chip;
- const struct hwmon_channel_info **info = chip->info;
+ const struct hwmon_channel_info * const *info = chip->info;
unsigned int i;
int err;
@@ -253,7 +253,7 @@ static int hwmon_thermal_register_sensors(struct device *dev)
{
struct hwmon_device *hwdev = to_hwmon_device(dev);
const struct hwmon_chip_info *chip = hwdev->chip;
- const struct hwmon_channel_info **info = chip->info;
+ const struct hwmon_channel_info * const *info = chip->info;
void *drvdata = dev_get_drvdata(dev);
int i;
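
Both hwmon hunks only add a const qualifier, but it is that qualifier which makes the local variable compatible with chip->info, an array of const pointers to const data. A minimal user-space sketch of why the extra const is required (struct and names here are illustrative, not the hwmon API):

#include <stdio.h>

struct info { int type; };

int main(void)
{
	static const struct info a = { 1 }, b = { 2 };
	/* array of const pointers to const data, like chip->info */
	static const struct info *const table[] = { &a, &b, NULL };
	/* plain "const struct info **" would discard the const
	 * qualifier on the array elements and draw a diagnostic */
	const struct info *const *p;

	for (p = table; *p; p++)
		printf("%d\n", (*p)->type);
	return 0;
}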
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c
index 1ea8f173cca0..4c15fae534f3 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c
@@ -472,7 +472,7 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
if (etm4x_sspcicrn_present(drvdata, i))
etm4x_relaxed_write32(csa, config->ss_pe_cmp[i], TRCSSPCICRn(i));
}
- for (i = 0; i < drvdata->nr_addr_cmp; i++) {
+ for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
etm4x_relaxed_write64(csa, config->addr_val[i], TRCACVRn(i));
etm4x_relaxed_write64(csa, config->addr_acc[i], TRCACATRn(i));
}
@@ -1070,25 +1070,21 @@ static bool etm4_init_iomem_access(struct etmv4_drvdata *drvdata,
struct csdev_access *csa)
{
u32 devarch = readl_relaxed(drvdata->base + TRCDEVARCH);
- u32 idr1 = readl_relaxed(drvdata->base + TRCIDR1);
/*
* All ETMs must implement TRCDEVARCH to indicate that
- * the component is an ETMv4. To support any broken
- * implementations we fall back to TRCIDR1 check, which
- * is not really reliable.
+ * the component is an ETMv4. Even though TRCIDR1 also
+ * contains the information, it is part of the "Trace"
+ * registers and must be accessed over MMIO with the OSLK
+ * cleared. But we cannot touch the OSLK until we are
+ * sure this is an ETM. So rely only on the TRCDEVARCH.
*/
- if ((devarch & ETM_DEVARCH_ID_MASK) == ETM_DEVARCH_ETMv4x_ARCH) {
- drvdata->arch = etm_devarch_to_arch(devarch);
- } else {
- pr_warn("CPU%d: ETM4x incompatible TRCDEVARCH: %x, falling back to TRCIDR1\n",
- smp_processor_id(), devarch);
-
- if (ETM_TRCIDR1_ARCH_MAJOR(idr1) != ETM_TRCIDR1_ARCH_ETMv4)
- return false;
- drvdata->arch = etm_trcidr_to_arch(idr1);
+ if ((devarch & ETM_DEVARCH_ID_MASK) != ETM_DEVARCH_ETMv4x_ARCH) {
+ pr_warn_once("TRCDEVARCH doesn't match ETMv4 architecture\n");
+ return false;
}
+ drvdata->arch = etm_devarch_to_arch(devarch);
*csa = CSDEV_ACCESS_IOMEM(drvdata->base);
return true;
}
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h
index 434f4e95ee17..27c8a9901868 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.h
+++ b/drivers/hwtracing/coresight/coresight-etm4x.h
@@ -753,14 +753,12 @@
* TRCDEVARCH - CoreSight architected register
* - Bits[15:12] - Major version
* - Bits[19:16] - Minor version
- * TRCIDR1 - ETM architected register
- * - Bits[11:8] - Major version
- * - Bits[7:4] - Minor version
- * We must rely on TRCDEVARCH for the version information,
- * however we don't want to break the support for potential
- * old implementations which might not implement it. Thus
- * we fall back to TRCIDR1 if TRCDEVARCH is not implemented
- * for memory mapped components.
+ *
+ * We must rely only on TRCDEVARCH for the version information. Even though
+ * TRCIDR1 also provides the architecture version, it is a "Trace" register
+ * and as such must be accessed only with Trace power domain ON. This may
+ * not be available at probe time.
+ *
* Now to make certain decisions easier based on the version
* we use an internal representation of the version in the
* driver, as follows:
@@ -786,12 +784,6 @@ static inline u8 etm_devarch_to_arch(u32 devarch)
ETM_DEVARCH_REVISION(devarch));
}
-static inline u8 etm_trcidr_to_arch(u32 trcidr1)
-{
- return ETM_ARCH_VERSION(ETM_TRCIDR1_ARCH_MAJOR(trcidr1),
- ETM_TRCIDR1_ARCH_MINOR(trcidr1));
-}
-
enum etm_impdef_type {
ETM4_IMPDEF_HISI_CORE_COMMIT,
ETM4_IMPDEF_FEATURE_MAX,
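
The comment above documents where TRCDEVARCH keeps its version fields: major in bits[15:12], minor in bits[19:16]. A small stand-alone sketch of that extraction (the register value below is invented for illustration):

#include <stdint.h>
#include <stdio.h>

/* field positions per the TRCDEVARCH comment above */
#define TRCDEVARCH_MAJOR(x)	(((x) >> 12) & 0xf)
#define TRCDEVARCH_MINOR(x)	(((x) >> 16) & 0xf)

int main(void)
{
	uint32_t devarch = 0x47724a13;	/* hypothetical readout */

	printf("ETM architecture v%u.%u\n",
	       (unsigned)TRCDEVARCH_MAJOR(devarch),
	       (unsigned)TRCDEVARCH_MINOR(devarch));
	return 0;
}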
diff --git a/drivers/i2c/busses/i2c-mchp-pci1xxxx.c b/drivers/i2c/busses/i2c-mchp-pci1xxxx.c
index 09af75921147..b21ffd6df927 100644
--- a/drivers/i2c/busses/i2c-mchp-pci1xxxx.c
+++ b/drivers/i2c/busses/i2c-mchp-pci1xxxx.c
@@ -48,9 +48,9 @@
* SR_HOLD_TIME_XK_TICKS field will indicate the number of ticks of the
* baud clock required to program 'Hold Time' at X KHz.
*/
-#define SR_HOLD_TIME_100K_TICKS 133
-#define SR_HOLD_TIME_400K_TICKS 20
-#define SR_HOLD_TIME_1000K_TICKS 11
+#define SR_HOLD_TIME_100K_TICKS 150
+#define SR_HOLD_TIME_400K_TICKS 20
+#define SR_HOLD_TIME_1000K_TICKS 12
#define SMB_CORE_COMPLETION_REG_OFF3 (SMBUS_MAST_CORE_ADDR_BASE + 0x23)
@@ -65,17 +65,17 @@
* the baud clock required to program 'fair idle delay' at X KHz. Fair idle
* delay establishes the MCTP T(IDLE_DELAY) period.
*/
-#define FAIR_BUS_IDLE_MIN_100K_TICKS 969
-#define FAIR_BUS_IDLE_MIN_400K_TICKS 157
-#define FAIR_BUS_IDLE_MIN_1000K_TICKS 157
+#define FAIR_BUS_IDLE_MIN_100K_TICKS 992
+#define FAIR_BUS_IDLE_MIN_400K_TICKS 500
+#define FAIR_BUS_IDLE_MIN_1000K_TICKS 500
/*
* FAIR_IDLE_DELAY_XK_TICKS field will indicate the number of ticks of the
* baud clock required to satisfy the fairness protocol at X KHz.
*/
-#define FAIR_IDLE_DELAY_100K_TICKS 1000
-#define FAIR_IDLE_DELAY_400K_TICKS 500
-#define FAIR_IDLE_DELAY_1000K_TICKS 500
+#define FAIR_IDLE_DELAY_100K_TICKS 963
+#define FAIR_IDLE_DELAY_400K_TICKS 156
+#define FAIR_IDLE_DELAY_1000K_TICKS 156
#define SMB_IDLE_SCALING_100K \
((FAIR_IDLE_DELAY_100K_TICKS << 16) | FAIR_BUS_IDLE_MIN_100K_TICKS)
@@ -105,7 +105,7 @@
*/
#define BUS_CLK_100K_LOW_PERIOD_TICKS 156
#define BUS_CLK_400K_LOW_PERIOD_TICKS 41
-#define BUS_CLK_1000K_LOW_PERIOD_TICKS 15
+#define BUS_CLK_1000K_LOW_PERIOD_TICKS 15
/*
* BUS_CLK_XK_HIGH_PERIOD_TICKS field defines the number of I2C Baud Clock
@@ -131,7 +131,7 @@
*/
#define CLK_SYNC_100K 4
#define CLK_SYNC_400K 4
-#define CLK_SYNC_1000K 4
+#define CLK_SYNC_1000K 4
#define SMB_CORE_DATA_TIMING_REG_OFF (SMBUS_MAST_CORE_ADDR_BASE + 0x40)
@@ -142,25 +142,25 @@
* determines the SCLK hold time following SDAT driven low during the first
* START bit in a transfer.
*/
-#define FIRST_START_HOLD_100K_TICKS 22
-#define FIRST_START_HOLD_400K_TICKS 16
-#define FIRST_START_HOLD_1000K_TICKS 6
+#define FIRST_START_HOLD_100K_TICKS 23
+#define FIRST_START_HOLD_400K_TICKS 8
+#define FIRST_START_HOLD_1000K_TICKS 12
/*
* STOP_SETUP_XK_TICKS will indicate the number of ticks of the baud clock
* required to program 'STOP_SETUP' timer at X KHz. This timer determines the
* SDAT setup time from the rising edge of SCLK for a STOP condition.
*/
-#define STOP_SETUP_100K_TICKS 157
+#define STOP_SETUP_100K_TICKS 150
#define STOP_SETUP_400K_TICKS 20
-#define STOP_SETUP_1000K_TICKS 12
+#define STOP_SETUP_1000K_TICKS 12
/*
* RESTART_SETUP_XK_TICKS will indicate the number of ticks of the baud clock
* required to program 'RESTART_SETUP' timer at X KHz. This timer determines the
* SDAT setup time from the rising edge of SCLK for a repeated START condition.
*/
-#define RESTART_SETUP_100K_TICKS 157
+#define RESTART_SETUP_100K_TICKS 156
#define RESTART_SETUP_400K_TICKS 20
#define RESTART_SETUP_1000K_TICKS 12
@@ -169,7 +169,7 @@
* required to program 'DATA_HOLD' timer at X KHz. This timer determines the
* SDAT hold time following SCLK driven low.
*/
-#define DATA_HOLD_100K_TICKS 2
+#define DATA_HOLD_100K_TICKS 12
#define DATA_HOLD_400K_TICKS 2
#define DATA_HOLD_1000K_TICKS 2
@@ -190,35 +190,35 @@
* Bus Idle Minimum time = BUS_IDLE_MIN[7:0] x Baud_Clock_Period x
* (BUS_IDLE_MIN_XK_TICKS[7] ? 4 : 1)
*/
-#define BUS_IDLE_MIN_100K_TICKS 167UL
-#define BUS_IDLE_MIN_400K_TICKS 139UL
-#define BUS_IDLE_MIN_1000K_TICKS 133UL
+#define BUS_IDLE_MIN_100K_TICKS 36UL
+#define BUS_IDLE_MIN_400K_TICKS 10UL
+#define BUS_IDLE_MIN_1000K_TICKS 4UL
/*
* CTRL_CUM_TIME_OUT_XK_TICKS defines SMBus Controller Cumulative Time-Out.
* SMBus Controller Cumulative Time-Out duration =
* CTRL_CUM_TIME_OUT_XK_TICKS[7:0] x Baud_Clock_Period x 2048
*/
-#define CTRL_CUM_TIME_OUT_100K_TICKS 159
-#define CTRL_CUM_TIME_OUT_400K_TICKS 159
-#define CTRL_CUM_TIME_OUT_1000K_TICKS 159
+#define CTRL_CUM_TIME_OUT_100K_TICKS 76
+#define CTRL_CUM_TIME_OUT_400K_TICKS 76
+#define CTRL_CUM_TIME_OUT_1000K_TICKS 76
/*
* TARGET_CUM_TIME_OUT_XK_TICKS defines SMBus Target Cumulative Time-Out duration.
* SMBus Target Cumulative Time-Out duration = TARGET_CUM_TIME_OUT_XK_TICKS[7:0] x
* Baud_Clock_Period x 4096
*/
-#define TARGET_CUM_TIME_OUT_100K_TICKS 199
-#define TARGET_CUM_TIME_OUT_400K_TICKS 199
-#define TARGET_CUM_TIME_OUT_1000K_TICKS 199
+#define TARGET_CUM_TIME_OUT_100K_TICKS 95
+#define TARGET_CUM_TIME_OUT_400K_TICKS 95
+#define TARGET_CUM_TIME_OUT_1000K_TICKS 95
/*
* CLOCK_HIGH_TIME_OUT_XK defines Clock High time out period.
* Clock High time out period = CLOCK_HIGH_TIME_OUT_XK[7:0] x Baud_Clock_Period x 8
*/
-#define CLOCK_HIGH_TIME_OUT_100K_TICKS 204
-#define CLOCK_HIGH_TIME_OUT_400K_TICKS 204
-#define CLOCK_HIGH_TIME_OUT_1000K_TICKS 204
+#define CLOCK_HIGH_TIME_OUT_100K_TICKS 97
+#define CLOCK_HIGH_TIME_OUT_400K_TICKS 97
+#define CLOCK_HIGH_TIME_OUT_1000K_TICKS 97
#define TO_SCALING_100K \
((BUS_IDLE_MIN_100K_TICKS << 24) | (CTRL_CUM_TIME_OUT_100K_TICKS << 16) | \
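
Following the Bus Idle Minimum comment above literally, the programmed duration is the 8-bit tick value times the baud clock period, scaled by 4 when bit 7 is set. A sketch of that arithmetic (the baud period is a placeholder, not a datasheet value):

#include <stdint.h>
#include <stdio.h>

static uint32_t bus_idle_min_ns(uint8_t ticks, uint32_t baud_period_ns)
{
	/* BUS_IDLE_MIN[7:0] x Baud_Clock_Period x (bit 7 ? 4 : 1) */
	return (uint32_t)ticks * baud_period_ns * ((ticks & 0x80) ? 4 : 1);
}

int main(void)
{
	printf("%u ns\n", (unsigned)bus_idle_min_ns(36, 100));
	return 0;
}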
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index a0af027db04c..2e575856c5cd 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -342,18 +342,18 @@ static int ocores_poll_wait(struct ocores_i2c *i2c)
* ocores_isr(), we just add our polling code around it.
*
* It can run in atomic context
+ *
+ * Return: 0 on success, -ETIMEDOUT on timeout
*/
-static void ocores_process_polling(struct ocores_i2c *i2c)
+static int ocores_process_polling(struct ocores_i2c *i2c)
{
- while (1) {
- irqreturn_t ret;
- int err;
+ irqreturn_t ret;
+ int err = 0;
+ while (1) {
err = ocores_poll_wait(i2c);
- if (err) {
- i2c->state = STATE_ERROR;
+ if (err)
break; /* timeout */
- }
ret = ocores_isr(-1, i2c);
if (ret == IRQ_NONE)
@@ -364,13 +364,15 @@ static void ocores_process_polling(struct ocores_i2c *i2c)
break;
}
}
+
+ return err;
}
static int ocores_xfer_core(struct ocores_i2c *i2c,
struct i2c_msg *msgs, int num,
bool polling)
{
- int ret;
+ int ret = 0;
u8 ctrl;
ctrl = oc_getreg(i2c, OCI2C_CONTROL);
@@ -388,15 +390,16 @@ static int ocores_xfer_core(struct ocores_i2c *i2c,
oc_setreg(i2c, OCI2C_CMD, OCI2C_CMD_START);
if (polling) {
- ocores_process_polling(i2c);
+ ret = ocores_process_polling(i2c);
} else {
- ret = wait_event_timeout(i2c->wait,
- (i2c->state == STATE_ERROR) ||
- (i2c->state == STATE_DONE), HZ);
- if (ret == 0) {
- ocores_process_timeout(i2c);
- return -ETIMEDOUT;
- }
+ if (wait_event_timeout(i2c->wait,
+ (i2c->state == STATE_ERROR) ||
+ (i2c->state == STATE_DONE), HZ) == 0)
+ ret = -ETIMEDOUT;
+ }
+ if (ret) {
+ ocores_process_timeout(i2c);
+ return ret;
}
return (i2c->state == STATE_DONE) ? num : -EIO;
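
The rework gives the polling helper the same error contract as the interrupt path, so one shared ocores_process_timeout()-style cleanup covers both modes. A reduced sketch of the resulting shape (the callbacks are stand-ins, not the driver's functions):

#include <errno.h>
#include <stdbool.h>

/* poll loop that reports its own timeout to the caller */
static int process_polling(bool (*wait_timed_out)(void *),
			   bool (*isr_done)(void *), void *ctx)
{
	int err = 0;

	while (1) {
		if (wait_timed_out(ctx)) {
			err = -ETIMEDOUT;	/* caller runs the cleanup */
			break;
		}
		if (isr_done(ctx))
			break;
	}
	return err;
}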
diff --git a/drivers/i2c/i2c-core-of.c b/drivers/i2c/i2c-core-of.c
index bce6b796e04c..545436b7dd53 100644
--- a/drivers/i2c/i2c-core-of.c
+++ b/drivers/i2c/i2c-core-of.c
@@ -178,6 +178,11 @@ static int of_i2c_notify(struct notifier_block *nb, unsigned long action,
return NOTIFY_OK;
}
+ /*
+ * Clear the flag before adding the device so that fw_devlink
+ * doesn't skip adding consumers to this device.
+ */
+ rd->dn->fwnode.flags &= ~FWNODE_FLAG_NOT_DEVICE;
client = of_i2c_register_device(adap, rd->dn);
if (IS_ERR(client)) {
dev_err(&adap->dev, "failed to create client for '%pOF'\n",
diff --git a/drivers/iio/accel/kionix-kx022a.c b/drivers/iio/accel/kionix-kx022a.c
index f866859855cd..1c3a72380fb8 100644
--- a/drivers/iio/accel/kionix-kx022a.c
+++ b/drivers/iio/accel/kionix-kx022a.c
@@ -864,7 +864,7 @@ static irqreturn_t kx022a_trigger_handler(int irq, void *p)
if (ret < 0)
goto err_read;
- iio_push_to_buffers_with_timestamp(idev, data->buffer, pf->timestamp);
+ iio_push_to_buffers_with_timestamp(idev, data->buffer, data->timestamp);
err_read:
iio_trigger_notify_done(idev->trig);
diff --git a/drivers/iio/adc/ad7791.c b/drivers/iio/adc/ad7791.c
index fee8d129a5f0..86effe8501b4 100644
--- a/drivers/iio/adc/ad7791.c
+++ b/drivers/iio/adc/ad7791.c
@@ -253,7 +253,7 @@ static const struct ad_sigma_delta_info ad7791_sigma_delta_info = {
.has_registers = true,
.addr_shift = 4,
.read_mask = BIT(3),
- .irq_flags = IRQF_TRIGGER_LOW,
+ .irq_flags = IRQF_TRIGGER_FALLING,
};
static int ad7791_read_raw(struct iio_dev *indio_dev,
diff --git a/drivers/iio/adc/ltc2497.c b/drivers/iio/adc/ltc2497.c
index 17370c5eb6fe..ec198c6f13d6 100644
--- a/drivers/iio/adc/ltc2497.c
+++ b/drivers/iio/adc/ltc2497.c
@@ -28,7 +28,6 @@ struct ltc2497_driverdata {
struct ltc2497core_driverdata common_ddata;
struct i2c_client *client;
u32 recv_size;
- u32 sub_lsb;
/*
* DMA (thus cache coherency maintenance) may require the
* transfer buffers to live in their own cache lines.
@@ -65,10 +64,10 @@ static int ltc2497_result_and_measure(struct ltc2497core_driverdata *ddata,
* equivalent to a sign extension.
*/
if (st->recv_size == 3) {
- *val = (get_unaligned_be24(st->data.d8) >> st->sub_lsb)
+ *val = (get_unaligned_be24(st->data.d8) >> 6)
- BIT(ddata->chip_info->resolution + 1);
} else {
- *val = (be32_to_cpu(st->data.d32) >> st->sub_lsb)
+ *val = (be32_to_cpu(st->data.d32) >> 6)
- BIT(ddata->chip_info->resolution + 1);
}
@@ -122,7 +121,6 @@ static int ltc2497_probe(struct i2c_client *client)
st->common_ddata.chip_info = chip_info;
resolution = chip_info->resolution;
- st->sub_lsb = 31 - (resolution + 1);
st->recv_size = BITS_TO_BYTES(resolution) + 1;
return ltc2497core_probe(dev, indio_dev);
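
The removed sub_lsb field is replaced by the constant shift of 6; after the shift, subtracting BIT(resolution + 1) rebases the offset-binary ADC code around zero, which is what the in-tree comment calls "equivalent to a sign extension". A worked stand-alone example (16-bit resolution assumed, raw word invented):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int resolution = 16;	/* assumed chip resolution */
	uint32_t raw = 0x02aaaac0;	/* hypothetical 32-bit bus word */

	/* drop the 6 sub-LSB bits, then recenter the offset-binary code */
	int32_t val = (int32_t)(raw >> 6) - (1 << (resolution + 1));

	printf("signed sample: %d\n", (int)val);
	return 0;
}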
diff --git a/drivers/iio/adc/max11410.c b/drivers/iio/adc/max11410.c
index b74b689ee7de..f6895bc9fc4b 100644
--- a/drivers/iio/adc/max11410.c
+++ b/drivers/iio/adc/max11410.c
@@ -414,13 +414,17 @@ static int max11410_sample(struct max11410_state *st, int *sample_raw,
if (!ret)
return -ETIMEDOUT;
} else {
+ int ret2;
+
/* Wait for status register Conversion Ready flag */
- ret = read_poll_timeout(max11410_read_reg, ret,
- ret || (val & MAX11410_STATUS_CONV_READY_BIT),
+ ret = read_poll_timeout(max11410_read_reg, ret2,
+ ret2 || (val & MAX11410_STATUS_CONV_READY_BIT),
5000, MAX11410_CONVERSION_TIMEOUT_MS * 1000,
true, st, MAX11410_REG_STATUS, &val);
if (ret)
return ret;
+ if (ret2)
+ return ret2;
}
/* Read ADC Data */
@@ -851,17 +855,21 @@ static int max11410_init_vref(struct device *dev,
static int max11410_calibrate(struct max11410_state *st, u32 cal_type)
{
- int ret, val;
+ int ret, ret2, val;
ret = max11410_write_reg(st, MAX11410_REG_CAL_START, cal_type);
if (ret)
return ret;
/* Wait for status register Calibration Ready flag */
- return read_poll_timeout(max11410_read_reg, ret,
- ret || (val & MAX11410_STATUS_CAL_READY_BIT),
- 50000, MAX11410_CALIB_TIMEOUT_MS * 1000, true,
- st, MAX11410_REG_STATUS, &val);
+ ret = read_poll_timeout(max11410_read_reg, ret2,
+ ret2 || (val & MAX11410_STATUS_CAL_READY_BIT),
+ 50000, MAX11410_CALIB_TIMEOUT_MS * 1000, true,
+ st, MAX11410_REG_STATUS, &val);
+ if (ret)
+ return ret;
+
+ return ret2;
}
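
The fix separates the two error sources of read_poll_timeout(): the macro's own verdict (0 or -ETIMEDOUT) and the return value of the register accessor it invokes. A sketch of that two-variable pattern; my_dev, my_read_status and MY_READY_BIT are placeholders:

#include <linux/bits.h>
#include <linux/iopoll.h>

#define MY_READY_BIT	BIT(0)	/* placeholder status bit */

struct my_dev;			/* placeholder device type */
int my_read_status(struct my_dev *dev, unsigned int *val);

static int wait_ready(struct my_dev *dev)
{
	unsigned int val;
	int ret, ret2;

	ret = read_poll_timeout(my_read_status, ret2,
				ret2 || (val & MY_READY_BIT),
				5000, 1000000, true, dev, &val);
	if (ret)
		return ret;	/* poll loop timed out */
	return ret2;		/* accessor error, or 0 on success */
}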
static int max11410_self_calibrate(struct max11410_state *st)
diff --git a/drivers/iio/adc/palmas_gpadc.c b/drivers/iio/adc/palmas_gpadc.c
index fd000345ec5c..849a697a467e 100644
--- a/drivers/iio/adc/palmas_gpadc.c
+++ b/drivers/iio/adc/palmas_gpadc.c
@@ -639,7 +639,7 @@ out:
static int palmas_gpadc_remove(struct platform_device *pdev)
{
- struct iio_dev *indio_dev = dev_to_iio_dev(&pdev->dev);
+ struct iio_dev *indio_dev = dev_get_drvdata(&pdev->dev);
struct palmas_gpadc *adc = iio_priv(indio_dev);
if (adc->wakeup1_enable || adc->wakeup2_enable)
diff --git a/drivers/iio/adc/qcom-spmi-adc5.c b/drivers/iio/adc/qcom-spmi-adc5.c
index e90c299c913a..c2d5e06f137a 100644
--- a/drivers/iio/adc/qcom-spmi-adc5.c
+++ b/drivers/iio/adc/qcom-spmi-adc5.c
@@ -628,12 +628,20 @@ static int adc5_get_fw_channel_data(struct adc5_chip *adc,
struct fwnode_handle *fwnode,
const struct adc5_data *data)
{
- const char *name = fwnode_get_name(fwnode), *channel_name;
+ const char *channel_name;
+ char *name;
u32 chan, value, varr[2];
u32 sid = 0;
int ret;
struct device *dev = adc->dev;
+ name = devm_kasprintf(dev, GFP_KERNEL, "%pfwP", fwnode);
+ if (!name)
+ return -ENOMEM;
+
+ /* Cut the address part */
+ name[strchrnul(name, '@') - name] = '\0';
+
ret = fwnode_property_read_u32(fwnode, "reg", &chan);
if (ret) {
dev_err(dev, "invalid channel number %s\n", name);
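
The '@unit-address' suffix is cut by writing a NUL at the first '@'; the kernel's strchrnul() returns the terminating NUL when no '@' exists, so that write is then a harmless no-op. The same idiom in portable user-space C, with strcspn() standing in for strchrnul():

#include <stdio.h>
#include <string.h>

int main(void)
{
	char name[] = "adc-chan@4f";		/* made-up fwnode name */

	name[strcspn(name, "@")] = '\0';	/* no-op if '@' is absent */
	printf("%s\n", name);			/* prints "adc-chan" */
	return 0;
}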
diff --git a/drivers/iio/adc/ti-ads7950.c b/drivers/iio/adc/ti-ads7950.c
index 2cc9a9bd9db6..263fc3a1b87e 100644
--- a/drivers/iio/adc/ti-ads7950.c
+++ b/drivers/iio/adc/ti-ads7950.c
@@ -634,6 +634,7 @@ static int ti_ads7950_probe(struct spi_device *spi)
st->chip.label = dev_name(&st->spi->dev);
st->chip.parent = &st->spi->dev;
st->chip.owner = THIS_MODULE;
+ st->chip.can_sleep = true;
st->chip.base = -1;
st->chip.ngpio = TI_ADS7950_NUM_GPIOS;
st->chip.get_direction = ti_ads7950_get_direction;
diff --git a/drivers/iio/dac/cio-dac.c b/drivers/iio/dac/cio-dac.c
index 791dd999cf29..18a64f72fc18 100644
--- a/drivers/iio/dac/cio-dac.c
+++ b/drivers/iio/dac/cio-dac.c
@@ -66,8 +66,8 @@ static int cio_dac_write_raw(struct iio_dev *indio_dev,
if (mask != IIO_CHAN_INFO_RAW)
return -EINVAL;
- /* DAC can only accept up to a 16-bit value */
- if ((unsigned int)val > 65535)
+ /* DAC can only accept up to a 12-bit value */
+ if ((unsigned int)val > 4095)
return -EINVAL;
priv->chan_out_states[chan->channel] = val;
diff --git a/drivers/iio/imu/Kconfig b/drivers/iio/imu/Kconfig
index f1d7d4b5e222..c2f97629e9cd 100644
--- a/drivers/iio/imu/Kconfig
+++ b/drivers/iio/imu/Kconfig
@@ -47,6 +47,7 @@ config ADIS16480
depends on SPI
select IIO_ADIS_LIB
select IIO_ADIS_LIB_BUFFER if IIO_BUFFER
+ select CRC32
help
Say yes here to build support for Analog Devices ADIS16375, ADIS16480,
ADIS16485, ADIS16488 inertial sensors.
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index 80c78bd6bbef..a7a080bed180 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -203,24 +203,27 @@ static ssize_t iio_buffer_write(struct file *filp, const char __user *buf,
break;
}
+ if (filp->f_flags & O_NONBLOCK) {
+ if (!written)
+ ret = -EAGAIN;
+ break;
+ }
+
wait_woken(&wait, TASK_INTERRUPTIBLE,
MAX_SCHEDULE_TIMEOUT);
continue;
}
ret = rb->access->write(rb, n - written, buf + written);
- if (ret == 0 && (filp->f_flags & O_NONBLOCK))
- ret = -EAGAIN;
+ if (ret < 0)
+ break;
- if (ret > 0) {
- written += ret;
- if (written != n && !(filp->f_flags & O_NONBLOCK))
- continue;
- }
- } while (ret == 0);
+ written += ret;
+
+ } while (written != n);
remove_wait_queue(&rb->pollq, &wait);
- return ret < 0 ? ret : n;
+ return ret < 0 ? ret : written;
}
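
The rewritten loop changes the O_NONBLOCK semantics: a write that already moved some data returns the partial count, and -EAGAIN is reserved for a write that moved nothing. A compact restatement of that rule (hypothetical helper, not the driver's API):

#include <errno.h>
#include <stdbool.h>
#include <sys/types.h>

/* data already accepted wins over -EAGAIN */
static ssize_t nonblock_result(size_t written, bool would_block)
{
	if (would_block && written == 0)
		return -EAGAIN;
	return (ssize_t)written;
}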
/**
diff --git a/drivers/iio/light/cm32181.c b/drivers/iio/light/cm32181.c
index b1674a5bfa36..d4a34a3bf00d 100644
--- a/drivers/iio/light/cm32181.c
+++ b/drivers/iio/light/cm32181.c
@@ -429,6 +429,14 @@ static const struct iio_info cm32181_info = {
.attrs = &cm32181_attribute_group,
};
+static void cm32181_unregister_dummy_client(void *data)
+{
+ struct i2c_client *client = data;
+
+ /* Unregister the dummy client */
+ i2c_unregister_device(client);
+}
+
static int cm32181_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
@@ -460,6 +468,10 @@ static int cm32181_probe(struct i2c_client *client)
client = i2c_acpi_new_device(dev, 1, &board_info);
if (IS_ERR(client))
return PTR_ERR(client);
+
+ ret = devm_add_action_or_reset(dev, cm32181_unregister_dummy_client, client);
+ if (ret)
+ return ret;
}
cm32181 = iio_priv(indio_dev);
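
devm_add_action_or_reset() ties the dummy client's lifetime to the probing device: the action runs automatically on later probe failure or unbind, and if registering the action itself fails it is invoked immediately. The general shape of that pattern, as a device-agnostic sketch:

#include <linux/device.h>
#include <linux/i2c.h>

static void unregister_second_client(void *data)
{
	i2c_unregister_device(data);
}

/* bind a manually created client to dev's devres stack, so no
 * explicit error-path or remove-time cleanup is needed */
static int adopt_client(struct device *dev, struct i2c_client *client)
{
	return devm_add_action_or_reset(dev, unregister_second_client,
					client);
}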
diff --git a/drivers/iio/light/vcnl4000.c b/drivers/iio/light/vcnl4000.c
index 6bdfce9747f9..5c44a36ab5b3 100644
--- a/drivers/iio/light/vcnl4000.c
+++ b/drivers/iio/light/vcnl4000.c
@@ -208,7 +208,6 @@ static int vcnl4000_init(struct vcnl4000_data *data)
data->rev = ret & 0xf;
data->al_scale = 250000;
- mutex_init(&data->vcnl4000_lock);
return data->chip_spec->set_power_state(data, true);
};
@@ -1367,6 +1366,8 @@ static int vcnl4000_probe(struct i2c_client *client)
data->id = id->driver_data;
data->chip_spec = &vcnl4000_chip_spec_cfg[data->id];
+ mutex_init(&data->vcnl4000_lock);
+
ret = data->chip_spec->init(data);
if (ret < 0)
return ret;
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 308155937713..6b9563d4f23c 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -624,22 +624,11 @@ static inline unsigned short cma_family(struct rdma_id_private *id_priv)
return id_priv->id.route.addr.src_addr.ss_family;
}
-static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey)
+static int cma_set_default_qkey(struct rdma_id_private *id_priv)
{
struct ib_sa_mcmember_rec rec;
int ret = 0;
- if (id_priv->qkey) {
- if (qkey && id_priv->qkey != qkey)
- return -EINVAL;
- return 0;
- }
-
- if (qkey) {
- id_priv->qkey = qkey;
- return 0;
- }
-
switch (id_priv->id.ps) {
case RDMA_PS_UDP:
case RDMA_PS_IB:
@@ -659,6 +648,16 @@ static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey)
return ret;
}
+static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey)
+{
+ if (!qkey ||
+ (id_priv->qkey && (id_priv->qkey != qkey)))
+ return -EINVAL;
+
+ id_priv->qkey = qkey;
+ return 0;
+}
+
static void cma_translate_ib(struct sockaddr_ib *sib, struct rdma_dev_addr *dev_addr)
{
dev_addr->dev_type = ARPHRD_INFINIBAND;
@@ -1229,7 +1228,7 @@ static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
*qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;
if (id_priv->id.qp_type == IB_QPT_UD) {
- ret = cma_set_qkey(id_priv, 0);
+ ret = cma_set_default_qkey(id_priv);
if (ret)
return ret;
@@ -4569,7 +4568,10 @@ static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
memset(&rep, 0, sizeof rep);
rep.status = status;
if (status == IB_SIDR_SUCCESS) {
- ret = cma_set_qkey(id_priv, qkey);
+ if (qkey)
+ ret = cma_set_qkey(id_priv, qkey);
+ else
+ ret = cma_set_default_qkey(id_priv);
if (ret)
return ret;
rep.qp_num = id_priv->qp_num;
@@ -4774,9 +4776,7 @@ static void cma_make_mc_event(int status, struct rdma_id_private *id_priv,
enum ib_gid_type gid_type;
struct net_device *ndev;
- if (!status)
- status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey));
- else
+ if (status)
pr_debug_ratelimited("RDMA CM: MULTICAST_ERROR: failed to join multicast. status %d\n",
status);
@@ -4804,7 +4804,7 @@ static void cma_make_mc_event(int status, struct rdma_id_private *id_priv,
}
event->param.ud.qp_num = 0xFFFFFF;
- event->param.ud.qkey = be32_to_cpu(multicast->rec.qkey);
+ event->param.ud.qkey = id_priv->qkey;
out:
if (ndev)
@@ -4823,8 +4823,11 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING)
goto out;
- cma_make_mc_event(status, id_priv, multicast, &event, mc);
- ret = cma_cm_event_handler(id_priv, &event);
+ ret = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey));
+ if (!ret) {
+ cma_make_mc_event(status, id_priv, multicast, &event, mc);
+ ret = cma_cm_event_handler(id_priv, &event);
+ }
rdma_destroy_ah_attr(&event.param.ud.ah_attr);
WARN_ON(ret);
@@ -4877,9 +4880,11 @@ static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
if (ret)
return ret;
- ret = cma_set_qkey(id_priv, 0);
- if (ret)
- return ret;
+ if (!id_priv->qkey) {
+ ret = cma_set_default_qkey(id_priv);
+ if (ret)
+ return ret;
+ }
cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid);
rec.qkey = cpu_to_be32(id_priv->qkey);
@@ -4956,9 +4961,6 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
cma_iboe_set_mgid(addr, &ib.rec.mgid, gid_type);
ib.rec.pkey = cpu_to_be16(0xffff);
- if (id_priv->id.ps == RDMA_PS_UDP)
- ib.rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
-
if (dev_addr->bound_dev_if)
ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
if (!ndev)
@@ -4984,6 +4986,9 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
if (err || !ib.rec.mtu)
return err ?: -EINVAL;
+ if (!id_priv->qkey)
+ cma_set_default_qkey(id_priv);
+
rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
&ib.rec.port_gid);
INIT_WORK(&mc->iboe_join.work, cma_iboe_join_work_handler);
@@ -5009,6 +5014,9 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
READ_ONCE(id_priv->state) != RDMA_CM_ADDR_RESOLVED))
return -EINVAL;
+ if (id_priv->id.qp_type != IB_QPT_UD)
+ return -EINVAL;
+
mc = kzalloc(sizeof(*mc), GFP_KERNEL);
if (!mc)
return -ENOMEM;
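
The series splits qkey handling into a strict setter for caller-supplied values and a separate default path. The resulting invariant, reduced to a few lines (illustrative only, not the RDMA CM API):

#include <linux/errno.h>
#include <linux/types.h>

/* explicit values must be non-zero and must not contradict an
 * already-set key; defaults are filled in elsewhere, and only
 * when no key exists yet */
static int set_qkey(u32 *cur, u32 qkey)
{
	if (!qkey || (*cur && *cur != qkey))
		return -EINVAL;
	*cur = qkey;
	return 0;
}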
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 11b1c1603aeb..b99b3cc283b6 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -532,6 +532,8 @@ static struct ib_ah *_rdma_create_ah(struct ib_pd *pd,
else
ret = device->ops.create_ah(ah, &init_attr, NULL);
if (ret) {
+ if (ah->sgid_attr)
+ rdma_put_gid_attr(ah->sgid_attr);
kfree(ah);
return ERR_PTR(ret);
}
diff --git a/drivers/infiniband/hw/erdma/erdma_cq.c b/drivers/infiniband/hw/erdma/erdma_cq.c
index cabd8678b355..7bc354273d4e 100644
--- a/drivers/infiniband/hw/erdma/erdma_cq.c
+++ b/drivers/infiniband/hw/erdma/erdma_cq.c
@@ -65,7 +65,7 @@ static const enum ib_wc_opcode wc_mapping_table[ERDMA_NUM_OPCODES] = {
[ERDMA_OP_LOCAL_INV] = IB_WC_LOCAL_INV,
[ERDMA_OP_READ_WITH_INV] = IB_WC_RDMA_READ,
[ERDMA_OP_ATOMIC_CAS] = IB_WC_COMP_SWAP,
- [ERDMA_OP_ATOMIC_FAD] = IB_WC_FETCH_ADD,
+ [ERDMA_OP_ATOMIC_FAA] = IB_WC_FETCH_ADD,
};
static const struct {
diff --git a/drivers/infiniband/hw/erdma/erdma_hw.h b/drivers/infiniband/hw/erdma/erdma_hw.h
index 4c38d99c73f1..37ad1bb1917c 100644
--- a/drivers/infiniband/hw/erdma/erdma_hw.h
+++ b/drivers/infiniband/hw/erdma/erdma_hw.h
@@ -441,7 +441,7 @@ struct erdma_reg_mr_sqe {
};
/* EQ related. */
-#define ERDMA_DEFAULT_EQ_DEPTH 256
+#define ERDMA_DEFAULT_EQ_DEPTH 4096
/* ceqe */
#define ERDMA_CEQE_HDR_DB_MASK BIT_ULL(63)
@@ -491,7 +491,7 @@ enum erdma_opcode {
ERDMA_OP_LOCAL_INV = 15,
ERDMA_OP_READ_WITH_INV = 16,
ERDMA_OP_ATOMIC_CAS = 17,
- ERDMA_OP_ATOMIC_FAD = 18,
+ ERDMA_OP_ATOMIC_FAA = 18,
ERDMA_NUM_OPCODES = 19,
ERDMA_OP_INVALID = ERDMA_NUM_OPCODES + 1
};
diff --git a/drivers/infiniband/hw/erdma/erdma_main.c b/drivers/infiniband/hw/erdma/erdma_main.c
index 5dc31e5df5cb..4a29a53a6652 100644
--- a/drivers/infiniband/hw/erdma/erdma_main.c
+++ b/drivers/infiniband/hw/erdma/erdma_main.c
@@ -56,7 +56,7 @@ done:
static int erdma_enum_and_get_netdev(struct erdma_dev *dev)
{
struct net_device *netdev;
- int ret = -ENODEV;
+ int ret = -EPROBE_DEFER;
/* Already bound to a net_device, so we skip. */
if (dev->netdev)
diff --git a/drivers/infiniband/hw/erdma/erdma_qp.c b/drivers/infiniband/hw/erdma/erdma_qp.c
index d088d6bef431..44923c51a01b 100644
--- a/drivers/infiniband/hw/erdma/erdma_qp.c
+++ b/drivers/infiniband/hw/erdma/erdma_qp.c
@@ -405,7 +405,7 @@ static int erdma_push_one_sqe(struct erdma_qp *qp, u16 *pi,
FIELD_PREP(ERDMA_SQE_MR_MTT_CNT_MASK,
mr->mem.mtt_nents);
- if (mr->mem.mtt_nents < ERDMA_MAX_INLINE_MTT_ENTRIES) {
+ if (mr->mem.mtt_nents <= ERDMA_MAX_INLINE_MTT_ENTRIES) {
attrs |= FIELD_PREP(ERDMA_SQE_MR_MTT_TYPE_MASK, 0);
/* Copy SGLs to SQE content to accelerate */
memcpy(get_queue_entry(qp->kern_qp.sq_buf, idx + 1,
@@ -439,7 +439,7 @@ static int erdma_push_one_sqe(struct erdma_qp *qp, u16 *pi,
cpu_to_le64(atomic_wr(send_wr)->compare_add);
} else {
wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_OPCODE_MASK,
- ERDMA_OP_ATOMIC_FAD);
+ ERDMA_OP_ATOMIC_FAA);
atomic_sqe->fetchadd_swap_data =
cpu_to_le64(atomic_wr(send_wr)->compare_add);
}
diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.h b/drivers/infiniband/hw/erdma/erdma_verbs.h
index e0a993bc032a..131cf5f40982 100644
--- a/drivers/infiniband/hw/erdma/erdma_verbs.h
+++ b/drivers/infiniband/hw/erdma/erdma_verbs.h
@@ -11,7 +11,7 @@
/* RDMA Capability. */
#define ERDMA_MAX_PD (128 * 1024)
-#define ERDMA_MAX_SEND_WR 4096
+#define ERDMA_MAX_SEND_WR 8192
#define ERDMA_MAX_ORD 128
#define ERDMA_MAX_IRD 128
#define ERDMA_MAX_SGE_RD 1
diff --git a/drivers/infiniband/hw/irdma/cm.c b/drivers/infiniband/hw/irdma/cm.c
index 195aa9ea18b6..8817864154af 100644
--- a/drivers/infiniband/hw/irdma/cm.c
+++ b/drivers/infiniband/hw/irdma/cm.c
@@ -1458,13 +1458,15 @@ static int irdma_send_fin(struct irdma_cm_node *cm_node)
* irdma_find_listener - find a cm node listening on this addr-port pair
* @cm_core: cm's core
* @dst_addr: listener ip addr
+ * @ipv4: flag indicating IPv4 when true
* @dst_port: listener tcp port num
* @vlan_id: virtual LAN ID
* @listener_state: state to match with listen node's
*/
static struct irdma_cm_listener *
-irdma_find_listener(struct irdma_cm_core *cm_core, u32 *dst_addr, u16 dst_port,
- u16 vlan_id, enum irdma_cm_listener_state listener_state)
+irdma_find_listener(struct irdma_cm_core *cm_core, u32 *dst_addr, bool ipv4,
+ u16 dst_port, u16 vlan_id,
+ enum irdma_cm_listener_state listener_state)
{
struct irdma_cm_listener *listen_node;
static const u32 ip_zero[4] = { 0, 0, 0, 0 };
@@ -1477,7 +1479,7 @@ irdma_find_listener(struct irdma_cm_core *cm_core, u32 *dst_addr, u16 dst_port,
list_for_each_entry (listen_node, &cm_core->listen_list, list) {
memcpy(listen_addr, listen_node->loc_addr, sizeof(listen_addr));
listen_port = listen_node->loc_port;
- if (listen_port != dst_port ||
+ if (listen_node->ipv4 != ipv4 || listen_port != dst_port ||
!(listener_state & listen_node->listener_state))
continue;
/* compare node pair, return node handle if a match */
@@ -2902,9 +2904,10 @@ irdma_make_listen_node(struct irdma_cm_core *cm_core,
unsigned long flags;
/* cannot have multiple matching listeners */
- listener = irdma_find_listener(cm_core, cm_info->loc_addr,
- cm_info->loc_port, cm_info->vlan_id,
- IRDMA_CM_LISTENER_EITHER_STATE);
+ listener =
+ irdma_find_listener(cm_core, cm_info->loc_addr, cm_info->ipv4,
+ cm_info->loc_port, cm_info->vlan_id,
+ IRDMA_CM_LISTENER_EITHER_STATE);
if (listener &&
listener->listener_state == IRDMA_CM_LISTENER_ACTIVE_STATE) {
refcount_dec(&listener->refcnt);
@@ -3153,6 +3156,7 @@ void irdma_receive_ilq(struct irdma_sc_vsi *vsi, struct irdma_puda_buf *rbuf)
listener = irdma_find_listener(cm_core,
cm_info.loc_addr,
+ cm_info.ipv4,
cm_info.loc_port,
cm_info.vlan_id,
IRDMA_CM_LISTENER_ACTIVE_STATE);
diff --git a/drivers/infiniband/hw/irdma/cm.h b/drivers/infiniband/hw/irdma/cm.h
index 19c284975fc7..7feadb3e1eda 100644
--- a/drivers/infiniband/hw/irdma/cm.h
+++ b/drivers/infiniband/hw/irdma/cm.h
@@ -41,7 +41,7 @@
#define TCP_OPTIONS_PADDING 3
#define IRDMA_DEFAULT_RETRYS 64
-#define IRDMA_DEFAULT_RETRANS 8
+#define IRDMA_DEFAULT_RETRANS 32
#define IRDMA_DEFAULT_TTL 0x40
#define IRDMA_DEFAULT_RTT_VAR 6
#define IRDMA_DEFAULT_SS_THRESH 0x3fffffff
diff --git a/drivers/infiniband/hw/irdma/hw.c b/drivers/infiniband/hw/irdma/hw.c
index 2e1e2bad0401..43dfa4761f06 100644
--- a/drivers/infiniband/hw/irdma/hw.c
+++ b/drivers/infiniband/hw/irdma/hw.c
@@ -41,6 +41,7 @@ static enum irdma_hmc_rsrc_type iw_hmc_obj_types[] = {
IRDMA_HMC_IW_XFFL,
IRDMA_HMC_IW_Q1,
IRDMA_HMC_IW_Q1FL,
+ IRDMA_HMC_IW_PBLE,
IRDMA_HMC_IW_TIMER,
IRDMA_HMC_IW_FSIMC,
IRDMA_HMC_IW_FSIAV,
@@ -827,6 +828,8 @@ static int irdma_create_hmc_objs(struct irdma_pci_f *rf, bool privileged,
info.entry_type = rf->sd_type;
for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) {
+ if (iw_hmc_obj_types[i] == IRDMA_HMC_IW_PBLE)
+ continue;
if (dev->hmc_info->hmc_obj[iw_hmc_obj_types[i]].cnt) {
info.rsrc_type = iw_hmc_obj_types[i];
info.count = dev->hmc_info->hmc_obj[info.rsrc_type].cnt;
diff --git a/drivers/infiniband/hw/irdma/utils.c b/drivers/infiniband/hw/irdma/utils.c
index 445e69e86409..7887230c867b 100644
--- a/drivers/infiniband/hw/irdma/utils.c
+++ b/drivers/infiniband/hw/irdma/utils.c
@@ -2595,7 +2595,10 @@ void irdma_generate_flush_completions(struct irdma_qp *iwqp)
/* remove the SQ WR by moving SQ tail*/
IRDMA_RING_SET_TAIL(*sq_ring,
sq_ring->tail + qp->sq_wrtrk_array[sq_ring->tail].quanta);
-
+ if (cmpl->cpi.op_type == IRDMAQP_OP_NOP) {
+ kfree(cmpl);
+ continue;
+ }
ibdev_dbg(iwqp->iwscq->ibcq.device,
"DEV: %s: adding wr_id = 0x%llx SQ Completion to list qp_id=%d\n",
__func__, cmpl->cpi.wr_id, qp->qp_id);
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 5b988db66b8f..5d45de223c43 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -442,6 +442,10 @@ static int translate_eth_ext_proto_oper(u32 eth_proto_oper, u16 *active_speed,
*active_width = IB_WIDTH_2X;
*active_speed = IB_SPEED_NDR;
break;
+ case MLX5E_PROT_MASK(MLX5E_400GAUI_8):
+ *active_width = IB_WIDTH_8X;
+ *active_speed = IB_SPEED_HDR;
+ break;
case MLX5E_PROT_MASK(MLX5E_400GAUI_4_400GBASE_CR4_KR4):
*active_width = IB_WIDTH_4X;
*active_speed = IB_SPEED_NDR;
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index f642ec8e92dd..29131f1a2f06 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -781,9 +781,6 @@ static void xpad_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char *d
input_report_key(dev, BTN_C, data[8]);
input_report_key(dev, BTN_Z, data[9]);
- /* Profile button has a value of 0-3, so it is reported as an axis */
- if (xpad->mapping & MAP_PROFILE_BUTTON)
- input_report_abs(dev, ABS_PROFILE, data[34]);
input_sync(dev);
}
@@ -1061,6 +1058,10 @@ static void xpadone_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char
(__u16) le16_to_cpup((__le16 *)(data + 8)));
}
+ /* Profile button has a value of 0-3, so it is reported as an axis */
+ if (xpad->mapping & MAP_PROFILE_BUTTON)
+ input_report_abs(dev, ABS_PROFILE, data[34]);
+
/* paddle handling */
/* based on SDL's SDL_hidapi_xboxone.c */
if (xpad->mapping & MAP_PADDLES) {
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 989228b5a0a4..e2c11d9f3868 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -852,8 +852,8 @@ static void alps_process_packet_v6(struct psmouse *psmouse)
x = y = z = 0;
/* Divide by 4 since the trackpoint's speed is too fast */
- input_report_rel(dev2, REL_X, (char)x / 4);
- input_report_rel(dev2, REL_Y, -((char)y / 4));
+ input_report_rel(dev2, REL_X, (s8)x / 4);
+ input_report_rel(dev2, REL_Y, -((s8)y / 4));
psmouse_report_standard_buttons(dev2, packet[3]);
@@ -1104,8 +1104,8 @@ static void alps_process_trackstick_packet_v7(struct psmouse *psmouse)
((packet[3] & 0x20) << 1);
z = (packet[5] & 0x3f) | ((packet[3] & 0x80) >> 1);
- input_report_rel(dev2, REL_X, (char)x);
- input_report_rel(dev2, REL_Y, -((char)y));
+ input_report_rel(dev2, REL_X, (s8)x);
+ input_report_rel(dev2, REL_Y, -((s8)y));
input_report_abs(dev2, ABS_PRESSURE, z);
psmouse_report_standard_buttons(dev2, packet[1]);
@@ -2294,20 +2294,20 @@ static int alps_get_v3_v7_resolution(struct psmouse *psmouse, int reg_pitch)
if (reg < 0)
return reg;
- x_pitch = (char)(reg << 4) >> 4; /* sign extend lower 4 bits */
+ x_pitch = (s8)(reg << 4) >> 4; /* sign extend lower 4 bits */
x_pitch = 50 + 2 * x_pitch; /* In 0.1 mm units */
- y_pitch = (char)reg >> 4; /* sign extend upper 4 bits */
+ y_pitch = (s8)reg >> 4; /* sign extend upper 4 bits */
y_pitch = 36 + 2 * y_pitch; /* In 0.1 mm units */
reg = alps_command_mode_read_reg(psmouse, reg_pitch + 1);
if (reg < 0)
return reg;
- x_electrode = (char)(reg << 4) >> 4; /* sign extend lower 4 bits */
+ x_electrode = (s8)(reg << 4) >> 4; /* sign extend lower 4 bits */
x_electrode = 17 + x_electrode;
- y_electrode = (char)reg >> 4; /* sign extend upper 4 bits */
+ y_electrode = (s8)reg >> 4; /* sign extend upper 4 bits */
y_electrode = 13 + y_electrode;
x_phys = x_pitch * (x_electrode - 1); /* In 0.1 mm units */
diff --git a/drivers/input/mouse/focaltech.c b/drivers/input/mouse/focaltech.c
index 6fd5fff0cbff..c74b99077d16 100644
--- a/drivers/input/mouse/focaltech.c
+++ b/drivers/input/mouse/focaltech.c
@@ -202,8 +202,8 @@ static void focaltech_process_rel_packet(struct psmouse *psmouse,
state->pressed = packet[0] >> 7;
finger1 = ((packet[0] >> 4) & 0x7) - 1;
if (finger1 < FOC_MAX_FINGERS) {
- state->fingers[finger1].x += (char)packet[1];
- state->fingers[finger1].y += (char)packet[2];
+ state->fingers[finger1].x += (s8)packet[1];
+ state->fingers[finger1].y += (s8)packet[2];
} else {
psmouse_err(psmouse, "First finger in rel packet invalid: %d\n",
finger1);
@@ -218,8 +218,8 @@ static void focaltech_process_rel_packet(struct psmouse *psmouse,
*/
finger2 = ((packet[3] >> 4) & 0x7) - 1;
if (finger2 < FOC_MAX_FINGERS) {
- state->fingers[finger2].x += (char)packet[4];
- state->fingers[finger2].y += (char)packet[5];
+ state->fingers[finger2].x += (s8)packet[4];
+ state->fingers[finger2].y += (s8)packet[5];
}
}
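
All of the (char) to (s8) conversions in the alps and focaltech hunks fix the same portability bug: whether plain char is signed is implementation-defined, and it is unsigned on arm and arm64, so (char)packet[n] never produced negative deltas there. A two-line demonstration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned char raw = 0xff;	/* 8-bit relative movement */

	/* int8_t always sign-extends; plain char only does so where
	 * char happens to be signed (x86, but not arm/arm64) */
	printf("%d\n", (int)(int8_t)raw);	/* always -1 */
	return 0;
}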
diff --git a/drivers/input/serio/i8042-acpipnpio.h b/drivers/input/serio/i8042-acpipnpio.h
index efc61736099b..028e45bd050b 100644
--- a/drivers/input/serio/i8042-acpipnpio.h
+++ b/drivers/input/serio/i8042-acpipnpio.h
@@ -611,6 +611,14 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
.driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
+ /* Fujitsu Lifebook A574/H */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "FMVA0501PZ"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
+ },
+ {
/* Gigabyte M912 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
@@ -1117,6 +1125,20 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
{
+ /*
+ * Setting SERIO_QUIRK_NOMUX or SERIO_QUIRK_RESET_ALWAYS makes
+ * the keyboard very laggy for ~5 seconds after boot and
+ * sometimes also after resume.
+ * However both are required for the keyboard to not fail
+ * completely sometimes after boot or resume.
+ */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "N150CU"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ },
+ {
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "NH5xAx"),
},
@@ -1124,6 +1146,20 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
{
+ /*
+ * Setting SERIO_QUIRK_NOMUX or SERIO_QUIRK_RESET_ALWAYS makes
+ * the keyboard very laggy for ~5 seconds after boot and
+ * sometimes also after resume.
+ * However both are required for the keyboard to not fail
+ * completely sometimes after boot or resume.
+ */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "NHxxRZQ"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ },
+ {
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"),
},
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
index b348172f19c3..d77f116680a0 100644
--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -124,10 +124,18 @@ static const unsigned long goodix_irq_flags[] = {
static const struct dmi_system_id nine_bytes_report[] = {
#if defined(CONFIG_DMI) && defined(CONFIG_X86)
{
- .ident = "Lenovo YogaBook",
- /* YB1-X91L/F and YB1-X90L/F */
+ /* Lenovo Yoga Book X90F / X90L */
.matches = {
- DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X9")
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "YETI-11"),
+ }
+ },
+ {
+ /* Lenovo Yoga Book X91F / X91L */
+ .matches = {
/* Non-exact match to match both F and L versions */
+ DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X91"),
}
},
#endif
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 483aaaeb6dae..1abd187c6075 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -1415,23 +1415,26 @@ static struct iommu_device *exynos_iommu_probe_device(struct device *dev)
return &data->iommu;
}
-static void exynos_iommu_release_device(struct device *dev)
+static void exynos_iommu_set_platform_dma(struct device *dev)
{
struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
- struct sysmmu_drvdata *data;
if (owner->domain) {
struct iommu_group *group = iommu_group_get(dev);
if (group) {
-#ifndef CONFIG_ARM
- WARN_ON(owner->domain !=
- iommu_group_default_domain(group));
-#endif
exynos_iommu_detach_device(owner->domain, dev);
iommu_group_put(group);
}
}
+}
+
+static void exynos_iommu_release_device(struct device *dev)
+{
+ struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
+ struct sysmmu_drvdata *data;
+
+ exynos_iommu_set_platform_dma(dev);
list_for_each_entry(data, &owner->controllers, owner_node)
device_link_del(data->link);
@@ -1479,7 +1482,7 @@ static const struct iommu_ops exynos_iommu_ops = {
.domain_alloc = exynos_iommu_domain_alloc,
.device_group = generic_device_group,
#ifdef CONFIG_ARM
- .set_platform_dma_ops = exynos_iommu_release_device,
+ .set_platform_dma_ops = exynos_iommu_set_platform_dma,
#endif
.probe_device = exynos_iommu_probe_device,
.release_device = exynos_iommu_release_device,
diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
index 6acfe879589c..23828d189c2a 100644
--- a/drivers/iommu/intel/dmar.c
+++ b/drivers/iommu/intel/dmar.c
@@ -1071,7 +1071,8 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
}
err = -EINVAL;
- if (cap_sagaw(iommu->cap) == 0) {
+ if (!cap_sagaw(iommu->cap) &&
+ (!ecap_smts(iommu->ecap) || ecap_slts(iommu->ecap))) {
pr_info("%s: No supported address widths. Not attempting DMA translation.\n",
iommu->name);
drhd->ignored = 1;
diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
index d6df3b865812..694ab9b7d3e9 100644
--- a/drivers/iommu/intel/iommu.h
+++ b/drivers/iommu/intel/iommu.h
@@ -641,6 +641,8 @@ struct iommu_pmu {
DECLARE_BITMAP(used_mask, IOMMU_PMU_IDX_MAX);
struct perf_event *event_list[IOMMU_PMU_IDX_MAX];
unsigned char irq_name[16];
+ struct hlist_node cpuhp_node;
+ int cpu;
};
#define IOMMU_IRQ_ID_OFFSET_PRQ (DMAR_UNITS_SUPPORTED)
diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
index 6d01fa078c36..df9e261af0b5 100644
--- a/drivers/iommu/intel/irq_remapping.c
+++ b/drivers/iommu/intel/irq_remapping.c
@@ -311,14 +311,12 @@ static int set_ioapic_sid(struct irte *irte, int apic)
if (!irte)
return -1;
- down_read(&dmar_global_lock);
for (i = 0; i < MAX_IO_APICS; i++) {
if (ir_ioapic[i].iommu && ir_ioapic[i].id == apic) {
sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
break;
}
}
- up_read(&dmar_global_lock);
if (sid == 0) {
pr_warn("Failed to set source-id of IOAPIC (%d)\n", apic);
@@ -338,14 +336,12 @@ static int set_hpet_sid(struct irte *irte, u8 id)
if (!irte)
return -1;
- down_read(&dmar_global_lock);
for (i = 0; i < MAX_HPET_TBS; i++) {
if (ir_hpet[i].iommu && ir_hpet[i].id == id) {
sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn;
break;
}
}
- up_read(&dmar_global_lock);
if (sid == 0) {
pr_warn("Failed to set source-id of HPET block (%d)\n", id);
@@ -1339,9 +1335,7 @@ static int intel_irq_remapping_alloc(struct irq_domain *domain,
if (!data)
goto out_free_parent;
- down_read(&dmar_global_lock);
index = alloc_irte(iommu, &data->irq_2_iommu, nr_irqs);
- up_read(&dmar_global_lock);
if (index < 0) {
pr_warn("Failed to allocate IRTE\n");
kfree(data);
diff --git a/drivers/iommu/intel/perfmon.c b/drivers/iommu/intel/perfmon.c
index e17d9743a0d8..cf43e798eca4 100644
--- a/drivers/iommu/intel/perfmon.c
+++ b/drivers/iommu/intel/perfmon.c
@@ -773,19 +773,34 @@ static void iommu_pmu_unset_interrupt(struct intel_iommu *iommu)
iommu->perf_irq = 0;
}
-static int iommu_pmu_cpu_online(unsigned int cpu)
+static int iommu_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
{
+ struct iommu_pmu *iommu_pmu = hlist_entry_safe(node, typeof(*iommu_pmu), cpuhp_node);
+
if (cpumask_empty(&iommu_pmu_cpu_mask))
cpumask_set_cpu(cpu, &iommu_pmu_cpu_mask);
+ if (cpumask_test_cpu(cpu, &iommu_pmu_cpu_mask))
+ iommu_pmu->cpu = cpu;
+
return 0;
}
-static int iommu_pmu_cpu_offline(unsigned int cpu)
+static int iommu_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
- struct dmar_drhd_unit *drhd;
- struct intel_iommu *iommu;
- int target;
+ struct iommu_pmu *iommu_pmu = hlist_entry_safe(node, typeof(*iommu_pmu), cpuhp_node);
+ int target = cpumask_first(&iommu_pmu_cpu_mask);
+
+ /*
+ * The iommu_pmu_cpu_mask has already been updated when offlining
+ * the CPU for the first iommu_pmu. Migrate the other iommu_pmu to the
+ * new target.
+ */
+ if (target < nr_cpu_ids && target != iommu_pmu->cpu) {
+ perf_pmu_migrate_context(&iommu_pmu->pmu, cpu, target);
+ iommu_pmu->cpu = target;
+ return 0;
+ }
if (!cpumask_test_and_clear_cpu(cpu, &iommu_pmu_cpu_mask))
return 0;
@@ -795,45 +810,50 @@ static int iommu_pmu_cpu_offline(unsigned int cpu)
if (target < nr_cpu_ids)
cpumask_set_cpu(target, &iommu_pmu_cpu_mask);
else
- target = -1;
+ return 0;
- rcu_read_lock();
-
- for_each_iommu(iommu, drhd) {
- if (!iommu->pmu)
- continue;
- perf_pmu_migrate_context(&iommu->pmu->pmu, cpu, target);
- }
- rcu_read_unlock();
+ perf_pmu_migrate_context(&iommu_pmu->pmu, cpu, target);
+ iommu_pmu->cpu = target;
return 0;
}
static int nr_iommu_pmu;
+static enum cpuhp_state iommu_cpuhp_slot;
static int iommu_pmu_cpuhp_setup(struct iommu_pmu *iommu_pmu)
{
int ret;
- if (nr_iommu_pmu++)
- return 0;
+ if (!nr_iommu_pmu) {
+ ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
+ "driver/iommu/intel/perfmon:online",
+ iommu_pmu_cpu_online,
+ iommu_pmu_cpu_offline);
+ if (ret < 0)
+ return ret;
+ iommu_cpuhp_slot = ret;
+ }
- ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_IOMMU_PERF_ONLINE,
- "driver/iommu/intel/perfmon:online",
- iommu_pmu_cpu_online,
- iommu_pmu_cpu_offline);
- if (ret)
- nr_iommu_pmu = 0;
+ ret = cpuhp_state_add_instance(iommu_cpuhp_slot, &iommu_pmu->cpuhp_node);
+ if (ret) {
+ if (!nr_iommu_pmu)
+ cpuhp_remove_multi_state(iommu_cpuhp_slot);
+ return ret;
+ }
+ nr_iommu_pmu++;
- return ret;
+ return 0;
}
static void iommu_pmu_cpuhp_free(struct iommu_pmu *iommu_pmu)
{
+ cpuhp_state_remove_instance(iommu_cpuhp_slot, &iommu_pmu->cpuhp_node);
+
if (--nr_iommu_pmu)
return;
- cpuhp_remove_state(CPUHP_AP_PERF_X86_IOMMU_PERF_ONLINE);
+ cpuhp_remove_multi_state(iommu_cpuhp_slot);
}
void iommu_pmu_register(struct intel_iommu *iommu)
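
Converting from a single cpuhp state to a multi-instance one lets each PMU carry its own hlist node and migrate its perf context independently. A bare-bones sketch of the registration shape (names invented; the one-time state allocation and instance counting are inlined here for brevity):

#include <linux/cpuhotplug.h>

static enum cpuhp_state example_cpuhp_slot;

static int example_online(unsigned int cpu, struct hlist_node *node)
{
	return 0;	/* bring this instance up on @cpu */
}

static int example_offline(unsigned int cpu, struct hlist_node *node)
{
	return 0;	/* migrate this instance away from @cpu */
}

static int example_register(struct hlist_node *node)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
				      "driver/example:online",
				      example_online, example_offline);
	if (ret < 0)
		return ret;
	example_cpuhp_slot = ret;	/* dynamic slot id on success */

	return cpuhp_state_add_instance(example_cpuhp_slot, node);
}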
diff --git a/drivers/iommu/iommufd/pages.c b/drivers/iommu/iommufd/pages.c
index f8d92c9bb65b..3c47846cc5ef 100644
--- a/drivers/iommu/iommufd/pages.c
+++ b/drivers/iommu/iommufd/pages.c
@@ -294,9 +294,9 @@ static void batch_clear_carry(struct pfn_batch *batch, unsigned int keep_pfns)
batch->npfns[batch->end - 1] < keep_pfns);
batch->total_pfns = keep_pfns;
- batch->npfns[0] = keep_pfns;
batch->pfns[0] = batch->pfns[batch->end - 1] +
(batch->npfns[batch->end - 1] - keep_pfns);
+ batch->npfns[0] = keep_pfns;
batch->end = 0;
}
@@ -1142,6 +1142,7 @@ struct iopt_pages *iopt_alloc_pages(void __user *uptr, unsigned long length,
bool writable)
{
struct iopt_pages *pages;
+ unsigned long end;
/*
* The iommu API uses size_t as the length, and protect the DIV_ROUND_UP
@@ -1150,6 +1151,9 @@ struct iopt_pages *iopt_alloc_pages(void __user *uptr, unsigned long length,
if (length > SIZE_MAX - PAGE_SIZE || length == 0)
return ERR_PTR(-EINVAL);
+ if (check_add_overflow((unsigned long)uptr, length, &end))
+ return ERR_PTR(-EOVERFLOW);
+
pages = kzalloc(sizeof(*pages), GFP_KERNEL_ACCOUNT);
if (!pages)
return ERR_PTR(-ENOMEM);
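
The added check_add_overflow() rejects user ranges whose end wraps the address space before any page rounding happens. In user space the same test is the compiler builtin the kernel macro expands to on gcc/clang:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uintptr_t uptr = UINTPTR_MAX - 4095, end;
	size_t length = 8192;

	if (__builtin_add_overflow(uptr, length, &end))
		puts("rejected: uptr + length wraps (-EOVERFLOW)");
	else
		printf("end = %#lx\n", (unsigned long)end);
	return 0;
}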
@@ -1203,13 +1207,21 @@ iopt_area_unpin_domain(struct pfn_batch *batch, struct iopt_area *area,
unsigned long start =
max(start_index, *unmapped_end_index);
+ if (IS_ENABLED(CONFIG_IOMMUFD_TEST) &&
+ batch->total_pfns)
+ WARN_ON(*unmapped_end_index -
+ batch->total_pfns !=
+ start_index);
batch_from_domain(batch, domain, area, start,
last_index);
- batch_last_index = start + batch->total_pfns - 1;
+ batch_last_index = start_index + batch->total_pfns - 1;
} else {
batch_last_index = last_index;
}
+ if (IS_ENABLED(CONFIG_IOMMUFD_TEST))
+ WARN_ON(batch_last_index > real_last_index);
+
/*
* unmaps must always 'cut' at a place where the pfns are not
* contiguous to pair with the maps that always install
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 39e49e5d7182..13321dbb5fbc 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -6260,7 +6260,6 @@ static void __md_stop(struct mddev *mddev)
module_put(pers->owner);
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
- percpu_ref_exit(&mddev->writes_pending);
percpu_ref_exit(&mddev->active_io);
bioset_exit(&mddev->bio_set);
bioset_exit(&mddev->sync_set);
@@ -6273,6 +6272,7 @@ void md_stop(struct mddev *mddev)
*/
__md_stop_writes(mddev);
__md_stop(mddev);
+ percpu_ref_exit(&mddev->writes_pending);
}
EXPORT_SYMBOL_GPL(md_stop);
@@ -7843,6 +7843,7 @@ static void md_free_disk(struct gendisk *disk)
{
struct mddev *mddev = disk->private_data;
+ percpu_ref_exit(&mddev->writes_pending);
mddev_free(mddev);
}
diff --git a/drivers/media/i2c/imx290.c b/drivers/media/i2c/imx290.c
index 49d6c8bdec41..48ae2e0adf9e 100644
--- a/drivers/media/i2c/imx290.c
+++ b/drivers/media/i2c/imx290.c
@@ -1098,7 +1098,7 @@ static int imx290_runtime_suspend(struct device *dev)
}
static const struct dev_pm_ops imx290_pm_ops = {
- SET_RUNTIME_PM_OPS(imx290_runtime_suspend, imx290_runtime_resume, NULL)
+ RUNTIME_PM_OPS(imx290_runtime_suspend, imx290_runtime_resume, NULL)
};
/* ----------------------------------------------------------------------------
@@ -1362,8 +1362,8 @@ static struct i2c_driver imx290_i2c_driver = {
.remove = imx290_remove,
.driver = {
.name = "imx290",
- .pm = &imx290_pm_ops,
- .of_match_table = of_match_ptr(imx290_of_match),
+ .pm = pm_ptr(&imx290_pm_ops),
+ .of_match_table = imx290_of_match,
},
};
diff --git a/drivers/media/platform/qcom/venus/firmware.c b/drivers/media/platform/qcom/venus/firmware.c
index 61ff20a7e935..cfb11c551167 100644
--- a/drivers/media/platform/qcom/venus/firmware.c
+++ b/drivers/media/platform/qcom/venus/firmware.c
@@ -38,8 +38,8 @@ static void venus_reset_cpu(struct venus_core *core)
writel(fw_size, wrapper_base + WRAPPER_FW_END_ADDR);
writel(0, wrapper_base + WRAPPER_CPA_START_ADDR);
writel(fw_size, wrapper_base + WRAPPER_CPA_END_ADDR);
- writel(0, wrapper_base + WRAPPER_NONPIX_START_ADDR);
- writel(0, wrapper_base + WRAPPER_NONPIX_END_ADDR);
+ writel(fw_size, wrapper_base + WRAPPER_NONPIX_START_ADDR);
+ writel(fw_size, wrapper_base + WRAPPER_NONPIX_END_ADDR);
if (IS_V6(core)) {
/* Bring XTSS out of reset */
diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c
index bf7667845459..bbfaf6536903 100644
--- a/drivers/memstick/core/memstick.c
+++ b/drivers/memstick/core/memstick.c
@@ -410,6 +410,7 @@ static struct memstick_dev *memstick_alloc_card(struct memstick_host *host)
return card;
err_out:
host->card = old_card;
+ kfree_const(card->dev.kobj.name);
kfree(card);
return NULL;
}
@@ -468,8 +469,10 @@ static void memstick_check(struct work_struct *work)
put_device(&card->dev);
host->card = NULL;
}
- } else
+ } else {
+ kfree_const(card->dev.kobj.name);
kfree(card);
+ }
}
out_power_off:
diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c
index 89953093e20c..672d37ea98d0 100644
--- a/drivers/mmc/host/sdhci_am654.c
+++ b/drivers/mmc/host/sdhci_am654.c
@@ -351,8 +351,6 @@ static void sdhci_am654_write_b(struct sdhci_host *host, u8 val, int reg)
*/
case MMC_TIMING_SD_HS:
case MMC_TIMING_MMC_HS:
- case MMC_TIMING_UHS_SDR12:
- case MMC_TIMING_UHS_SDR25:
val &= ~SDHCI_CTRL_HISPD;
}
}
diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c
index 1e94e7d10b8b..a0a1194dc1d9 100644
--- a/drivers/mtd/mtdblock.c
+++ b/drivers/mtd/mtdblock.c
@@ -153,7 +153,7 @@ static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos,
mtdblk->cache_state = STATE_EMPTY;
ret = mtd_read(mtd, sect_start, sect_size,
&retlen, mtdblk->cache_data);
- if (ret)
+ if (ret && !mtd_is_bitflip(ret))
return ret;
if (retlen != sect_size)
return -EIO;
@@ -188,8 +188,12 @@ static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
pr_debug("mtdblock: read on \"%s\" at 0x%lx, size 0x%x\n",
mtd->name, pos, len);
- if (!sect_size)
- return mtd_read(mtd, pos, len, &retlen, buf);
+ if (!sect_size) {
+ ret = mtd_read(mtd, pos, len, &retlen, buf);
+ if (ret && !mtd_is_bitflip(ret))
+ return ret;
+ return 0;
+ }
while (len > 0) {
unsigned long sect_start = (pos/sect_size)*sect_size;
@@ -209,7 +213,7 @@ static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
memcpy (buf, mtdblk->cache_data + offset, size);
} else {
ret = mtd_read(mtd, pos, size, &retlen, buf);
- if (ret)
+ if (ret && !mtd_is_bitflip(ret))
return ret;
if (retlen != size)
return -EIO;
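
mtd_is_bitflip() is true for -EUCLEAN, the code MTD returns when a read succeeded but ECC had to correct bit errors; the data is still valid, so only hard read errors should abort. The filter all three hunks apply, as a helper (sketch using the real mtd_is_bitflip() from <linux/mtd/mtd.h>):

#include <linux/mtd/mtd.h>

static int mtd_read_status(int ret)
{
	if (ret && !mtd_is_bitflip(ret))
		return ret;	/* genuine failure */
	return 0;		/* clean read, or corrected -EUCLEAN */
}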
diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c
index a28574c00900..074e14225c06 100644
--- a/drivers/mtd/nand/raw/meson_nand.c
+++ b/drivers/mtd/nand/raw/meson_nand.c
@@ -280,7 +280,7 @@ static void meson_nfc_cmd_access(struct nand_chip *nand, int raw, bool dir,
if (raw) {
len = mtd->writesize + mtd->oobsize;
- cmd = (len & GENMASK(5, 0)) | scrambler | DMA_DIR(dir);
+ cmd = (len & GENMASK(13, 0)) | scrambler | DMA_DIR(dir);
writel(cmd, nfc->reg_base + NFC_REG_CMD);
return;
}
@@ -544,7 +544,7 @@ static int meson_nfc_read_buf(struct nand_chip *nand, u8 *buf, int len)
if (ret)
goto out;
- cmd = NFC_CMD_N2M | (len & GENMASK(5, 0));
+ cmd = NFC_CMD_N2M | (len & GENMASK(13, 0));
writel(cmd, nfc->reg_base + NFC_REG_CMD);
meson_nfc_drain_cmd(nfc);
@@ -568,7 +568,7 @@ static int meson_nfc_write_buf(struct nand_chip *nand, u8 *buf, int len)
if (ret)
return ret;
- cmd = NFC_CMD_M2N | (len & GENMASK(5, 0));
+ cmd = NFC_CMD_M2N | (len & GENMASK(13, 0));
writel(cmd, nfc->reg_base + NFC_REG_CMD);
meson_nfc_drain_cmd(nfc);
diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
index 5d627048c420..9e74bcd90aaa 100644
--- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c
+++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
@@ -1531,6 +1531,9 @@ static int stm32_fmc2_nfc_setup_interface(struct nand_chip *chip, int chipnr,
if (IS_ERR(sdrt))
return PTR_ERR(sdrt);
+ if (conf->timings.mode > 3)
+ return -EOPNOTSUPP;
+
if (chipnr == NAND_DATA_IFACE_CHECK_ONLY)
return 0;
diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
index 0a78045ca1d9..522d375aeccf 100644
--- a/drivers/mtd/spi-nor/core.c
+++ b/drivers/mtd/spi-nor/core.c
@@ -3343,7 +3343,19 @@ static struct spi_mem_driver spi_nor_driver = {
.remove = spi_nor_remove,
.shutdown = spi_nor_shutdown,
};
-module_spi_mem_driver(spi_nor_driver);
+
+static int __init spi_nor_module_init(void)
+{
+ return spi_mem_driver_register(&spi_nor_driver);
+}
+module_init(spi_nor_module_init);
+
+static void __exit spi_nor_module_exit(void)
+{
+ spi_mem_driver_unregister(&spi_nor_driver);
+ spi_nor_debugfs_shutdown();
+}
+module_exit(spi_nor_module_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Huang Shijie <shijie8@gmail.com>");
diff --git a/drivers/mtd/spi-nor/core.h b/drivers/mtd/spi-nor/core.h
index 25423225c29d..e0cc42a4a0c8 100644
--- a/drivers/mtd/spi-nor/core.h
+++ b/drivers/mtd/spi-nor/core.h
@@ -711,8 +711,10 @@ static inline struct spi_nor *mtd_to_spi_nor(struct mtd_info *mtd)
#ifdef CONFIG_DEBUG_FS
void spi_nor_debugfs_register(struct spi_nor *nor);
+void spi_nor_debugfs_shutdown(void);
#else
static inline void spi_nor_debugfs_register(struct spi_nor *nor) {}
+static inline void spi_nor_debugfs_shutdown(void) {}
#endif
#endif /* __LINUX_MTD_SPI_NOR_INTERNAL_H */
diff --git a/drivers/mtd/spi-nor/debugfs.c b/drivers/mtd/spi-nor/debugfs.c
index 845b78c7ecc7..fc7ad203df12 100644
--- a/drivers/mtd/spi-nor/debugfs.c
+++ b/drivers/mtd/spi-nor/debugfs.c
@@ -226,13 +226,13 @@ static void spi_nor_debugfs_unregister(void *data)
nor->debugfs_root = NULL;
}
+static struct dentry *rootdir;
+
void spi_nor_debugfs_register(struct spi_nor *nor)
{
- struct dentry *rootdir, *d;
+ struct dentry *d;
int ret;
- /* Create rootdir once. Will never be deleted again. */
- rootdir = debugfs_lookup(SPI_NOR_DEBUGFS_ROOT, NULL);
if (!rootdir)
rootdir = debugfs_create_dir(SPI_NOR_DEBUGFS_ROOT, NULL);
@@ -247,3 +247,8 @@ void spi_nor_debugfs_register(struct spi_nor *nor)
debugfs_create_file("capabilities", 0444, d, nor,
&spi_nor_capabilities_fops);
}
+
+void spi_nor_debugfs_shutdown(void)
+{
+ debugfs_remove(rootdir);
+}
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 0904eb40c95f..ad025b2ee417 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -666,12 +666,6 @@ static int io_init(struct ubi_device *ubi, int max_beb_per1024)
ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size);
ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size);
- if (ubi->vid_hdr_offset && ((ubi->vid_hdr_offset + UBI_VID_HDR_SIZE) >
- ubi->vid_hdr_alsize)) {
- ubi_err(ubi, "VID header offset %d too large.", ubi->vid_hdr_offset);
- return -EINVAL;
- }
-
dbg_gen("min_io_size %d", ubi->min_io_size);
dbg_gen("max_write_size %d", ubi->max_write_size);
dbg_gen("hdrs_min_io_size %d", ubi->hdrs_min_io_size);
@@ -689,6 +683,21 @@ static int io_init(struct ubi_device *ubi, int max_beb_per1024)
ubi->vid_hdr_aloffset;
}
+ /*
+ * Memory allocation for VID header is ubi->vid_hdr_alsize
+ * which is described in comments in io.c.
+ * Make sure VID header shift + UBI_VID_HDR_SIZE does not exceed
+ * ubi->vid_hdr_alsize, so that all vid header operations
+ * won't access memory out of bounds.
+ */
+ if ((ubi->vid_hdr_shift + UBI_VID_HDR_SIZE) > ubi->vid_hdr_alsize) {
+ ubi_err(ubi, "Invalid VID header offset %d, VID header shift(%d)"
+ " + VID header size(%zu) > VID header aligned size(%d).",
+ ubi->vid_hdr_offset, ubi->vid_hdr_shift,
+ UBI_VID_HDR_SIZE, ubi->vid_hdr_alsize);
+ return -EINVAL;
+ }
+
/* Similar for the data offset */
ubi->leb_start = ubi->vid_hdr_offset + UBI_VID_HDR_SIZE;
ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size);
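The check guards the buffer that io.c allocates with ubi->vid_hdr_alsize bytes and accesses at ubi->vid_hdr_shift. A minimal standalone sketch of the arithmetic, using an illustrative page size (UBI_VID_HDR_SIZE is 64 bytes):

	#include <stdio.h>

	#define UBI_VID_HDR_SIZE 64
	#define ALIGN_UP(x, a) (((x) + (a) - 1) / (a) * (a))

	int main(void)
	{
		int min_io = 2048;		/* hypothetical NAND page size */
		int vid_hdr_offset = 2040;	/* user-supplied offset */
		int aloffset = vid_hdr_offset / min_io * min_io;	/* 0 */
		int shift = vid_hdr_offset - aloffset;			/* 2040 */
		int alsize = ALIGN_UP(UBI_VID_HDR_SIZE, min_io);	/* 2048 */

		/* 2040 + 64 > 2048: header I/O would overrun the buffer,
		 * so io_init() now rejects this offset with -EINVAL.
		 */
		printf("%s\n", shift + UBI_VID_HDR_SIZE > alsize ?
		       "-EINVAL" : "ok");
		return 0;
	}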
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 40f39e5d6dfc..26a214f016c1 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -575,7 +575,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
* @vol_id: the volume ID that last used this PEB
* @lnum: the last used logical eraseblock number for the PEB
* @torture: if the physical eraseblock has to be tortured
- * @nested: denotes whether the work_sem is already held in read mode
+ * @nested: denotes whether the work_sem is already held
*
* This function returns zero in case of success and a %-ENOMEM in case of
* failure.
@@ -1131,7 +1131,7 @@ static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
int err1;
/* Re-schedule the LEB for erasure */
- err1 = schedule_erase(ubi, e, vol_id, lnum, 0, false);
+ err1 = schedule_erase(ubi, e, vol_id, lnum, 0, true);
if (err1) {
spin_lock(&ubi->wl_lock);
wl_entry_destroy(ubi, e);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 236e5219c811..710548dbd0c1 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1777,14 +1777,15 @@ void bond_lower_state_changed(struct slave *slave)
/* The bonding driver uses ether_setup() to convert a master bond device
* to ARPHRD_ETHER, that resets the target netdevice's flags so we always
- * have to restore the IFF_MASTER flag, and only restore IFF_SLAVE if it was set
+ * have to restore the IFF_MASTER flag, and only restore IFF_SLAVE and IFF_UP
+ * if they were set.
*/
static void bond_ether_setup(struct net_device *bond_dev)
{
- unsigned int slave_flag = bond_dev->flags & IFF_SLAVE;
+ unsigned int flags = bond_dev->flags & (IFF_SLAVE | IFF_UP);
ether_setup(bond_dev);
- bond_dev->flags |= IFF_MASTER | slave_flag;
+ bond_dev->flags |= IFF_MASTER | flags;
bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
}
@@ -3269,7 +3270,8 @@ static int bond_na_rcv(const struct sk_buff *skb, struct bonding *bond,
combined = skb_header_pointer(skb, 0, sizeof(_combined), &_combined);
if (!combined || combined->ip6.nexthdr != NEXTHDR_ICMP ||
- combined->icmp6.icmp6_type != NDISC_NEIGHBOUR_ADVERTISEMENT)
+ (combined->icmp6.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION &&
+ combined->icmp6.icmp6_type != NDISC_NEIGHBOUR_ADVERTISEMENT))
goto out;
saddr = &combined->ip6.saddr;
@@ -3291,7 +3293,7 @@ static int bond_na_rcv(const struct sk_buff *skb, struct bonding *bond,
else if (curr_active_slave &&
time_after(slave_last_rx(bond, curr_active_slave),
curr_active_slave->last_link_up))
- bond_validate_na(bond, slave, saddr, daddr);
+ bond_validate_na(bond, slave, daddr, saddr);
else if (curr_arp_slave &&
bond_time_in_interval(bond, slave_last_tx(curr_arp_slave), 1))
bond_validate_na(bond, slave, saddr, daddr);
@@ -5695,9 +5697,13 @@ static int bond_ethtool_get_ts_info(struct net_device *bond_dev,
struct ethtool_ts_info *info)
{
struct bonding *bond = netdev_priv(bond_dev);
+ struct ethtool_ts_info ts_info;
const struct ethtool_ops *ops;
struct net_device *real_dev;
+ bool sw_tx_support = false;
struct phy_device *phydev;
+ struct list_head *iter;
+ struct slave *slave;
int ret = 0;
rcu_read_lock();
@@ -5716,10 +5722,36 @@ static int bond_ethtool_get_ts_info(struct net_device *bond_dev,
ret = ops->get_ts_info(real_dev, info);
goto out;
}
+ } else {
+ /* Check if all slaves support software tx timestamping */
+ rcu_read_lock();
+ bond_for_each_slave_rcu(bond, slave, iter) {
+ ret = -1;
+ ops = slave->dev->ethtool_ops;
+ phydev = slave->dev->phydev;
+
+ if (phy_has_tsinfo(phydev))
+ ret = phy_ts_info(phydev, &ts_info);
+ else if (ops->get_ts_info)
+ ret = ops->get_ts_info(slave->dev, &ts_info);
+
+ if (!ret && (ts_info.so_timestamping & SOF_TIMESTAMPING_TX_SOFTWARE)) {
+ sw_tx_support = true;
+ continue;
+ }
+
+ sw_tx_support = false;
+ break;
+ }
+ rcu_read_unlock();
}
+ ret = 0;
info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
SOF_TIMESTAMPING_SOFTWARE;
+ if (sw_tx_support)
+ info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE;
+
info->phc_index = -1;
out:
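After this change, a bond whose slaves all support software TX timestamping reports SOF_TIMESTAMPING_TX_SOFTWARE even without a PHC-capable slave. A userspace probe of the reported capabilities, assuming an interface named bond0, could look like:

	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <net/if.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <linux/ethtool.h>
	#include <linux/net_tstamp.h>
	#include <linux/sockios.h>

	int main(void)
	{
		struct ethtool_ts_info tsi = { .cmd = ETHTOOL_GET_TS_INFO };
		struct ifreq ifr = { 0 };
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		if (fd < 0)
			return 1;
		strncpy(ifr.ifr_name, "bond0", IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&tsi;
		if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
			perror("SIOCETHTOOL");
			return 1;
		}
		printf("sw tx timestamping: %s\n",
		       tsi.so_timestamping & SOF_TIMESTAMPING_TX_SOFTWARE ?
		       "yes" : "no");
		close(fd);
		return 0;
	}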
diff --git a/drivers/net/dsa/microchip/ksz8.h b/drivers/net/dsa/microchip/ksz8.h
index ea05abfbd51d..e68465fdf6b9 100644
--- a/drivers/net/dsa/microchip/ksz8.h
+++ b/drivers/net/dsa/microchip/ksz8.h
@@ -21,10 +21,6 @@ int ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val);
int ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val);
int ksz8_r_dyn_mac_table(struct ksz_device *dev, u16 addr, u8 *mac_addr,
u8 *fid, u8 *src_port, u8 *timestamp, u16 *entries);
-int ksz8_r_sta_mac_table(struct ksz_device *dev, u16 addr,
- struct alu_struct *alu);
-void ksz8_w_sta_mac_table(struct ksz_device *dev, u16 addr,
- struct alu_struct *alu);
void ksz8_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, u64 *cnt);
void ksz8_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
u64 *dropped, u64 *cnt);
@@ -32,6 +28,10 @@ void ksz8_freeze_mib(struct ksz_device *dev, int port, bool freeze);
void ksz8_port_init_cnt(struct ksz_device *dev, int port);
int ksz8_fdb_dump(struct ksz_device *dev, int port,
dsa_fdb_dump_cb_t *cb, void *data);
+int ksz8_fdb_add(struct ksz_device *dev, int port, const unsigned char *addr,
+ u16 vid, struct dsa_db db);
+int ksz8_fdb_del(struct ksz_device *dev, int port, const unsigned char *addr,
+ u16 vid, struct dsa_db db);
int ksz8_mdb_add(struct ksz_device *dev, int port,
const struct switchdev_obj_port_mdb *mdb, struct dsa_db db);
int ksz8_mdb_del(struct ksz_device *dev, int port,
diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c
index 3fffd5da8d3b..f56fca1b1a22 100644
--- a/drivers/net/dsa/microchip/ksz8795.c
+++ b/drivers/net/dsa/microchip/ksz8795.c
@@ -96,7 +96,7 @@ static int ksz8795_change_mtu(struct ksz_device *dev, int frame_size)
if (frame_size > KSZ8_LEGAL_PACKET_SIZE)
ctrl2 |= SW_LEGAL_PACKET_DISABLE;
- else if (frame_size > KSZ8863_NORMAL_PACKET_SIZE)
+ if (frame_size > KSZ8863_NORMAL_PACKET_SIZE)
ctrl1 |= SW_HUGE_PACKET;
ret = ksz_rmw8(dev, REG_SW_CTRL_1, SW_HUGE_PACKET, ctrl1);
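The fix decouples the two thresholds: a frame above the legal-packet limit must set SW_HUGE_PACKET as well, which the old else-if skipped. A standalone check with illustrative threshold values:

	#include <stdbool.h>
	#include <stdio.h>

	/* Illustrative thresholds; the driver reads the real ones from its
	 * chip headers (KSZ8_LEGAL_PACKET_SIZE, KSZ8863_NORMAL_PACKET_SIZE).
	 */
	#define LEGAL_PACKET_SIZE	1550
	#define NORMAL_PACKET_SIZE	1536

	int main(void)
	{
		int sizes[] = { 1500, 1540, 2000 };

		for (int i = 0; i < 3; i++) {
			bool legal_disable = sizes[i] > LEGAL_PACKET_SIZE;
			/* independent check, no longer an else-if */
			bool huge = sizes[i] > NORMAL_PACKET_SIZE;

			printf("%d: legal_disable=%d huge=%d\n",
			       sizes[i], legal_disable, huge);
		}
		return 0;
	}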
@@ -336,34 +336,48 @@ void ksz8_port_init_cnt(struct ksz_device *dev, int port)
}
}
-static void ksz8_r_table(struct ksz_device *dev, int table, u16 addr, u64 *data)
+static int ksz8_r_table(struct ksz_device *dev, int table, u16 addr, u64 *data)
{
const u16 *regs;
u16 ctrl_addr;
+ int ret;
regs = dev->info->regs;
ctrl_addr = IND_ACC_TABLE(table | TABLE_READ) | addr;
mutex_lock(&dev->alu_mutex);
- ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
- ksz_read64(dev, regs[REG_IND_DATA_HI], data);
+ ret = ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
+ if (ret)
+ goto unlock_alu;
+
+ ret = ksz_read64(dev, regs[REG_IND_DATA_HI], data);
+unlock_alu:
mutex_unlock(&dev->alu_mutex);
+
+ return ret;
}
-static void ksz8_w_table(struct ksz_device *dev, int table, u16 addr, u64 data)
+static int ksz8_w_table(struct ksz_device *dev, int table, u16 addr, u64 data)
{
const u16 *regs;
u16 ctrl_addr;
+ int ret;
regs = dev->info->regs;
ctrl_addr = IND_ACC_TABLE(table) | addr;
mutex_lock(&dev->alu_mutex);
- ksz_write64(dev, regs[REG_IND_DATA_HI], data);
- ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
+ ret = ksz_write64(dev, regs[REG_IND_DATA_HI], data);
+ if (ret)
+ goto unlock_alu;
+
+ ret = ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
+unlock_alu:
mutex_unlock(&dev->alu_mutex);
+
+ return ret;
}
static int ksz8_valid_dyn_entry(struct ksz_device *dev, u8 *data)
@@ -457,46 +471,54 @@ int ksz8_r_dyn_mac_table(struct ksz_device *dev, u16 addr, u8 *mac_addr,
return rc;
}
-int ksz8_r_sta_mac_table(struct ksz_device *dev, u16 addr,
- struct alu_struct *alu)
+static int ksz8_r_sta_mac_table(struct ksz_device *dev, u16 addr,
+ struct alu_struct *alu, bool *valid)
{
u32 data_hi, data_lo;
const u8 *shifts;
const u32 *masks;
u64 data;
+ int ret;
shifts = dev->info->shifts;
masks = dev->info->masks;
- ksz8_r_table(dev, TABLE_STATIC_MAC, addr, &data);
+ ret = ksz8_r_table(dev, TABLE_STATIC_MAC, addr, &data);
+ if (ret)
+ return ret;
+
data_hi = data >> 32;
data_lo = (u32)data;
- if (data_hi & (masks[STATIC_MAC_TABLE_VALID] |
- masks[STATIC_MAC_TABLE_OVERRIDE])) {
- alu->mac[5] = (u8)data_lo;
- alu->mac[4] = (u8)(data_lo >> 8);
- alu->mac[3] = (u8)(data_lo >> 16);
- alu->mac[2] = (u8)(data_lo >> 24);
- alu->mac[1] = (u8)data_hi;
- alu->mac[0] = (u8)(data_hi >> 8);
- alu->port_forward =
- (data_hi & masks[STATIC_MAC_TABLE_FWD_PORTS]) >>
- shifts[STATIC_MAC_FWD_PORTS];
- alu->is_override =
- (data_hi & masks[STATIC_MAC_TABLE_OVERRIDE]) ? 1 : 0;
- data_hi >>= 1;
- alu->is_static = true;
- alu->is_use_fid =
- (data_hi & masks[STATIC_MAC_TABLE_USE_FID]) ? 1 : 0;
- alu->fid = (data_hi & masks[STATIC_MAC_TABLE_FID]) >>
- shifts[STATIC_MAC_FID];
+
+ if (!(data_hi & (masks[STATIC_MAC_TABLE_VALID] |
+ masks[STATIC_MAC_TABLE_OVERRIDE]))) {
+ *valid = false;
return 0;
}
- return -ENXIO;
+
+ alu->mac[5] = (u8)data_lo;
+ alu->mac[4] = (u8)(data_lo >> 8);
+ alu->mac[3] = (u8)(data_lo >> 16);
+ alu->mac[2] = (u8)(data_lo >> 24);
+ alu->mac[1] = (u8)data_hi;
+ alu->mac[0] = (u8)(data_hi >> 8);
+ alu->port_forward =
+ (data_hi & masks[STATIC_MAC_TABLE_FWD_PORTS]) >>
+ shifts[STATIC_MAC_FWD_PORTS];
+ alu->is_override = (data_hi & masks[STATIC_MAC_TABLE_OVERRIDE]) ? 1 : 0;
+ data_hi >>= 1;
+ alu->is_static = true;
+ alu->is_use_fid = (data_hi & masks[STATIC_MAC_TABLE_USE_FID]) ? 1 : 0;
+ alu->fid = (data_hi & masks[STATIC_MAC_TABLE_FID]) >>
+ shifts[STATIC_MAC_FID];
+
+ *valid = true;
+
+ return 0;
}
-void ksz8_w_sta_mac_table(struct ksz_device *dev, u16 addr,
- struct alu_struct *alu)
+static int ksz8_w_sta_mac_table(struct ksz_device *dev, u16 addr,
+ struct alu_struct *alu)
{
u32 data_hi, data_lo;
const u8 *shifts;
@@ -524,7 +546,8 @@ void ksz8_w_sta_mac_table(struct ksz_device *dev, u16 addr,
data_hi &= ~masks[STATIC_MAC_TABLE_OVERRIDE];
data = (u64)data_hi << 32 | data_lo;
- ksz8_w_table(dev, TABLE_STATIC_MAC, addr, data);
+
+ return ksz8_w_table(dev, TABLE_STATIC_MAC, addr, data);
}
static void ksz8_from_vlan(struct ksz_device *dev, u32 vlan, u8 *fid,
@@ -977,24 +1000,29 @@ int ksz8_fdb_dump(struct ksz_device *dev, int port,
return ret;
}
-int ksz8_mdb_add(struct ksz_device *dev, int port,
- const struct switchdev_obj_port_mdb *mdb, struct dsa_db db)
+static int ksz8_add_sta_mac(struct ksz_device *dev, int port,
+ const unsigned char *addr, u16 vid)
{
struct alu_struct alu;
- int index;
+ int index, ret;
int empty = 0;
alu.port_forward = 0;
for (index = 0; index < dev->info->num_statics; index++) {
- if (!ksz8_r_sta_mac_table(dev, index, &alu)) {
- /* Found one already in static MAC table. */
- if (!memcmp(alu.mac, mdb->addr, ETH_ALEN) &&
- alu.fid == mdb->vid)
- break;
- /* Remember the first empty entry. */
- } else if (!empty) {
- empty = index + 1;
+ bool valid;
+
+ ret = ksz8_r_sta_mac_table(dev, index, &alu, &valid);
+ if (ret)
+ return ret;
+ if (!valid) {
+ /* Remember the first empty entry. */
+ if (!empty)
+ empty = index + 1;
+ continue;
}
+
+ if (!memcmp(alu.mac, addr, ETH_ALEN) && alu.fid == vid)
+ break;
}
/* no available entry */
@@ -1005,48 +1033,73 @@ int ksz8_mdb_add(struct ksz_device *dev, int port,
if (index == dev->info->num_statics) {
index = empty - 1;
memset(&alu, 0, sizeof(alu));
- memcpy(alu.mac, mdb->addr, ETH_ALEN);
+ memcpy(alu.mac, addr, ETH_ALEN);
alu.is_static = true;
}
alu.port_forward |= BIT(port);
- if (mdb->vid) {
+ if (vid) {
alu.is_use_fid = true;
/* Need a way to map VID to FID. */
- alu.fid = mdb->vid;
+ alu.fid = vid;
}
- ksz8_w_sta_mac_table(dev, index, &alu);
- return 0;
+ return ksz8_w_sta_mac_table(dev, index, &alu);
}
-int ksz8_mdb_del(struct ksz_device *dev, int port,
- const struct switchdev_obj_port_mdb *mdb, struct dsa_db db)
+static int ksz8_del_sta_mac(struct ksz_device *dev, int port,
+ const unsigned char *addr, u16 vid)
{
struct alu_struct alu;
- int index;
+ int index, ret;
for (index = 0; index < dev->info->num_statics; index++) {
- if (!ksz8_r_sta_mac_table(dev, index, &alu)) {
- /* Found one already in static MAC table. */
- if (!memcmp(alu.mac, mdb->addr, ETH_ALEN) &&
- alu.fid == mdb->vid)
- break;
- }
+ bool valid;
+
+ ret = ksz8_r_sta_mac_table(dev, index, &alu, &valid);
+ if (ret)
+ return ret;
+ if (!valid)
+ continue;
+
+ if (!memcmp(alu.mac, addr, ETH_ALEN) && alu.fid == vid)
+ break;
}
/* no available entry */
if (index == dev->info->num_statics)
- goto exit;
+ return 0;
/* clear port */
alu.port_forward &= ~BIT(port);
if (!alu.port_forward)
alu.is_static = false;
- ksz8_w_sta_mac_table(dev, index, &alu);
-exit:
- return 0;
+ return ksz8_w_sta_mac_table(dev, index, &alu);
+}
+
+int ksz8_mdb_add(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_mdb *mdb, struct dsa_db db)
+{
+ return ksz8_add_sta_mac(dev, port, mdb->addr, mdb->vid);
+}
+
+int ksz8_mdb_del(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_mdb *mdb, struct dsa_db db)
+{
+ return ksz8_del_sta_mac(dev, port, mdb->addr, mdb->vid);
+}
+
+int ksz8_fdb_add(struct ksz_device *dev, int port, const unsigned char *addr,
+ u16 vid, struct dsa_db db)
+{
+ return ksz8_add_sta_mac(dev, port, addr, vid);
+}
+
+int ksz8_fdb_del(struct ksz_device *dev, int port, const unsigned char *addr,
+ u16 vid, struct dsa_db db)
+{
+ return ksz8_del_sta_mac(dev, port, addr, vid);
}
int ksz8_port_vlan_filtering(struct ksz_device *dev, int port, bool flag,
@@ -1346,9 +1399,7 @@ int ksz8_enable_stp_addr(struct ksz_device *dev)
alu.is_override = true;
alu.port_forward = dev->info->cpu_ports;
- ksz8_w_sta_mac_table(dev, 0, &alu);
-
- return 0;
+ return ksz8_w_sta_mac_table(dev, 0, &alu);
}
int ksz8_setup(struct dsa_switch *ds)
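The recurring pattern in this rework is an error return plus a bool out-parameter, so bus failures abort a table scan while empty slots merely continue it. A minimal sketch of the idiom, with a stub standing in for the ksz register helpers:

	#include <stdbool.h>

	/* Stub in place of the ksz_read64()-style register accessors */
	static int bus_read(int addr, unsigned long long *raw)
	{
		*raw = addr & 1 ? 0xabcdULL : 0;
		return 0;
	}

	/* The (ret, *valid) idiom: the return value carries only bus
	 * errors, while *valid reports whether the slot holds an entry,
	 * so callers keep scanning past empty slots instead of treating
	 * them as failures (the old code overloaded -ENXIO for both).
	 */
	static int read_entry(int addr, unsigned long long *entry, bool *valid)
	{
		unsigned long long raw;
		int ret = bus_read(addr, &raw);

		if (ret)
			return ret;	/* real I/O error: abort the scan */

		*valid = raw != 0;	/* empty slot is not an error */
		if (*valid)
			*entry = raw;
		return 0;
	}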
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 5568d7b22135..a4428be5f483 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -200,6 +200,8 @@ static const struct ksz_dev_ops ksz8_dev_ops = {
.freeze_mib = ksz8_freeze_mib,
.port_init_cnt = ksz8_port_init_cnt,
.fdb_dump = ksz8_fdb_dump,
+ .fdb_add = ksz8_fdb_add,
+ .fdb_del = ksz8_fdb_del,
.mdb_add = ksz8_mdb_add,
.mdb_del = ksz8_mdb_del,
.vlan_filtering = ksz8_port_vlan_filtering,
diff --git a/drivers/net/dsa/mt7530-mdio.c b/drivers/net/dsa/mt7530-mdio.c
index 34a547b88e49..088533663b83 100644
--- a/drivers/net/dsa/mt7530-mdio.c
+++ b/drivers/net/dsa/mt7530-mdio.c
@@ -81,14 +81,17 @@ static const struct regmap_bus mt7530_regmap_bus = {
};
static int
-mt7531_create_sgmii(struct mt7530_priv *priv)
+mt7531_create_sgmii(struct mt7530_priv *priv, bool dual_sgmii)
{
- struct regmap_config *mt7531_pcs_config[2];
+ struct regmap_config *mt7531_pcs_config[2] = {};
struct phylink_pcs *pcs;
struct regmap *regmap;
int i, ret = 0;
- for (i = 0; i < 2; i++) {
+ /* MT7531AE has two SGMII units for port 5 and port 6.
+ * MT7531BE has only one SGMII unit for port 6.
+ */
+ for (i = dual_sgmii ? 0 : 1; i < 2; i++) {
mt7531_pcs_config[i] = devm_kzalloc(priv->dev,
sizeof(struct regmap_config),
GFP_KERNEL);
@@ -208,11 +211,8 @@ mt7530_probe(struct mdio_device *mdiodev)
if (IS_ERR(priv->regmap))
return PTR_ERR(priv->regmap);
- if (priv->id == ID_MT7531) {
- ret = mt7531_create_sgmii(priv);
- if (ret)
- return ret;
- }
+ if (priv->id == ID_MT7531)
+ priv->create_sgmii = mt7531_create_sgmii;
return dsa_register_switch(priv->ds);
}
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index e4bb5037d352..c680873819b0 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -3018,6 +3018,12 @@ mt753x_setup(struct dsa_switch *ds)
if (ret && priv->irq)
mt7530_free_irq_common(priv);
+ if (priv->create_sgmii) {
+ ret = priv->create_sgmii(priv, mt7531_dual_sgmii_supported(priv));
+ if (ret && priv->irq)
+ mt7530_free_irq(priv);
+ }
+
return ret;
}
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index 01db5c9724fa..5084f48a8869 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -748,10 +748,10 @@ struct mt753x_info {
* registers
* @p6_interface: Holding the current port 6 interface
* @p5_intf_sel: Holding the current port 5 interface select
- *
* @irq: IRQ number of the switch
* @irq_domain: IRQ domain of the switch irq_chip
* @irq_enable: IRQ enable bits, synced to SYS_INT_EN
+ * @create_sgmii: Pointer to function creating SGMII PCS instance(s)
*/
struct mt7530_priv {
struct device *dev;
@@ -770,7 +770,6 @@ struct mt7530_priv {
unsigned int p5_intf_sel;
u8 mirror_rx;
u8 mirror_tx;
-
struct mt7530_port ports[MT7530_NUM_PORTS];
struct mt753x_pcs pcs[MT7530_NUM_PORTS];
/* protect among processes for register access */
@@ -778,6 +777,7 @@ struct mt7530_priv {
int irq;
struct irq_domain *irq_domain;
u32 irq_enable;
+ int (*create_sgmii)(struct mt7530_priv *priv, bool dual_sgmii);
};
struct mt7530_hw_vlan_entry {
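The net effect is a deferred-init pattern: probe only records the callback, and mt753x_setup() invokes it once mt7531_dual_sgmii_supported() can actually query the hardware. Reduced to its shape (error handling elided, a sketch of the diff above, not verbatim driver code):

	static int probe(struct mt7530_priv *priv)
	{
		/* probe cannot yet tell AE from BE, so only record the hook */
		if (priv->id == ID_MT7531)
			priv->create_sgmii = mt7531_create_sgmii;
		return dsa_register_switch(priv->ds);
	}

	static int setup(struct mt7530_priv *priv)
	{
		/* by now the switch is accessible and the variant is known */
		if (priv->create_sgmii)
			return priv->create_sgmii(priv,
					mt7531_dual_sgmii_supported(priv));
		return 0;
	}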
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 62a126402983..dc263cea205f 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -611,10 +611,10 @@ static void mv88e6185_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
}
static const u8 mv88e6xxx_phy_interface_modes[] = {
- [MV88E6XXX_PORT_STS_CMODE_MII_PHY] = PHY_INTERFACE_MODE_MII,
+ [MV88E6XXX_PORT_STS_CMODE_MII_PHY] = PHY_INTERFACE_MODE_REVMII,
[MV88E6XXX_PORT_STS_CMODE_MII] = PHY_INTERFACE_MODE_MII,
[MV88E6XXX_PORT_STS_CMODE_GMII] = PHY_INTERFACE_MODE_GMII,
- [MV88E6XXX_PORT_STS_CMODE_RMII_PHY] = PHY_INTERFACE_MODE_RMII,
+ [MV88E6XXX_PORT_STS_CMODE_RMII_PHY] = PHY_INTERFACE_MODE_REVRMII,
[MV88E6XXX_PORT_STS_CMODE_RMII] = PHY_INTERFACE_MODE_RMII,
[MV88E6XXX_PORT_STS_CMODE_100BASEX] = PHY_INTERFACE_MODE_100BASEX,
[MV88E6XXX_PORT_STS_CMODE_1000BASEX] = PHY_INTERFACE_MODE_1000BASEX,
@@ -5613,7 +5613,7 @@ static const struct mv88e6xxx_ops mv88e6393x_ops = {
* .port_set_upstream_port method.
*/
.set_egress_port = mv88e6393x_set_egress_port,
- .watchdog_ops = &mv88e6390_watchdog_ops,
+ .watchdog_ops = &mv88e6393x_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6393x_port_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c
index a26546d3d7b5..615896893076 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.c
+++ b/drivers/net/dsa/mv88e6xxx/global2.c
@@ -943,6 +943,26 @@ const struct mv88e6xxx_irq_ops mv88e6390_watchdog_ops = {
.irq_free = mv88e6390_watchdog_free,
};
+static int mv88e6393x_watchdog_action(struct mv88e6xxx_chip *chip, int irq)
+{
+ mv88e6390_watchdog_action(chip, irq);
+
+ /* Fix for clearing the force WD event bit.
+ * Unreleased erratum on mv88e6393x.
+ */
+ mv88e6xxx_g2_write(chip, MV88E6390_G2_WDOG_CTL,
+ MV88E6390_G2_WDOG_CTL_UPDATE |
+ MV88E6390_G2_WDOG_CTL_PTR_EVENT);
+
+ return IRQ_HANDLED;
+}
+
+const struct mv88e6xxx_irq_ops mv88e6393x_watchdog_ops = {
+ .irq_action = mv88e6393x_watchdog_action,
+ .irq_setup = mv88e6390_watchdog_setup,
+ .irq_free = mv88e6390_watchdog_free,
+};
+
static irqreturn_t mv88e6xxx_g2_watchdog_thread_fn(int irq, void *dev_id)
{
struct mv88e6xxx_chip *chip = dev_id;
diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h
index e973114d6890..7e091965582b 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.h
+++ b/drivers/net/dsa/mv88e6xxx/global2.h
@@ -369,6 +369,7 @@ int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip, int target,
extern const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops;
extern const struct mv88e6xxx_irq_ops mv88e6250_watchdog_ops;
extern const struct mv88e6xxx_irq_ops mv88e6390_watchdog_ops;
+extern const struct mv88e6xxx_irq_ops mv88e6393x_watchdog_ops;
extern const struct mv88e6xxx_avb_ops mv88e6165_avb_ops;
extern const struct mv88e6xxx_avb_ops mv88e6352_avb_ops;
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index 6dcebcfd71e7..80861ac090ae 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -1550,11 +1550,6 @@ static int felix_connect_tag_protocol(struct dsa_switch *ds,
}
}
-/* Hardware initialization done here so that we can allocate structures with
- * devm without fear of dsa_register_switch returning -EPROBE_DEFER and causing
- * us to allocate structures twice (leak memory) and map PCI memory twice
- * (which will not work).
- */
static int felix_setup(struct dsa_switch *ds)
{
struct ocelot *ocelot = ds->priv;
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index dddb28984bdf..cfb3faeaa5bf 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -1424,6 +1424,7 @@ static int vsc9959_qos_port_tas_set(struct ocelot *ocelot, int port,
mutex_lock(&ocelot->tas_lock);
if (!taprio->enable) {
+ ocelot_port_mqprio(ocelot, port, &taprio->mqprio);
ocelot_rmw_rix(ocelot, 0, QSYS_TAG_CONFIG_ENABLE,
QSYS_TAG_CONFIG, port);
@@ -1436,15 +1437,19 @@ static int vsc9959_qos_port_tas_set(struct ocelot *ocelot, int port,
return 0;
}
+ ret = ocelot_port_mqprio(ocelot, port, &taprio->mqprio);
+ if (ret)
+ goto err_unlock;
+
if (taprio->cycle_time > NSEC_PER_SEC ||
taprio->cycle_time_extension >= NSEC_PER_SEC) {
ret = -EINVAL;
- goto err;
+ goto err_reset_tc;
}
if (taprio->num_entries > VSC9959_TAS_GCL_ENTRY_MAX) {
ret = -ERANGE;
- goto err;
+ goto err_reset_tc;
}
/* Enable guard band. The switch will schedule frames without taking
@@ -1468,7 +1473,7 @@ static int vsc9959_qos_port_tas_set(struct ocelot *ocelot, int port,
val = ocelot_read(ocelot, QSYS_PARAM_STATUS_REG_8);
if (val & QSYS_PARAM_STATUS_REG_8_CONFIG_PENDING) {
ret = -EBUSY;
- goto err;
+ goto err_reset_tc;
}
ocelot_rmw_rix(ocelot,
@@ -1503,12 +1508,19 @@ static int vsc9959_qos_port_tas_set(struct ocelot *ocelot, int port,
!(val & QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE),
10, 100000);
if (ret)
- goto err;
+ goto err_reset_tc;
ocelot_port->taprio = taprio_offload_get(taprio);
vsc9959_tas_guard_bands_update(ocelot, port);
-err:
+ mutex_unlock(&ocelot->tas_lock);
+
+ return 0;
+
+err_reset_tc:
+ taprio->mqprio.qopt.num_tc = 0;
+ ocelot_port_mqprio(ocelot, port, &taprio->mqprio);
+err_unlock:
mutex_unlock(&ocelot->tas_lock);
return ret;
@@ -1612,6 +1624,13 @@ static int vsc9959_qos_port_cbs_set(struct dsa_switch *ds, int port,
static int vsc9959_qos_query_caps(struct tc_query_caps_base *base)
{
switch (base->type) {
+ case TC_SETUP_QDISC_MQPRIO: {
+ struct tc_mqprio_caps *caps = base->caps;
+
+ caps->validate_queue_counts = true;
+
+ return 0;
+ }
case TC_SETUP_QDISC_TAPRIO: {
struct tc_taprio_caps *caps = base->caps;
@@ -1635,6 +1654,8 @@ static int vsc9959_port_setup_tc(struct dsa_switch *ds, int port,
return vsc9959_qos_query_caps(type_data);
case TC_SETUP_QDISC_TAPRIO:
return vsc9959_qos_port_tas_set(ocelot, port, type_data);
+ case TC_SETUP_QDISC_MQPRIO:
+ return ocelot_port_mqprio(ocelot, port, type_data);
case TC_SETUP_QDISC_CBS:
return vsc9959_qos_port_cbs_set(ds, port, type_data);
default:
@@ -2498,6 +2519,7 @@ static void vsc9959_cut_through_fwd(struct ocelot *ocelot)
for (port = 0; port < ocelot->num_phys_ports; port++) {
struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct ocelot_mm_state *mm = &ocelot->mm[port];
int min_speed = ocelot_port->speed;
unsigned long mask = 0;
u32 tmp, val = 0;
@@ -2538,10 +2560,12 @@ static void vsc9959_cut_through_fwd(struct ocelot *ocelot)
/* Enable cut-through forwarding for all traffic classes that
* don't have oversized dropping enabled, since this check is
- * bypassed in cut-through mode.
+ * bypassed in cut-through mode. Also exclude preemptible
+ * traffic classes, since these would hang the port for some
+ * reason, if sent as cut-through.
*/
if (ocelot_port->speed == min_speed) {
- val = GENMASK(7, 0);
+ val = GENMASK(7, 0) & ~mm->active_preemptible_tcs;
for (tc = 0; tc < OCELOT_NUM_TC; tc++)
if (vsc9959_port_qmaxsdu_get(ocelot, port, tc))
@@ -2610,12 +2634,9 @@ static const struct felix_info felix_info_vsc9959 = {
static irqreturn_t felix_irq_handler(int irq, void *data)
{
struct ocelot *ocelot = (struct ocelot *)data;
- int port;
ocelot_get_txtstamp(ocelot);
-
- for (port = 0; port < ocelot->num_phys_ports; port++)
- ocelot_port_mm_irq(ocelot, port);
+ ocelot_mm_irq(ocelot);
return IRQ_HANDLED;
}
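The reworked taprio error path is a layered goto unwind: each label undoes exactly the work completed before the failing step, so mqprio state is rolled back before the lock is dropped. The idiom in miniature, with hypothetical helpers:

	static int tas_set(struct ocelot *ocelot, int port)
	{
		int ret;

		mutex_lock(&ocelot->tas_lock);

		ret = apply_mqprio(ocelot, port);
		if (ret)
			goto err_unlock;	/* nothing applied yet */

		ret = program_schedule(ocelot, port);
		if (ret)
			goto err_reset_tc;	/* undo the mqprio step */

		mutex_unlock(&ocelot->tas_lock);
		return 0;

	err_reset_tc:
		reset_mqprio(ocelot, port);	/* the num_tc = 0 rollback */
	err_unlock:
		mutex_unlock(&ocelot->tas_lock);
		return ret;
	}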
diff --git a/drivers/net/dsa/qca/Kconfig b/drivers/net/dsa/qca/Kconfig
index ba339747362c..7a86d6d6a246 100644
--- a/drivers/net/dsa/qca/Kconfig
+++ b/drivers/net/dsa/qca/Kconfig
@@ -15,3 +15,11 @@ config NET_DSA_QCA8K
help
This enables support for the Qualcomm Atheros QCA8K Ethernet
switch chips.
+
+config NET_DSA_QCA8K_LEDS_SUPPORT
+ bool "Qualcomm Atheros QCA8K Ethernet switch family LEDs support"
+ depends on NET_DSA_QCA8K
+ depends on LEDS_CLASS
+ help
+ This enables support for the LEDs present on the Qualcomm Atheros
+ QCA8K Ethernet switch chips.
diff --git a/drivers/net/dsa/qca/Makefile b/drivers/net/dsa/qca/Makefile
index 701f1d199e93..ce66b1984e5f 100644
--- a/drivers/net/dsa/qca/Makefile
+++ b/drivers/net/dsa/qca/Makefile
@@ -2,3 +2,6 @@
obj-$(CONFIG_NET_DSA_AR9331) += ar9331.o
obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o
qca8k-y += qca8k-common.o qca8k-8xxx.o
+ifdef CONFIG_NET_DSA_QCA8K_LEDS_SUPPORT
+qca8k-y += qca8k-leds.o
+endif
diff --git a/drivers/net/dsa/qca/qca8k-8xxx.c b/drivers/net/dsa/qca/qca8k-8xxx.c
index 62810903f1b3..6d5ac7588a69 100644
--- a/drivers/net/dsa/qca/qca8k-8xxx.c
+++ b/drivers/net/dsa/qca/qca8k-8xxx.c
@@ -22,6 +22,7 @@
#include <linux/dsa/tag_qca.h>
#include "qca8k.h"
+#include "qca8k_leds.h"
static void
qca8k_split_addr(u32 regaddr, u16 *r1, u16 *r2, u16 *page)
@@ -772,21 +773,6 @@ err_clear_skb:
return ret;
}
-static u32
-qca8k_port_to_phy(int port)
-{
- /* From Andrew Lunn:
- * Port 0 has no internal phy.
- * Port 1 has an internal PHY at MDIO address 0.
- * Port 2 has an internal PHY at MDIO address 1.
- * ...
- * Port 5 has an internal PHY at MDIO address 4.
- * Port 6 has no internal PHY.
- */
-
- return port - 1;
-}
-
static int
qca8k_mdio_busy_wait(struct mii_bus *bus, u32 reg, u32 mask)
{
@@ -1797,6 +1783,10 @@ qca8k_setup(struct dsa_switch *ds)
if (ret)
return ret;
+ ret = qca8k_setup_led_ctrl(priv);
+ if (ret)
+ return ret;
+
qca8k_setup_pcs(priv, &priv->pcs_port_0, 0);
qca8k_setup_pcs(priv, &priv->pcs_port_6, 6);
diff --git a/drivers/net/dsa/qca/qca8k-leds.c b/drivers/net/dsa/qca/qca8k-leds.c
new file mode 100644
index 000000000000..b883692b7d86
--- /dev/null
+++ b/drivers/net/dsa/qca/qca8k-leds.c
@@ -0,0 +1,277 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/regmap.h>
+#include <net/dsa.h>
+
+#include "qca8k.h"
+#include "qca8k_leds.h"
+
+static int
+qca8k_get_enable_led_reg(int port_num, int led_num, struct qca8k_led_pattern_en *reg_info)
+{
+ switch (port_num) {
+ case 0:
+ reg_info->reg = QCA8K_LED_CTRL_REG(led_num);
+ reg_info->shift = QCA8K_LED_PHY0123_CONTROL_RULE_SHIFT;
+ break;
+ case 1:
+ case 2:
+ case 3:
+ /* Ports 1, 2 and 3 are controlled by a different reg */
+ reg_info->reg = QCA8K_LED_CTRL3_REG;
+ reg_info->shift = QCA8K_LED_PHY123_PATTERN_EN_SHIFT(port_num, led_num);
+ break;
+ case 4:
+ reg_info->reg = QCA8K_LED_CTRL_REG(led_num);
+ reg_info->shift = QCA8K_LED_PHY4_CONTROL_RULE_SHIFT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+qca8k_led_brightness_set(struct qca8k_led *led,
+ enum led_brightness brightness)
+{
+ struct qca8k_led_pattern_en reg_info;
+ struct qca8k_priv *priv = led->priv;
+ u32 mask, val;
+
+ qca8k_get_enable_led_reg(led->port_num, led->led_num, &reg_info);
+
+ val = QCA8K_LED_ALWAYS_OFF;
+ if (brightness)
+ val = QCA8K_LED_ALWAYS_ON;
+
+ /* The HW regs that control brightness are special: ports 1, 2 and 3
+ * are placed in a different reg.
+ *
+ * To control port 0 brightness:
+ * - the 2 bits (15, 14) of:
+ * - QCA8K_LED_CTRL0_REG for led1
+ * - QCA8K_LED_CTRL1_REG for led2
+ * - QCA8K_LED_CTRL2_REG for led3
+ *
+ * To control port 4:
+ * - the 2 bits (31, 30) of:
+ * - QCA8K_LED_CTRL0_REG for led1
+ * - QCA8K_LED_CTRL1_REG for led2
+ * - QCA8K_LED_CTRL2_REG for led3
+ *
+ * To control port 1:
+ * - the 2 bits at (9, 8) of QCA8K_LED_CTRL3_REG are used for led1
+ * - the 2 bits at (11, 10) of QCA8K_LED_CTRL3_REG are used for led2
+ * - the 2 bits at (13, 12) of QCA8K_LED_CTRL3_REG are used for led3
+ *
+ * To control port 2:
+ * - the 2 bits at (15, 14) of QCA8K_LED_CTRL3_REG are used for led1
+ * - the 2 bits at (17, 16) of QCA8K_LED_CTRL3_REG are used for led2
+ * - the 2 bits at (19, 18) of QCA8K_LED_CTRL3_REG are used for led3
+ *
+ * To control port 3:
+ * - the 2 bits at (21, 20) of QCA8K_LED_CTRL3_REG are used for led1
+ * - the 2 bits at (23, 22) of QCA8K_LED_CTRL3_REG are used for led2
+ * - the 2 bits at (25, 24) of QCA8K_LED_CTRL3_REG are used for led3
+ *
+ * To abstract this and keep the code small, we use the port and led
+ * num to calculate the shift and the correct reg, since there is no
+ * 1:1 map of LEDs to regs.
+ */
+ if (led->port_num == 0 || led->port_num == 4) {
+ mask = QCA8K_LED_PATTERN_EN_MASK;
+ val <<= QCA8K_LED_PATTERN_EN_SHIFT;
+ } else {
+ mask = QCA8K_LED_PHY123_PATTERN_EN_MASK;
+ }
+
+ return regmap_update_bits(priv->regmap, reg_info.reg,
+ mask << reg_info.shift,
+ val << reg_info.shift);
+}
+
+static int
+qca8k_cled_brightness_set_blocking(struct led_classdev *ldev,
+ enum led_brightness brightness)
+{
+ struct qca8k_led *led = container_of(ldev, struct qca8k_led, cdev);
+
+ return qca8k_led_brightness_set(led, brightness);
+}
+
+static enum led_brightness
+qca8k_led_brightness_get(struct qca8k_led *led)
+{
+ struct qca8k_led_pattern_en reg_info;
+ struct qca8k_priv *priv = led->priv;
+ u32 val;
+ int ret;
+
+ qca8k_get_enable_led_reg(led->port_num, led->led_num, &reg_info);
+
+ ret = regmap_read(priv->regmap, reg_info.reg, &val);
+ if (ret)
+ return 0;
+
+ val >>= reg_info.shift;
+
+ if (led->port_num == 0 || led->port_num == 4) {
+ val &= QCA8K_LED_PATTERN_EN_MASK;
+ val >>= QCA8K_LED_PATTERN_EN_SHIFT;
+ } else {
+ val &= QCA8K_LED_PHY123_PATTERN_EN_MASK;
+ }
+
+ /* Assume brightness ON only when the LED is set to always ON */
+ return val == QCA8K_LED_ALWAYS_ON;
+}
+
+static int
+qca8k_cled_blink_set(struct led_classdev *ldev,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ struct qca8k_led *led = container_of(ldev, struct qca8k_led, cdev);
+ u32 mask, val = QCA8K_LED_ALWAYS_BLINK_4HZ;
+ struct qca8k_led_pattern_en reg_info;
+ struct qca8k_priv *priv = led->priv;
+
+ if (*delay_on == 0 && *delay_off == 0) {
+ *delay_on = 125;
+ *delay_off = 125;
+ }
+
+ if (*delay_on != 125 || *delay_off != 125) {
+ /* The hardware only supports blinking at 4Hz. Fall back
+ * to software implementation in other cases.
+ */
+ return -EINVAL;
+ }
+
+ qca8k_get_enable_led_reg(led->port_num, led->led_num, &reg_info);
+
+ if (led->port_num == 0 || led->port_num == 4) {
+ mask = QCA8K_LED_PATTERN_EN_MASK;
+ val <<= QCA8K_LED_PATTERN_EN_SHIFT;
+ } else {
+ mask = QCA8K_LED_PHY123_PATTERN_EN_MASK;
+ }
+
+ regmap_update_bits(priv->regmap, reg_info.reg, mask << reg_info.shift,
+ val << reg_info.shift);
+
+ return 0;
+}
+
+static int
+qca8k_parse_port_leds(struct qca8k_priv *priv, struct fwnode_handle *port, int port_num)
+{
+ struct fwnode_handle *led = NULL, *leds = NULL;
+ struct led_init_data init_data = { };
+ struct dsa_switch *ds = priv->ds;
+ enum led_default_state state;
+ struct qca8k_led *port_led;
+ int led_num, led_index;
+ int ret;
+
+ leds = fwnode_get_named_child_node(port, "leds");
+ if (!leds) {
+ dev_dbg(priv->dev, "No Leds node specified in device tree for port %d!\n",
+ port_num);
+ return 0;
+ }
+
+ fwnode_for_each_child_node(leds, led) {
+ /* Reg represents the led number of the port.
+ * Each port can have at most 3 leds attached.
+ * Commonly:
+ * 1. is the gigabit led
+ * 2. is the mbit led
+ * 3. is an additional status led
+ */
+ if (fwnode_property_read_u32(led, "reg", &led_num))
+ continue;
+
+ if (led_num >= QCA8K_LED_PORT_COUNT) {
+ dev_warn(priv->dev, "Invalid LED reg %d defined for port %d",
+ led_num, port_num);
+ continue;
+ }
+
+ led_index = QCA8K_LED_PORT_INDEX(port_num, led_num);
+
+ port_led = &priv->ports_led[led_index];
+ port_led->port_num = port_num;
+ port_led->led_num = led_num;
+ port_led->priv = priv;
+
+ state = led_init_default_state_get(led);
+ switch (state) {
+ case LEDS_DEFSTATE_ON:
+ port_led->cdev.brightness = 1;
+ qca8k_led_brightness_set(port_led, 1);
+ break;
+ case LEDS_DEFSTATE_KEEP:
+ port_led->cdev.brightness =
+ qca8k_led_brightness_get(port_led);
+ break;
+ default:
+ port_led->cdev.brightness = 0;
+ qca8k_led_brightness_set(port_led, 0);
+ }
+
+ port_led->cdev.max_brightness = 1;
+ port_led->cdev.brightness_set_blocking = qca8k_cled_brightness_set_blocking;
+ port_led->cdev.blink_set = qca8k_cled_blink_set;
+ init_data.default_label = ":port";
+ init_data.fwnode = led;
+ init_data.devname_mandatory = true;
+ init_data.devicename = kasprintf(GFP_KERNEL, "%s:0%d", ds->slave_mii_bus->id,
+ port_num);
+ if (!init_data.devicename)
+ return -ENOMEM;
+
+ ret = devm_led_classdev_register_ext(priv->dev, &port_led->cdev, &init_data);
+ if (ret)
+ dev_warn(priv->dev, "Failed to init LED %d for port %d", led_num, port_num);
+
+ kfree(init_data.devicename);
+ }
+
+ return 0;
+}
+
+int
+qca8k_setup_led_ctrl(struct qca8k_priv *priv)
+{
+ struct fwnode_handle *ports, *port;
+ int port_num;
+ int ret;
+
+ ports = device_get_named_child_node(priv->dev, "ports");
+ if (!ports) {
+ dev_info(priv->dev, "No ports node specified in device tree!");
+ return 0;
+ }
+
+ fwnode_for_each_child_node(ports, port) {
+ if (fwnode_property_read_u32(port, "reg", &port_num))
+ continue;
+
+ /* Skip CPU port 0 and CPU port 6, as LEDs are not supported there */
+ if (port_num == 0 || port_num == 6)
+ continue;
+
+ /* Each port can have at most 3 different leds attached.
+ * Switch ports are numbered 0 to 6, but ports 0 and 6 are CPU
+ * ports. The port index needs to be decreased by one to identify
+ * the correct PHY for LED setup.
+ */
+ ret = qca8k_parse_port_leds(priv, port, qca8k_port_to_phy(port_num));
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
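The QCA8K_LED_PHY123_PATTERN_EN_SHIFT() formula can be checked against the bit map spelled out in qca8k_led_brightness_set() with a few lines of standalone C:

	#include <stdio.h>

	#define PHY123_SHIFT(_phy, _led) ((((_phy) - 1) * 6) + 8 + (2 * (_led)))

	int main(void)
	{
		/* led index is 0-based: led1 = 0, led2 = 1, led3 = 2 */
		printf("port 1 led1: shift %d\n", PHY123_SHIFT(1, 0)); /* 8, bits (9, 8) */
		printf("port 2 led2: shift %d\n", PHY123_SHIFT(2, 1)); /* 16, bits (17, 16) */
		printf("port 3 led3: shift %d\n", PHY123_SHIFT(3, 2)); /* 24, bits (25, 24) */
		return 0;
	}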
diff --git a/drivers/net/dsa/qca/qca8k.h b/drivers/net/dsa/qca/qca8k.h
index 7996975d29d3..c5cc8a172d65 100644
--- a/drivers/net/dsa/qca/qca8k.h
+++ b/drivers/net/dsa/qca/qca8k.h
@@ -11,6 +11,7 @@
#include <linux/delay.h>
#include <linux/regmap.h>
#include <linux/gpio.h>
+#include <linux/leds.h>
#include <linux/dsa/tag_qca.h>
#define QCA8K_ETHERNET_MDIO_PRIORITY 7
@@ -85,6 +86,51 @@
#define QCA8K_MDIO_MASTER_DATA(x) FIELD_PREP(QCA8K_MDIO_MASTER_DATA_MASK, x)
#define QCA8K_MDIO_MASTER_MAX_PORTS 5
#define QCA8K_MDIO_MASTER_MAX_REG 32
+
+/* LED control register */
+#define QCA8K_LED_PORT_COUNT 3
+#define QCA8K_LED_COUNT ((QCA8K_NUM_PORTS - QCA8K_NUM_CPU_PORTS) * QCA8K_LED_PORT_COUNT)
+#define QCA8K_LED_RULE_COUNT 6
+#define QCA8K_LED_RULE_MAX 11
+#define QCA8K_LED_PORT_INDEX(_phy, _led) (((_phy) * QCA8K_LED_PORT_COUNT) + (_led))
+
+#define QCA8K_LED_PHY123_PATTERN_EN_SHIFT(_phy, _led) ((((_phy) - 1) * 6) + 8 + (2 * (_led)))
+#define QCA8K_LED_PHY123_PATTERN_EN_MASK GENMASK(1, 0)
+
+#define QCA8K_LED_PHY0123_CONTROL_RULE_SHIFT 0
+#define QCA8K_LED_PHY4_CONTROL_RULE_SHIFT 16
+
+#define QCA8K_LED_CTRL_REG(_i) (0x050 + (_i) * 4)
+#define QCA8K_LED_CTRL0_REG 0x50
+#define QCA8K_LED_CTRL1_REG 0x54
+#define QCA8K_LED_CTRL2_REG 0x58
+#define QCA8K_LED_CTRL3_REG 0x5C
+#define QCA8K_LED_CTRL_SHIFT(_i) (((_i) % 2) * 16)
+#define QCA8K_LED_CTRL_MASK GENMASK(15, 0)
+#define QCA8K_LED_RULE_MASK GENMASK(13, 0)
+#define QCA8K_LED_BLINK_FREQ_MASK GENMASK(1, 0)
+#define QCA8K_LED_BLINK_FREQ_SHIFT 0
+#define QCA8K_LED_BLINK_2HZ 0
+#define QCA8K_LED_BLINK_4HZ 1
+#define QCA8K_LED_BLINK_8HZ 2
+#define QCA8K_LED_BLINK_AUTO 3
+#define QCA8K_LED_LINKUP_OVER_MASK BIT(2)
+#define QCA8K_LED_TX_BLINK_MASK BIT(4)
+#define QCA8K_LED_RX_BLINK_MASK BIT(5)
+#define QCA8K_LED_COL_BLINK_MASK BIT(7)
+#define QCA8K_LED_LINK_10M_EN_MASK BIT(8)
+#define QCA8K_LED_LINK_100M_EN_MASK BIT(9)
+#define QCA8K_LED_LINK_1000M_EN_MASK BIT(10)
+#define QCA8K_LED_POWER_ON_LIGHT_MASK BIT(11)
+#define QCA8K_LED_HALF_DUPLEX_MASK BIT(12)
+#define QCA8K_LED_FULL_DUPLEX_MASK BIT(13)
+#define QCA8K_LED_PATTERN_EN_MASK GENMASK(15, 14)
+#define QCA8K_LED_PATTERN_EN_SHIFT 14
+#define QCA8K_LED_ALWAYS_OFF 0
+#define QCA8K_LED_ALWAYS_BLINK_4HZ 1
+#define QCA8K_LED_ALWAYS_ON 2
+#define QCA8K_LED_RULE_CONTROLLED 3
+
#define QCA8K_GOL_MAC_ADDR0 0x60
#define QCA8K_GOL_MAC_ADDR1 0x64
#define QCA8K_MAX_FRAME_SIZE 0x78
@@ -382,6 +428,19 @@ struct qca8k_pcs {
int port;
};
+struct qca8k_led_pattern_en {
+ u32 reg;
+ u8 shift;
+};
+
+struct qca8k_led {
+ u8 port_num;
+ u8 led_num;
+ u16 old_rule;
+ struct qca8k_priv *priv;
+ struct led_classdev cdev;
+};
+
struct qca8k_priv {
u8 switch_id;
u8 switch_revision;
@@ -406,6 +465,7 @@ struct qca8k_priv {
struct qca8k_pcs pcs_port_0;
struct qca8k_pcs pcs_port_6;
const struct qca8k_match_data *info;
+ struct qca8k_led ports_led[QCA8K_LED_COUNT];
};
struct qca8k_mib_desc {
@@ -421,6 +481,20 @@ struct qca8k_fdb {
u8 mac[6];
};
+static inline u32 qca8k_port_to_phy(int port)
+{
+ /* From Andrew Lunn:
+ * Port 0 has no internal phy.
+ * Port 1 has an internal PHY at MDIO address 0.
+ * Port 2 has an internal PHY at MDIO address 1.
+ * ...
+ * Port 5 has an internal PHY at MDIO address 4.
+ * Port 6 has no internal PHY.
+ */
+
+ return port - 1;
+}
+
/* Common setup function */
extern const struct qca8k_mib_desc ar8327_mib[];
extern const struct regmap_access_table qca8k_readable_table;
diff --git a/drivers/net/dsa/qca/qca8k_leds.h b/drivers/net/dsa/qca/qca8k_leds.h
new file mode 100644
index 000000000000..ab367f05b173
--- /dev/null
+++ b/drivers/net/dsa/qca/qca8k_leds.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __QCA8K_LEDS_H
+#define __QCA8K_LEDS_H
+
+/* LEDs support functions */
+#ifdef CONFIG_NET_DSA_QCA8K_LEDS_SUPPORT
+int qca8k_setup_led_ctrl(struct qca8k_priv *priv);
+#else
+static inline int qca8k_setup_led_ctrl(struct qca8k_priv *priv)
+{
+ return 0;
+}
+#endif
+
+#endif /* __QCA8K_LEDS_H */
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index ab42f75b9413..235fcacef5c5 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -186,4 +186,16 @@ config AMD_XGBE_HAVE_ECC
bool
default n
+config PDS_CORE
+ tristate "AMD/Pensando Data Systems Core Device Support"
+ depends on 64BIT && PCI
+ help
+ This enables the support for the AMD/Pensando Core device family of
+ adapters. More specific information on this driver can be
+ found in
+ <file:Documentation/networking/device_drivers/ethernet/amd/pds_core.rst>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called pds_core.
+
endif # NET_VENDOR_AMD
diff --git a/drivers/net/ethernet/amd/Makefile b/drivers/net/ethernet/amd/Makefile
index 42742afe9115..2dcfb84731e1 100644
--- a/drivers/net/ethernet/amd/Makefile
+++ b/drivers/net/ethernet/amd/Makefile
@@ -17,3 +17,4 @@ obj-$(CONFIG_PCNET32) += pcnet32.o
obj-$(CONFIG_SUN3LANCE) += sun3lance.o
obj-$(CONFIG_SUNLANCE) += sunlance.o
obj-$(CONFIG_AMD_XGBE) += xgbe/
+obj-$(CONFIG_PDS_CORE) += pds_core/
diff --git a/drivers/net/ethernet/amd/pds_core/Makefile b/drivers/net/ethernet/amd/pds_core/Makefile
new file mode 100644
index 000000000000..0abc33ce826c
--- /dev/null
+++ b/drivers/net/ethernet/amd/pds_core/Makefile
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2023 Advanced Micro Devices, Inc.
+
+obj-$(CONFIG_PDS_CORE) := pds_core.o
+
+pds_core-y := main.o \
+ devlink.o \
+ auxbus.o \
+ dev.o \
+ adminq.o \
+ core.o \
+ fw.o
+
+pds_core-$(CONFIG_DEBUG_FS) += debugfs.o
diff --git a/drivers/net/ethernet/amd/pds_core/adminq.c b/drivers/net/ethernet/amd/pds_core/adminq.c
new file mode 100644
index 000000000000..045fe133f6ee
--- /dev/null
+++ b/drivers/net/ethernet/amd/pds_core/adminq.c
@@ -0,0 +1,290 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2023 Advanced Micro Devices, Inc */
+
+#include <linux/dynamic_debug.h>
+
+#include "core.h"
+
+struct pdsc_wait_context {
+ struct pdsc_qcq *qcq;
+ struct completion wait_completion;
+};
+
+static int pdsc_process_notifyq(struct pdsc_qcq *qcq)
+{
+ union pds_core_notifyq_comp *comp;
+ struct pdsc *pdsc = qcq->pdsc;
+ struct pdsc_cq *cq = &qcq->cq;
+ struct pdsc_cq_info *cq_info;
+ int nq_work = 0;
+ u64 eid;
+
+ cq_info = &cq->info[cq->tail_idx];
+ comp = cq_info->comp;
+ eid = le64_to_cpu(comp->event.eid);
+ while (eid > pdsc->last_eid) {
+ u16 ecode = le16_to_cpu(comp->event.ecode);
+
+ switch (ecode) {
+ case PDS_EVENT_LINK_CHANGE:
+ dev_info(pdsc->dev, "NotifyQ LINK_CHANGE ecode %d eid %lld\n",
+ ecode, eid);
+ pdsc_notify(PDS_EVENT_LINK_CHANGE, comp);
+ break;
+
+ case PDS_EVENT_RESET:
+ dev_info(pdsc->dev, "NotifyQ RESET ecode %d eid %lld\n",
+ ecode, eid);
+ pdsc_notify(PDS_EVENT_RESET, comp);
+ break;
+
+ case PDS_EVENT_XCVR:
+ dev_info(pdsc->dev, "NotifyQ XCVR ecode %d eid %lld\n",
+ ecode, eid);
+ break;
+
+ default:
+ dev_info(pdsc->dev, "NotifyQ ecode %d eid %lld\n",
+ ecode, eid);
+ break;
+ }
+
+ pdsc->last_eid = eid;
+ cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);
+ cq_info = &cq->info[cq->tail_idx];
+ comp = cq_info->comp;
+ eid = le64_to_cpu(comp->event.eid);
+
+ nq_work++;
+ }
+
+ qcq->accum_work += nq_work;
+
+ return nq_work;
+}
+
+void pdsc_process_adminq(struct pdsc_qcq *qcq)
+{
+ union pds_core_adminq_comp *comp;
+ struct pdsc_queue *q = &qcq->q;
+ struct pdsc *pdsc = qcq->pdsc;
+ struct pdsc_cq *cq = &qcq->cq;
+ struct pdsc_q_info *q_info;
+ unsigned long irqflags;
+ int nq_work = 0;
+ int aq_work = 0;
+ int credits;
+
+ /* Don't process AdminQ when shutting down */
+ if (pdsc->state & BIT_ULL(PDSC_S_STOPPING_DRIVER)) {
+ dev_err(pdsc->dev, "%s: called while PDSC_S_STOPPING_DRIVER\n",
+ __func__);
+ return;
+ }
+
+ /* Check for NotifyQ event */
+ nq_work = pdsc_process_notifyq(&pdsc->notifyqcq);
+
+ /* Check for empty queue, which can happen if the interrupt was
+ * for a NotifyQ event and there are no new AdminQ completions.
+ */
+ if (q->tail_idx == q->head_idx)
+ goto credits;
+
+ /* Find the first completion to clean,
+ * run the callback in the related q_info,
+ * and continue while we still match done color
+ */
+ spin_lock_irqsave(&pdsc->adminq_lock, irqflags);
+ comp = cq->info[cq->tail_idx].comp;
+ while (pdsc_color_match(comp->color, cq->done_color)) {
+ q_info = &q->info[q->tail_idx];
+ q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
+
+ /* Copy out the completion data */
+ memcpy(q_info->dest, comp, sizeof(*comp));
+
+ complete_all(&q_info->wc->wait_completion);
+
+ if (cq->tail_idx == cq->num_descs - 1)
+ cq->done_color = !cq->done_color;
+ cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);
+ comp = cq->info[cq->tail_idx].comp;
+
+ aq_work++;
+ }
+ spin_unlock_irqrestore(&pdsc->adminq_lock, irqflags);
+
+ qcq->accum_work += aq_work;
+
+credits:
+ /* Return the interrupt credits, one for each completion */
+ credits = nq_work + aq_work;
+ if (credits)
+ pds_core_intr_credits(&pdsc->intr_ctrl[qcq->intx],
+ credits,
+ PDS_CORE_INTR_CRED_REARM);
+}
+
+void pdsc_work_thread(struct work_struct *work)
+{
+ struct pdsc_qcq *qcq = container_of(work, struct pdsc_qcq, work);
+
+ pdsc_process_adminq(qcq);
+}
+
+irqreturn_t pdsc_adminq_isr(int irq, void *data)
+{
+ struct pdsc_qcq *qcq = data;
+ struct pdsc *pdsc = qcq->pdsc;
+
+ /* Don't process AdminQ when shutting down */
+ if (pdsc->state & BIT_ULL(PDSC_S_STOPPING_DRIVER)) {
+ dev_err(pdsc->dev, "%s: called while PDSC_S_STOPPING_DRIVER\n",
+ __func__);
+ return IRQ_HANDLED;
+ }
+
+ queue_work(pdsc->wq, &qcq->work);
+ pds_core_intr_mask(&pdsc->intr_ctrl[irq], PDS_CORE_INTR_MASK_CLEAR);
+
+ return IRQ_HANDLED;
+}
+
+static int __pdsc_adminq_post(struct pdsc *pdsc,
+ struct pdsc_qcq *qcq,
+ union pds_core_adminq_cmd *cmd,
+ union pds_core_adminq_comp *comp,
+ struct pdsc_wait_context *wc)
+{
+ struct pdsc_queue *q = &qcq->q;
+ struct pdsc_q_info *q_info;
+ unsigned long irqflags;
+ unsigned int avail;
+ int index;
+ int ret;
+
+ spin_lock_irqsave(&pdsc->adminq_lock, irqflags);
+
+ /* Check for space in the queue */
+ avail = q->tail_idx;
+ if (q->head_idx >= avail)
+ avail += q->num_descs - q->head_idx - 1;
+ else
+ avail -= q->head_idx + 1;
+ if (!avail) {
+ ret = -ENOSPC;
+ goto err_out_unlock;
+ }
+
+ /* Check that the FW is running */
+ if (!pdsc_is_fw_running(pdsc)) {
+ u8 fw_status = ioread8(&pdsc->info_regs->fw_status);
+
+ dev_info(pdsc->dev, "%s: post failed - fw not running %#02x:\n",
+ __func__, fw_status);
+ ret = -ENXIO;
+
+ goto err_out_unlock;
+ }
+
+ /* Post the request */
+ index = q->head_idx;
+ q_info = &q->info[index];
+ q_info->wc = wc;
+ q_info->dest = comp;
+ memcpy(q_info->desc, cmd, sizeof(*cmd));
+
+ dev_dbg(pdsc->dev, "head_idx %d tail_idx %d\n",
+ q->head_idx, q->tail_idx);
+ dev_dbg(pdsc->dev, "post admin queue command:\n");
+ dynamic_hex_dump("cmd ", DUMP_PREFIX_OFFSET, 16, 1,
+ cmd, sizeof(*cmd), true);
+
+ q->head_idx = (q->head_idx + 1) & (q->num_descs - 1);
+
+ pds_core_dbell_ring(pdsc->kern_dbpage,
+ q->hw_type, q->dbval | q->head_idx);
+ ret = index;
+
+err_out_unlock:
+ spin_unlock_irqrestore(&pdsc->adminq_lock, irqflags);
+ return ret;
+}
+
+int pdsc_adminq_post(struct pdsc *pdsc,
+ union pds_core_adminq_cmd *cmd,
+ union pds_core_adminq_comp *comp,
+ bool fast_poll)
+{
+ struct pdsc_wait_context wc = {
+ .wait_completion =
+ COMPLETION_INITIALIZER_ONSTACK(wc.wait_completion),
+ };
+ unsigned long poll_interval = 1;
+ unsigned long poll_jiffies;
+ unsigned long time_limit;
+ unsigned long time_start;
+ unsigned long time_done;
+ unsigned long remaining;
+ int err = 0;
+ int index;
+
+ wc.qcq = &pdsc->adminqcq;
+ index = __pdsc_adminq_post(pdsc, &pdsc->adminqcq, cmd, comp, &wc);
+ if (index < 0) {
+ err = index;
+ goto err_out;
+ }
+
+ time_start = jiffies;
+ time_limit = time_start + HZ * pdsc->devcmd_timeout;
+ do {
+ /* Timeslice the actual wait to catch IO errors etc early */
+ poll_jiffies = msecs_to_jiffies(poll_interval);
+ remaining = wait_for_completion_timeout(&wc.wait_completion,
+ poll_jiffies);
+ if (remaining)
+ break;
+
+ if (!pdsc_is_fw_running(pdsc)) {
+ u8 fw_status = ioread8(&pdsc->info_regs->fw_status);
+
+ dev_dbg(pdsc->dev, "%s: post wait failed - fw not running %#02x:\n",
+ __func__, fw_status);
+ err = -ENXIO;
+ break;
+ }
+
+ /* When fast_poll is not requested, prevent aggressive polling
+ * on failures due to timeouts by doing exponential back off.
+ */
+ if (!fast_poll && poll_interval < PDSC_ADMINQ_MAX_POLL_INTERVAL)
+ poll_interval <<= 1;
+ } while (time_before(jiffies, time_limit));
+ time_done = jiffies;
+ dev_dbg(pdsc->dev, "%s: elapsed %d msecs\n",
+ __func__, jiffies_to_msecs(time_done - time_start));
+
+ /* Check the results */
+ if (time_after_eq(time_done, time_limit))
+ err = -ETIMEDOUT;
+
+ dev_dbg(pdsc->dev, "read admin queue completion idx %d:\n", index);
+ dynamic_hex_dump("comp ", DUMP_PREFIX_OFFSET, 16, 1,
+ comp, sizeof(*comp), true);
+
+ if (remaining && comp->status)
+ err = pdsc_err_to_errno(comp->status);
+
+err_out:
+ if (err) {
+ dev_dbg(pdsc->dev, "%s: opcode %d status %d err %pe\n",
+ __func__, cmd->opcode, comp->status, ERR_PTR(err));
+ if (err == -ENXIO || err == -ETIMEDOUT)
+ queue_work(pdsc->wq, &pdsc->health_work);
+ }
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(pdsc_adminq_post);
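__pdsc_adminq_post() sizes the ring so that head == tail always means empty, keeping one descriptor unused. A standalone version of its free-slot computation, with a few worked cases:

	#include <stdio.h>

	/* Mirrors the "avail" logic in __pdsc_adminq_post() */
	static unsigned int ring_avail(unsigned int head, unsigned int tail,
				       unsigned int num_descs)
	{
		unsigned int avail = tail;

		if (head >= avail)
			avail += num_descs - head - 1;
		else
			avail -= head + 1;
		return avail;
	}

	int main(void)
	{
		printf("%u\n", ring_avail(0, 0, 16)); /* empty: 15 usable */
		printf("%u\n", ring_avail(5, 3, 16)); /* 2 in flight: 13 free */
		printf("%u\n", ring_avail(2, 3, 16)); /* full: 0, post -> -ENOSPC */
		return 0;
	}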
diff --git a/drivers/net/ethernet/amd/pds_core/auxbus.c b/drivers/net/ethernet/amd/pds_core/auxbus.c
new file mode 100644
index 000000000000..561af8e5b3ea
--- /dev/null
+++ b/drivers/net/ethernet/amd/pds_core/auxbus.c
@@ -0,0 +1,264 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2023 Advanced Micro Devices, Inc */
+
+#include <linux/pci.h>
+
+#include "core.h"
+#include <linux/pds/pds_auxbus.h>
+
+/**
+ * pds_client_register - Link the client to the firmware
+ * @pf_pdev: ptr to the PF driver struct
+ * @devname: name that includes service info, e.g. pds_core.vDPA
+ *
+ * Return: positive client ID on success, or
+ * negative for error
+ */
+int pds_client_register(struct pci_dev *pf_pdev, char *devname)
+{
+ union pds_core_adminq_comp comp = {};
+ union pds_core_adminq_cmd cmd = {};
+ struct pdsc *pf;
+ int err;
+ u16 ci;
+
+ pf = pci_get_drvdata(pf_pdev);
+ if (pf->state)
+ return -ENXIO;
+
+ cmd.client_reg.opcode = PDS_AQ_CMD_CLIENT_REG;
+ strscpy(cmd.client_reg.devname, devname,
+ sizeof(cmd.client_reg.devname));
+
+ err = pdsc_adminq_post(pf, &cmd, &comp, false);
+ if (err) {
+ dev_info(pf->dev, "register dev_name %s with DSC failed, status %d: %pe\n",
+ devname, comp.status, ERR_PTR(err));
+ return err;
+ }
+
+ ci = le16_to_cpu(comp.client_reg.client_id);
+ if (!ci) {
+ dev_err(pf->dev, "%s: device returned null client_id\n",
+ __func__);
+ return -EIO;
+ }
+
+ dev_dbg(pf->dev, "%s: device returned client_id %d for %s\n",
+ __func__, ci, devname);
+
+ return ci;
+}
+EXPORT_SYMBOL_GPL(pds_client_register);
+
+/**
+ * pds_client_unregister - Unlink the client from the firmware
+ * @pf_pdev: ptr to the PF driver struct
+ * @client_id: id returned from pds_client_register()
+ *
+ * Return: 0 on success, or
+ * negative for error
+ */
+int pds_client_unregister(struct pci_dev *pf_pdev, u16 client_id)
+{
+ union pds_core_adminq_comp comp = {};
+ union pds_core_adminq_cmd cmd = {};
+ struct pdsc *pf;
+ int err;
+
+ pf = pci_get_drvdata(pf_pdev);
+ if (pf->state)
+ return -ENXIO;
+
+ cmd.client_unreg.opcode = PDS_AQ_CMD_CLIENT_UNREG;
+ cmd.client_unreg.client_id = cpu_to_le16(client_id);
+
+ err = pdsc_adminq_post(pf, &cmd, &comp, false);
+ if (err)
+ dev_info(pf->dev, "unregister client_id %d failed, status %d: %pe\n",
+ client_id, comp.status, ERR_PTR(err));
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(pds_client_unregister);
+
+/**
+ * pds_client_adminq_cmd - Process an adminq request for the client
+ * @padev: ptr to the client device
+ * @req: ptr to buffer with request
+ * @req_len: length of actual struct used for request
+ * @resp: ptr to buffer where answer is to be copied
+ * @flags: optional flags from pds_core_adminq_flags
+ *
+ * Return: 0 on success, or
+ * negative for error
+ *
+ * Client sends pointers to request and response buffers
+ * Core copies request data into pds_core_client_request_cmd
+ * Core sets other fields as needed
+ * Core posts to AdminQ
+ * Core copies completion data into response buffer
+ */
+int pds_client_adminq_cmd(struct pds_auxiliary_dev *padev,
+ union pds_core_adminq_cmd *req,
+ size_t req_len,
+ union pds_core_adminq_comp *resp,
+ u64 flags)
+{
+ union pds_core_adminq_cmd cmd = {};
+ struct pci_dev *pf_pdev;
+ struct pdsc *pf;
+ size_t cp_len;
+ int err;
+
+ pf_pdev = pci_physfn(padev->vf_pdev);
+ pf = pci_get_drvdata(pf_pdev);
+
+ dev_dbg(pf->dev, "%s: %s opcode %d\n",
+ __func__, dev_name(&padev->aux_dev.dev), req->opcode);
+
+ if (pf->state)
+ return -ENXIO;
+
+ /* Wrap the client's request */
+ cmd.client_request.opcode = PDS_AQ_CMD_CLIENT_CMD;
+ cmd.client_request.client_id = cpu_to_le16(padev->client_id);
+ cp_len = min_t(size_t, req_len, sizeof(cmd.client_request.client_cmd));
+ memcpy(cmd.client_request.client_cmd, req, cp_len);
+
+ err = pdsc_adminq_post(pf, &cmd, resp,
+ !!(flags & PDS_AQ_FLAG_FASTPOLL));
+ if (err && err != -EAGAIN)
+ dev_info(pf->dev, "client admin cmd failed: %pe\n",
+ ERR_PTR(err));
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(pds_client_adminq_cmd);
+
+static void pdsc_auxbus_dev_release(struct device *dev)
+{
+ struct pds_auxiliary_dev *padev =
+ container_of(dev, struct pds_auxiliary_dev, aux_dev.dev);
+
+ kfree(padev);
+}
+
+static struct pds_auxiliary_dev *pdsc_auxbus_dev_register(struct pdsc *cf,
+ struct pdsc *pf,
+ u16 client_id,
+ char *name)
+{
+ struct auxiliary_device *aux_dev;
+ struct pds_auxiliary_dev *padev;
+ int err;
+
+ padev = kzalloc(sizeof(*padev), GFP_KERNEL);
+ if (!padev)
+ return ERR_PTR(-ENOMEM);
+
+ padev->vf_pdev = cf->pdev;
+ padev->client_id = client_id;
+
+ aux_dev = &padev->aux_dev;
+ aux_dev->name = name;
+ aux_dev->id = cf->uid;
+ aux_dev->dev.parent = cf->dev;
+ aux_dev->dev.release = pdsc_auxbus_dev_release;
+
+ err = auxiliary_device_init(aux_dev);
+ if (err < 0) {
+ dev_warn(cf->dev, "auxiliary_device_init of %s failed: %pe\n",
+ name, ERR_PTR(err));
+ goto err_out;
+ }
+
+ err = auxiliary_device_add(aux_dev);
+ if (err) {
+ dev_warn(cf->dev, "auxiliary_device_add of %s failed: %pe\n",
+ name, ERR_PTR(err));
+ goto err_out_uninit;
+ }
+
+ return padev;
+
+err_out_uninit:
+ auxiliary_device_uninit(aux_dev);
+err_out:
+ kfree(padev);
+ return ERR_PTR(err);
+}
+
+int pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf)
+{
+ struct pds_auxiliary_dev *padev;
+ int err = 0;
+
+ mutex_lock(&pf->config_lock);
+
+ padev = pf->vfs[cf->vf_id].padev;
+ if (padev) {
+ pds_client_unregister(pf->pdev, padev->client_id);
+ auxiliary_device_delete(&padev->aux_dev);
+ auxiliary_device_uninit(&padev->aux_dev);
+ padev->client_id = 0;
+ }
+ pf->vfs[cf->vf_id].padev = NULL;
+
+ mutex_unlock(&pf->config_lock);
+ return err;
+}
+
+int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf)
+{
+ struct pds_auxiliary_dev *padev;
+ enum pds_core_vif_types vt;
+ char devname[PDS_DEVNAME_LEN];
+ u16 vt_support;
+ int client_id;
+ int err = 0;
+
+ mutex_lock(&pf->config_lock);
+
+ /* We only support vDPA so far, so it is the only VIF type
+ * we verify is available in the Core device and enabled in
+ * the devlink param. In the future this might become a loop
+ * over several VIF types.
+ */
+
+ /* Verify that the type is supported and enabled. It is not
+ * an error if there is no auxbus device support for this
+ * VF; it just means something else needs to happen with it.
+ */
+ vt = PDS_DEV_TYPE_VDPA;
+ vt_support = !!le16_to_cpu(pf->dev_ident.vif_types[vt]);
+ if (!(vt_support &&
+ pf->viftype_status[vt].supported &&
+ pf->viftype_status[vt].enabled))
+ goto out_unlock;
+
+ /* Need to register with FW and get the client_id before
+ * creating the aux device so that the aux client can run
+ * adminq commands as part of its probe.
+ */
+ snprintf(devname, sizeof(devname), "%s.%s.%d",
+ PDS_CORE_DRV_NAME, pf->viftype_status[vt].name, cf->uid);
+ client_id = pds_client_register(pf->pdev, devname);
+ if (client_id < 0) {
+ err = client_id;
+ goto out_unlock;
+ }
+
+ padev = pdsc_auxbus_dev_register(cf, pf, client_id,
+ pf->viftype_status[vt].name);
+ if (IS_ERR(padev)) {
+ pds_client_unregister(pf->pdev, client_id);
+ err = PTR_ERR(padev);
+ goto out_unlock;
+ }
+ pf->vfs[cf->vf_id].padev = padev;
+
+out_unlock:
+ mutex_unlock(&pf->config_lock);
+ return err;
+}
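Devices created here are matched on the auxiliary bus by the "pds_core.<name>" prefix of their device name. A client driver would bind along these lines; this is a sketch under that assumption, not the actual pds_vdpa code:

	#include <linux/auxiliary_bus.h>
	#include <linux/module.h>
	#include <linux/pds/pds_auxbus.h>

	static int client_probe(struct auxiliary_device *adev,
				const struct auxiliary_device_id *id)
	{
		struct pds_auxiliary_dev *padev =
			container_of(adev, struct pds_auxiliary_dev, aux_dev);

		/* padev->client_id was registered with the FW before the
		 * device was added, so adminq commands work from probe on.
		 */
		dev_info(&adev->dev, "bound client id %u\n", padev->client_id);
		return 0;
	}

	static const struct auxiliary_device_id client_id_table[] = {
		{ .name = "pds_core.vDPA" },
		{}
	};
	MODULE_DEVICE_TABLE(auxiliary, client_id_table);

	static struct auxiliary_driver client_driver = {
		.name = "vDPA",
		.probe = client_probe,
		.id_table = client_id_table,
	};
	module_auxiliary_driver(client_driver);
	MODULE_LICENSE("GPL");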
diff --git a/drivers/net/ethernet/amd/pds_core/core.c b/drivers/net/ethernet/amd/pds_core/core.c
new file mode 100644
index 000000000000..483a070d96fa
--- /dev/null
+++ b/drivers/net/ethernet/amd/pds_core/core.c
@@ -0,0 +1,597 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2023 Advanced Micro Devices, Inc */
+
+#include <linux/pci.h>
+#include <linux/vmalloc.h>
+
+#include "core.h"
+
+static BLOCKING_NOTIFIER_HEAD(pds_notify_chain);
+
+int pdsc_register_notify(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&pds_notify_chain, nb);
+}
+EXPORT_SYMBOL_GPL(pdsc_register_notify);
+
+void pdsc_unregister_notify(struct notifier_block *nb)
+{
+ blocking_notifier_chain_unregister(&pds_notify_chain, nb);
+}
+EXPORT_SYMBOL_GPL(pdsc_unregister_notify);
+
+void pdsc_notify(unsigned long event, void *data)
+{
+ blocking_notifier_call_chain(&pds_notify_chain, event, data);
+}
+
+void pdsc_intr_free(struct pdsc *pdsc, int index)
+{
+ struct pdsc_intr_info *intr_info;
+
+ if (index >= pdsc->nintrs || index < 0) {
+ WARN(true, "bad intr index %d\n", index);
+ return;
+ }
+
+ intr_info = &pdsc->intr_info[index];
+ if (!intr_info->vector)
+ return;
+ dev_dbg(pdsc->dev, "%s: idx %d vec %d name %s\n",
+ __func__, index, intr_info->vector, intr_info->name);
+
+ pds_core_intr_mask(&pdsc->intr_ctrl[index], PDS_CORE_INTR_MASK_SET);
+ pds_core_intr_clean(&pdsc->intr_ctrl[index]);
+
+ free_irq(intr_info->vector, intr_info->data);
+
+ memset(intr_info, 0, sizeof(*intr_info));
+}
+
+int pdsc_intr_alloc(struct pdsc *pdsc, char *name,
+ irq_handler_t handler, void *data)
+{
+ struct pdsc_intr_info *intr_info;
+ unsigned int index;
+ int err;
+
+ /* Find the first available interrupt */
+ for (index = 0; index < pdsc->nintrs; index++)
+ if (!pdsc->intr_info[index].vector)
+ break;
+ if (index >= pdsc->nintrs) {
+ dev_warn(pdsc->dev, "%s: no intr, index=%d nintrs=%d\n",
+ __func__, index, pdsc->nintrs);
+ return -ENOSPC;
+ }
+
+ pds_core_intr_clean_flags(&pdsc->intr_ctrl[index],
+ PDS_CORE_INTR_CRED_RESET_COALESCE);
+
+ intr_info = &pdsc->intr_info[index];
+
+ intr_info->index = index;
+ intr_info->data = data;
+ strscpy(intr_info->name, name, sizeof(intr_info->name));
+
+ /* Get the OS vector number for the interrupt */
+ err = pci_irq_vector(pdsc->pdev, index);
+ if (err < 0) {
+ dev_err(pdsc->dev, "failed to get intr vector index %d: %pe\n",
+ index, ERR_PTR(err));
+ goto err_out_free_intr;
+ }
+ intr_info->vector = err;
+
+ /* Init the device's intr mask */
+ pds_core_intr_clean(&pdsc->intr_ctrl[index]);
+ pds_core_intr_mask_assert(&pdsc->intr_ctrl[index], 1);
+ pds_core_intr_mask(&pdsc->intr_ctrl[index], PDS_CORE_INTR_MASK_SET);
+
+ /* Register the isr with a name */
+ err = request_irq(intr_info->vector, handler, 0, intr_info->name, data);
+ if (err) {
+ dev_err(pdsc->dev, "failed to get intr irq vector %d: %pe\n",
+ intr_info->vector, ERR_PTR(err));
+ goto err_out_free_intr;
+ }
+
+ return index;
+
+err_out_free_intr:
+ pdsc_intr_free(pdsc, index);
+ return err;
+}
+
+static void pdsc_qcq_intr_free(struct pdsc *pdsc, struct pdsc_qcq *qcq)
+{
+ if (!(qcq->flags & PDS_CORE_QCQ_F_INTR) ||
+ qcq->intx == PDS_CORE_INTR_INDEX_NOT_ASSIGNED)
+ return;
+
+ pdsc_intr_free(pdsc, qcq->intx);
+ qcq->intx = PDS_CORE_INTR_INDEX_NOT_ASSIGNED;
+}
+
+static int pdsc_qcq_intr_alloc(struct pdsc *pdsc, struct pdsc_qcq *qcq)
+{
+ char name[PDSC_INTR_NAME_MAX_SZ];
+ int index;
+
+ if (!(qcq->flags & PDS_CORE_QCQ_F_INTR)) {
+ qcq->intx = PDS_CORE_INTR_INDEX_NOT_ASSIGNED;
+ return 0;
+ }
+
+ snprintf(name, sizeof(name), "%s-%d-%s",
+ PDS_CORE_DRV_NAME, pdsc->pdev->bus->number, qcq->q.name);
+ index = pdsc_intr_alloc(pdsc, name, pdsc_adminq_isr, qcq);
+ if (index < 0)
+ return index;
+ qcq->intx = index;
+
+ return 0;
+}
+
+void pdsc_qcq_free(struct pdsc *pdsc, struct pdsc_qcq *qcq)
+{
+ struct device *dev = pdsc->dev;
+
+ if (!(qcq && qcq->pdsc))
+ return;
+
+ pdsc_debugfs_del_qcq(qcq);
+
+ pdsc_qcq_intr_free(pdsc, qcq);
+
+ if (qcq->q_base)
+ dma_free_coherent(dev, qcq->q_size,
+ qcq->q_base, qcq->q_base_pa);
+
+ if (qcq->cq_base)
+ dma_free_coherent(dev, qcq->cq_size,
+ qcq->cq_base, qcq->cq_base_pa);
+
+ if (qcq->cq.info)
+ vfree(qcq->cq.info);
+
+ if (qcq->q.info)
+ vfree(qcq->q.info);
+
+ memset(qcq, 0, sizeof(*qcq));
+}
+
+static void pdsc_q_map(struct pdsc_queue *q, void *base, dma_addr_t base_pa)
+{
+ struct pdsc_q_info *cur;
+ unsigned int i;
+
+ q->base = base;
+ q->base_pa = base_pa;
+
+ for (i = 0, cur = q->info; i < q->num_descs; i++, cur++)
+ cur->desc = base + (i * q->desc_size);
+}
+
+static void pdsc_cq_map(struct pdsc_cq *cq, void *base, dma_addr_t base_pa)
+{
+ struct pdsc_cq_info *cur;
+ unsigned int i;
+
+ cq->base = base;
+ cq->base_pa = base_pa;
+
+ for (i = 0, cur = cq->info; i < cq->num_descs; i++, cur++)
+ cur->comp = base + (i * cq->desc_size);
+}
+
+int pdsc_qcq_alloc(struct pdsc *pdsc, unsigned int type, unsigned int index,
+ const char *name, unsigned int flags, unsigned int num_descs,
+ unsigned int desc_size, unsigned int cq_desc_size,
+ unsigned int pid, struct pdsc_qcq *qcq)
+{
+ struct device *dev = pdsc->dev;
+ void *q_base, *cq_base;
+ dma_addr_t cq_base_pa;
+ dma_addr_t q_base_pa;
+ int err;
+
+ qcq->q.info = vzalloc(num_descs * sizeof(*qcq->q.info));
+ if (!qcq->q.info) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ qcq->pdsc = pdsc;
+ qcq->flags = flags;
+ INIT_WORK(&qcq->work, pdsc_work_thread);
+
+ qcq->q.type = type;
+ qcq->q.index = index;
+ qcq->q.num_descs = num_descs;
+ qcq->q.desc_size = desc_size;
+ qcq->q.tail_idx = 0;
+ qcq->q.head_idx = 0;
+ qcq->q.pid = pid;
+ snprintf(qcq->q.name, sizeof(qcq->q.name), "%s%u", name, index);
+
+ err = pdsc_qcq_intr_alloc(pdsc, qcq);
+ if (err)
+ goto err_out_free_q_info;
+
+ qcq->cq.info = vzalloc(num_descs * sizeof(*qcq->cq.info));
+ if (!qcq->cq.info) {
+ err = -ENOMEM;
+ goto err_out_free_irq;
+ }
+
+ qcq->cq.bound_intr = &pdsc->intr_info[qcq->intx];
+ qcq->cq.num_descs = num_descs;
+ qcq->cq.desc_size = cq_desc_size;
+ qcq->cq.tail_idx = 0;
+ qcq->cq.done_color = 1;
+
+ if (flags & PDS_CORE_QCQ_F_NOTIFYQ) {
+ /* q & cq need to be contiguous in case of notifyq */
+ qcq->q_size = PDS_PAGE_SIZE +
+ ALIGN(num_descs * desc_size, PDS_PAGE_SIZE) +
+ ALIGN(num_descs * cq_desc_size, PDS_PAGE_SIZE);
+ qcq->q_base = dma_alloc_coherent(dev,
+ qcq->q_size + qcq->cq_size,
+ &qcq->q_base_pa,
+ GFP_KERNEL);
+ if (!qcq->q_base) {
+ err = -ENOMEM;
+ goto err_out_free_cq_info;
+ }
+ q_base = PTR_ALIGN(qcq->q_base, PDS_PAGE_SIZE);
+ q_base_pa = ALIGN(qcq->q_base_pa, PDS_PAGE_SIZE);
+ pdsc_q_map(&qcq->q, q_base, q_base_pa);
+
+ cq_base = PTR_ALIGN(q_base +
+ ALIGN(num_descs * desc_size, PDS_PAGE_SIZE),
+ PDS_PAGE_SIZE);
+ cq_base_pa = ALIGN(qcq->q_base_pa +
+ ALIGN(num_descs * desc_size, PDS_PAGE_SIZE),
+ PDS_PAGE_SIZE);
+
+ } else {
+ /* q DMA descriptors */
+ qcq->q_size = PDS_PAGE_SIZE + (num_descs * desc_size);
+ qcq->q_base = dma_alloc_coherent(dev, qcq->q_size,
+ &qcq->q_base_pa,
+ GFP_KERNEL);
+ if (!qcq->q_base) {
+ err = -ENOMEM;
+ goto err_out_free_cq_info;
+ }
+ q_base = PTR_ALIGN(qcq->q_base, PDS_PAGE_SIZE);
+ q_base_pa = ALIGN(qcq->q_base_pa, PDS_PAGE_SIZE);
+ pdsc_q_map(&qcq->q, q_base, q_base_pa);
+
+ /* cq DMA descriptors */
+ qcq->cq_size = PDS_PAGE_SIZE + (num_descs * cq_desc_size);
+ qcq->cq_base = dma_alloc_coherent(dev, qcq->cq_size,
+ &qcq->cq_base_pa,
+ GFP_KERNEL);
+ if (!qcq->cq_base) {
+ err = -ENOMEM;
+ goto err_out_free_q;
+ }
+ cq_base = PTR_ALIGN(qcq->cq_base, PDS_PAGE_SIZE);
+ cq_base_pa = ALIGN(qcq->cq_base_pa, PDS_PAGE_SIZE);
+ }
+
+ pdsc_cq_map(&qcq->cq, cq_base, cq_base_pa);
+ qcq->cq.bound_q = &qcq->q;
+
+ pdsc_debugfs_add_qcq(pdsc, qcq);
+
+ return 0;
+
+err_out_free_q:
+ dma_free_coherent(dev, qcq->q_size, qcq->q_base, qcq->q_base_pa);
+err_out_free_cq_info:
+ vfree(qcq->cq.info);
+err_out_free_irq:
+ pdsc_qcq_intr_free(pdsc, qcq);
+err_out_free_q_info:
+ vfree(qcq->q.info);
+ memset(qcq, 0, sizeof(*qcq));
+err_out:
+ dev_err(dev, "qcq alloc of %s%d failed %d\n", name, index, err);
+ return err;
+}
+
+static int pdsc_core_init(struct pdsc *pdsc)
+{
+ union pds_core_dev_comp comp = {};
+ union pds_core_dev_cmd cmd = {
+ .init.opcode = PDS_CORE_CMD_INIT,
+ };
+ struct pds_core_dev_init_data_out cido;
+ struct pds_core_dev_init_data_in cidi;
+ u32 dbid_count;
+ u32 dbpage_num;
+ size_t sz;
+ int err;
+
+ cidi.adminq_q_base = cpu_to_le64(pdsc->adminqcq.q_base_pa);
+ cidi.adminq_cq_base = cpu_to_le64(pdsc->adminqcq.cq_base_pa);
+ cidi.notifyq_cq_base = cpu_to_le64(pdsc->notifyqcq.cq.base_pa);
+ cidi.flags = cpu_to_le32(PDS_CORE_QINIT_F_IRQ | PDS_CORE_QINIT_F_ENA);
+ cidi.intr_index = cpu_to_le16(pdsc->adminqcq.intx);
+ cidi.adminq_ring_size = ilog2(pdsc->adminqcq.q.num_descs);
+ cidi.notifyq_ring_size = ilog2(pdsc->notifyqcq.q.num_descs);
+
+ mutex_lock(&pdsc->devcmd_lock);
+
+ sz = min_t(size_t, sizeof(cidi), sizeof(pdsc->cmd_regs->data));
+ memcpy_toio(&pdsc->cmd_regs->data, &cidi, sz);
+
+ err = pdsc_devcmd_locked(pdsc, &cmd, &comp, pdsc->devcmd_timeout);
+ if (!err) {
+ sz = min_t(size_t, sizeof(cido), sizeof(pdsc->cmd_regs->data));
+ memcpy_fromio(&cido, &pdsc->cmd_regs->data, sz);
+ }
+
+ mutex_unlock(&pdsc->devcmd_lock);
+ if (err) {
+ dev_err(pdsc->dev, "Device init command failed: %pe\n",
+ ERR_PTR(err));
+ return err;
+ }
+
+ pdsc->hw_index = le32_to_cpu(cido.core_hw_index);
+
+ dbid_count = le32_to_cpu(pdsc->dev_ident.ndbpgs_per_lif);
+ dbpage_num = pdsc->hw_index * dbid_count;
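+ /* e.g. (illustrative numbers) hw_index 2 with 8 doorbell
+ * pages per LIF selects page 16 of the doorbell BAR for the
+ * kernel's own doorbell page
+ */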
+ pdsc->kern_dbpage = pdsc_map_dbpage(pdsc, dbpage_num);
+ if (!pdsc->kern_dbpage) {
+ dev_err(pdsc->dev, "Cannot map dbpage, aborting\n");
+ return -ENOMEM;
+ }
+
+ pdsc->adminqcq.q.hw_type = cido.adminq_hw_type;
+ pdsc->adminqcq.q.hw_index = le32_to_cpu(cido.adminq_hw_index);
+ pdsc->adminqcq.q.dbval = PDS_CORE_DBELL_QID(pdsc->adminqcq.q.hw_index);
+
+ pdsc->notifyqcq.q.hw_type = cido.notifyq_hw_type;
+ pdsc->notifyqcq.q.hw_index = le32_to_cpu(cido.notifyq_hw_index);
+ pdsc->notifyqcq.q.dbval = PDS_CORE_DBELL_QID(pdsc->notifyqcq.q.hw_index);
+
+ pdsc->last_eid = 0;
+
+ return err;
+}
+
+static struct pdsc_viftype pdsc_viftype_defaults[] = {
+ [PDS_DEV_TYPE_VDPA] = { .name = PDS_DEV_TYPE_VDPA_STR,
+ .vif_id = PDS_DEV_TYPE_VDPA,
+ .dl_id = DEVLINK_PARAM_GENERIC_ID_ENABLE_VNET },
+ [PDS_DEV_TYPE_MAX] = {}
+};
+
+static int pdsc_viftypes_init(struct pdsc *pdsc)
+{
+ enum pds_core_vif_types vt;
+
+ pdsc->viftype_status = kzalloc(sizeof(pdsc_viftype_defaults),
+ GFP_KERNEL);
+ if (!pdsc->viftype_status)
+ return -ENOMEM;
+
+ for (vt = 0; vt < PDS_DEV_TYPE_MAX; vt++) {
+ bool vt_support;
+
+ if (!pdsc_viftype_defaults[vt].name)
+ continue;
+
+ /* Grab the defaults */
+ pdsc->viftype_status[vt] = pdsc_viftype_defaults[vt];
+
+ /* See what the Core device has for support */
+ vt_support = !!le16_to_cpu(pdsc->dev_ident.vif_types[vt]);
+ dev_dbg(pdsc->dev, "VIF %s is %ssupported\n",
+ pdsc->viftype_status[vt].name,
+ vt_support ? "" : "not ");
+
+ pdsc->viftype_status[vt].supported = vt_support;
+ }
+
+ return 0;
+}
+
+int pdsc_setup(struct pdsc *pdsc, bool init)
+{
+ int numdescs;
+ int err;
+
+ if (init)
+ err = pdsc_dev_init(pdsc);
+ else
+ err = pdsc_dev_reinit(pdsc);
+ if (err)
+ return err;
+
+ /* Scale the descriptor ring length based on number of CPUs and VFs */
+ numdescs = max_t(int, PDSC_ADMINQ_MIN_LENGTH, num_online_cpus());
+ numdescs += 2 * pci_sriov_get_totalvfs(pdsc->pdev);
+ numdescs = roundup_pow_of_two(numdescs);
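+ /* e.g. (illustrative) 16 online CPUs and 8 total VFs:
+ * max(16, 16) + 2 * 8 = 32, already a power of two
+ */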
+ err = pdsc_qcq_alloc(pdsc, PDS_CORE_QTYPE_ADMINQ, 0, "adminq",
+ PDS_CORE_QCQ_F_CORE | PDS_CORE_QCQ_F_INTR,
+ numdescs,
+ sizeof(union pds_core_adminq_cmd),
+ sizeof(union pds_core_adminq_comp),
+ 0, &pdsc->adminqcq);
+ if (err)
+ goto err_out_teardown;
+
+ err = pdsc_qcq_alloc(pdsc, PDS_CORE_QTYPE_NOTIFYQ, 0, "notifyq",
+ PDS_CORE_QCQ_F_NOTIFYQ,
+ PDSC_NOTIFYQ_LENGTH,
+ sizeof(struct pds_core_notifyq_cmd),
+ sizeof(union pds_core_notifyq_comp),
+ 0, &pdsc->notifyqcq);
+ if (err)
+ goto err_out_teardown;
+
+ /* NotifyQ rides on the AdminQ interrupt */
+ pdsc->notifyqcq.intx = pdsc->adminqcq.intx;
+
+ /* Set up the Core with the AdminQ and NotifyQ info */
+ err = pdsc_core_init(pdsc);
+ if (err)
+ goto err_out_teardown;
+
+ /* Set up the VIFs */
+ err = pdsc_viftypes_init(pdsc);
+ if (err)
+ goto err_out_teardown;
+
+ if (init)
+ pdsc_debugfs_add_viftype(pdsc);
+
+ clear_bit(PDSC_S_FW_DEAD, &pdsc->state);
+ return 0;
+
+err_out_teardown:
+ pdsc_teardown(pdsc, init);
+ return err;
+}
+
+void pdsc_teardown(struct pdsc *pdsc, bool removing)
+{
+ int i;
+
+ pdsc_devcmd_reset(pdsc);
+ pdsc_qcq_free(pdsc, &pdsc->notifyqcq);
+ pdsc_qcq_free(pdsc, &pdsc->adminqcq);
+
+ kfree(pdsc->viftype_status);
+ pdsc->viftype_status = NULL;
+
+ if (pdsc->intr_info) {
+ for (i = 0; i < pdsc->nintrs; i++)
+ pdsc_intr_free(pdsc, i);
+
+ if (removing) {
+ kfree(pdsc->intr_info);
+ pdsc->intr_info = NULL;
+ }
+ }
+
+ if (pdsc->kern_dbpage) {
+ iounmap(pdsc->kern_dbpage);
+ pdsc->kern_dbpage = NULL;
+ }
+
+ set_bit(PDSC_S_FW_DEAD, &pdsc->state);
+}
+
+int pdsc_start(struct pdsc *pdsc)
+{
+ pds_core_intr_mask(&pdsc->intr_ctrl[pdsc->adminqcq.intx],
+ PDS_CORE_INTR_MASK_CLEAR);
+
+ return 0;
+}
+
+void pdsc_stop(struct pdsc *pdsc)
+{
+ int i;
+
+ if (!pdsc->intr_info)
+ return;
+
+ /* Mask interrupts that are in use */
+ for (i = 0; i < pdsc->nintrs; i++)
+ if (pdsc->intr_info[i].vector)
+ pds_core_intr_mask(&pdsc->intr_ctrl[i],
+ PDS_CORE_INTR_MASK_SET);
+}
+
+static void pdsc_fw_down(struct pdsc *pdsc)
+{
+ union pds_core_notifyq_comp reset_event = {
+ .reset.ecode = cpu_to_le16(PDS_EVENT_RESET),
+ .reset.state = 0,
+ };
+
+ if (test_and_set_bit(PDSC_S_FW_DEAD, &pdsc->state)) {
+ dev_err(pdsc->dev, "%s: already happening\n", __func__);
+ return;
+ }
+
+ /* Notify clients of fw_down */
+ devlink_health_report(pdsc->fw_reporter, "FW down reported", pdsc);
+ pdsc_notify(PDS_EVENT_RESET, &reset_event);
+
+ pdsc_stop(pdsc);
+ pdsc_teardown(pdsc, PDSC_TEARDOWN_RECOVERY);
+}
+
+static void pdsc_fw_up(struct pdsc *pdsc)
+{
+ union pds_core_notifyq_comp reset_event = {
+ .reset.ecode = cpu_to_le16(PDS_EVENT_RESET),
+ .reset.state = 1,
+ };
+ int err;
+
+ if (!test_bit(PDSC_S_FW_DEAD, &pdsc->state)) {
+ dev_err(pdsc->dev, "%s: fw not dead\n", __func__);
+ return;
+ }
+
+ err = pdsc_setup(pdsc, PDSC_SETUP_RECOVERY);
+ if (err)
+ goto err_out;
+
+ err = pdsc_start(pdsc);
+ if (err)
+ goto err_out;
+
+ /* Notify clients of fw_up */
+ pdsc->fw_recoveries++;
+ devlink_health_reporter_state_update(pdsc->fw_reporter,
+ DEVLINK_HEALTH_REPORTER_STATE_HEALTHY);
+ pdsc_notify(PDS_EVENT_RESET, &reset_event);
+
+ return;
+
+err_out:
+ pdsc_teardown(pdsc, PDSC_TEARDOWN_RECOVERY);
+}
+
+void pdsc_health_thread(struct work_struct *work)
+{
+ struct pdsc *pdsc = container_of(work, struct pdsc, health_work);
+ unsigned long mask;
+ bool healthy;
+
+ mutex_lock(&pdsc->config_lock);
+
+ /* Don't do a check when in a transition state */
+ mask = BIT_ULL(PDSC_S_INITING_DRIVER) |
+ BIT_ULL(PDSC_S_STOPPING_DRIVER);
+ if (pdsc->state & mask)
+ goto out_unlock;
+
+ healthy = pdsc_is_fw_good(pdsc);
+ dev_dbg(pdsc->dev, "%s: health %d fw_status %#02x fw_heartbeat %d\n",
+ __func__, healthy, pdsc->fw_status, pdsc->last_hb);
+
+ if (test_bit(PDSC_S_FW_DEAD, &pdsc->state)) {
+ if (healthy)
+ pdsc_fw_up(pdsc);
+ } else {
+ if (!healthy)
+ pdsc_fw_down(pdsc);
+ }
+
+ pdsc->fw_generation = pdsc->fw_status & PDS_CORE_FW_STS_F_GENERATION;
+
+out_unlock:
+ mutex_unlock(&pdsc->config_lock);
+}
diff --git a/drivers/net/ethernet/amd/pds_core/core.h b/drivers/net/ethernet/amd/pds_core/core.h
new file mode 100644
index 000000000000..e545fafc4819
--- /dev/null
+++ b/drivers/net/ethernet/amd/pds_core/core.h
@@ -0,0 +1,312 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2023 Advanced Micro Devices, Inc */
+
+#ifndef _PDSC_H_
+#define _PDSC_H_
+
+#include <linux/debugfs.h>
+#include <net/devlink.h>
+
+#include <linux/pds/pds_common.h>
+#include <linux/pds/pds_core_if.h>
+#include <linux/pds/pds_adminq.h>
+#include <linux/pds/pds_intr.h>
+
+#define PDSC_DRV_DESCRIPTION "AMD/Pensando Core Driver"
+
+#define PDSC_WATCHDOG_SECS 5
+#define PDSC_QUEUE_NAME_MAX_SZ 32
+#define PDSC_ADMINQ_MIN_LENGTH 16 /* must be a power of two */
+#define PDSC_NOTIFYQ_LENGTH 64 /* must be a power of two */
+#define PDSC_TEARDOWN_RECOVERY false
+#define PDSC_TEARDOWN_REMOVING true
+#define PDSC_SETUP_RECOVERY false
+#define PDSC_SETUP_INIT true
+
+struct pdsc_dev_bar {
+ void __iomem *vaddr;
+ phys_addr_t bus_addr;
+ unsigned long len;
+ int res_index;
+};
+
+struct pdsc;
+
+struct pdsc_vf {
+ struct pds_auxiliary_dev *padev;
+ struct pdsc *vf;
+ u16 index;
+ __le16 vif_types[PDS_DEV_TYPE_MAX];
+};
+
+struct pdsc_devinfo {
+ u8 asic_type;
+ u8 asic_rev;
+ char fw_version[PDS_CORE_DEVINFO_FWVERS_BUFLEN + 1];
+ char serial_num[PDS_CORE_DEVINFO_SERIAL_BUFLEN + 1];
+};
+
+struct pdsc_queue {
+ struct pdsc_q_info *info;
+ u64 dbval;
+ u16 head_idx;
+ u16 tail_idx;
+ u8 hw_type;
+ unsigned int index;
+ unsigned int num_descs;
+ u64 dbell_count;
+ u64 features;
+ unsigned int type;
+ unsigned int hw_index;
+ union {
+ void *base;
+ struct pds_core_admin_cmd *adminq;
+ };
+ dma_addr_t base_pa; /* must be page aligned */
+ unsigned int desc_size;
+ unsigned int pid;
+ char name[PDSC_QUEUE_NAME_MAX_SZ];
+};
+
+#define PDSC_INTR_NAME_MAX_SZ 32
+
+struct pdsc_intr_info {
+ char name[PDSC_INTR_NAME_MAX_SZ];
+ unsigned int index;
+ unsigned int vector;
+ void *data;
+};
+
+struct pdsc_cq_info {
+ void *comp;
+};
+
+struct pdsc_buf_info {
+ struct page *page;
+ dma_addr_t dma_addr;
+ u32 page_offset;
+ u32 len;
+};
+
+struct pdsc_q_info {
+ union {
+ void *desc;
+ struct pdsc_admin_cmd *adminq_desc;
+ };
+ unsigned int bytes;
+ unsigned int nbufs;
+ struct pdsc_buf_info bufs[PDS_CORE_MAX_FRAGS];
+ struct pdsc_wait_context *wc;
+ void *dest;
+};
+
+struct pdsc_cq {
+ struct pdsc_cq_info *info;
+ struct pdsc_queue *bound_q;
+ struct pdsc_intr_info *bound_intr;
+ u16 tail_idx;
+ bool done_color;
+ unsigned int num_descs;
+ unsigned int desc_size;
+ void *base;
+ dma_addr_t base_pa; /* must be page aligned */
+} ____cacheline_aligned_in_smp;
+
+struct pdsc_qcq {
+ struct pdsc *pdsc;
+ void *q_base;
+ dma_addr_t q_base_pa; /* might not be page aligned */
+ void *cq_base;
+ dma_addr_t cq_base_pa; /* might not be page aligned */
+ u32 q_size;
+ u32 cq_size;
+ bool armed;
+ unsigned int flags;
+
+ struct work_struct work;
+ struct pdsc_queue q;
+ struct pdsc_cq cq;
+ int intx;
+
+ u32 accum_work;
+ struct dentry *dentry;
+};
+
+struct pdsc_viftype {
+ char *name;
+ bool supported;
+ bool enabled;
+ int dl_id;
+ int vif_id;
+ struct pds_auxiliary_dev *padev;
+};
+
+/* No state flags set means we are in a steady running state */
+enum pdsc_state_flags {
+ PDSC_S_FW_DEAD, /* stopped, wait on startup or recovery */
+ PDSC_S_INITING_DRIVER, /* initial startup from probe */
+ PDSC_S_STOPPING_DRIVER, /* driver remove */
+
+ /* leave this as last */
+ PDSC_S_STATE_SIZE
+};
+
+struct pdsc {
+ struct pci_dev *pdev;
+ struct dentry *dentry;
+ struct device *dev;
+ struct pdsc_dev_bar bars[PDS_CORE_BARS_MAX];
+ struct pdsc_vf *vfs;
+ int num_vfs;
+ int vf_id;
+ int hw_index;
+ int uid;
+
+ unsigned long state;
+ u8 fw_status;
+ u8 fw_generation;
+ unsigned long last_fw_time;
+ u32 last_hb;
+ struct timer_list wdtimer;
+ unsigned int wdtimer_period;
+ struct work_struct health_work;
+ struct devlink_health_reporter *fw_reporter;
+ u32 fw_recoveries;
+
+ struct pdsc_devinfo dev_info;
+ struct pds_core_dev_identity dev_ident;
+ unsigned int nintrs;
+ struct pdsc_intr_info *intr_info; /* array of nintrs elements */
+
+ struct workqueue_struct *wq;
+
+ unsigned int devcmd_timeout;
+ struct mutex devcmd_lock; /* lock for dev_cmd operations */
+ struct mutex config_lock; /* lock for configuration operations */
+ spinlock_t adminq_lock; /* lock for adminq operations */
+ struct pds_core_dev_info_regs __iomem *info_regs;
+ struct pds_core_dev_cmd_regs __iomem *cmd_regs;
+ struct pds_core_intr __iomem *intr_ctrl;
+ u64 __iomem *intr_status;
+ u64 __iomem *db_pages;
+ dma_addr_t phy_db_pages;
+ u64 __iomem *kern_dbpage;
+
+ struct pdsc_qcq adminqcq;
+ struct pdsc_qcq notifyqcq;
+ u64 last_eid;
+ struct pdsc_viftype *viftype_status;
+};
+
+/** enum pds_core_dbell_bits - bitwise composition of dbell values.
+ *
+ * @PDS_CORE_DBELL_QID_MASK: unshifted mask of valid queue id bits.
+ * @PDS_CORE_DBELL_QID_SHIFT: queue id shift amount in dbell value.
+ * @PDS_CORE_DBELL_QID: macro to build QID component of dbell value.
+ *
+ * @PDS_CORE_DBELL_RING_MASK: unshifted mask of valid ring bits.
+ * @PDS_CORE_DBELL_RING_SHIFT: ring shift amount in dbell value.
+ * @PDS_CORE_DBELL_RING: macro to build ring component of dbell value.
+ *
+ * @PDS_CORE_DBELL_RING_0: ring zero dbell component value.
+ * @PDS_CORE_DBELL_RING_1: ring one dbell component value.
+ * @PDS_CORE_DBELL_RING_2: ring two dbell component value.
+ * @PDS_CORE_DBELL_RING_3: ring three dbell component value.
+ *
+ * @PDS_CORE_DBELL_INDEX_MASK: bit mask of valid index bits, no shift needed.
+ */
+enum pds_core_dbell_bits {
+ PDS_CORE_DBELL_QID_MASK = 0xffffff,
+ PDS_CORE_DBELL_QID_SHIFT = 24,
+
+#define PDS_CORE_DBELL_QID(n) \
+ (((u64)(n) & PDS_CORE_DBELL_QID_MASK) << PDS_CORE_DBELL_QID_SHIFT)
+
+ PDS_CORE_DBELL_RING_MASK = 0x7,
+ PDS_CORE_DBELL_RING_SHIFT = 16,
+
+#define PDS_CORE_DBELL_RING(n) \
+ (((u64)(n) & PDS_CORE_DBELL_RING_MASK) << PDS_CORE_DBELL_RING_SHIFT)
+
+ PDS_CORE_DBELL_RING_0 = 0,
+ PDS_CORE_DBELL_RING_1 = PDS_CORE_DBELL_RING(1),
+ PDS_CORE_DBELL_RING_2 = PDS_CORE_DBELL_RING(2),
+ PDS_CORE_DBELL_RING_3 = PDS_CORE_DBELL_RING(3),
+
+ PDS_CORE_DBELL_INDEX_MASK = 0xffff,
+};
+
+static inline void pds_core_dbell_ring(u64 __iomem *db_page,
+ enum pds_core_logical_qtype qtype,
+ u64 val)
+{
+ writeq(val, &db_page[qtype]);
+}
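+
+/* Illustrative only: a caller posting new descriptors would typically
+ * combine the queue's dbval (the QID component set up at init time) with
+ * its current head index, e.g.
+ *
+ * pds_core_dbell_ring(pdsc->kern_dbpage, q->hw_type,
+ * q->dbval | q->head_idx);
+ */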
+
+int pdsc_fw_reporter_diagnose(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg,
+ struct netlink_ext_ack *extack);
+int pdsc_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
+ struct netlink_ext_ack *extack);
+int pdsc_dl_flash_update(struct devlink *dl,
+ struct devlink_flash_update_params *params,
+ struct netlink_ext_ack *extack);
+int pdsc_dl_enable_get(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx);
+int pdsc_dl_enable_set(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx);
+int pdsc_dl_enable_validate(struct devlink *dl, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack);
+
+void __iomem *pdsc_map_dbpage(struct pdsc *pdsc, int page_num);
+
+void pdsc_debugfs_create(void);
+void pdsc_debugfs_destroy(void);
+void pdsc_debugfs_add_dev(struct pdsc *pdsc);
+void pdsc_debugfs_del_dev(struct pdsc *pdsc);
+void pdsc_debugfs_add_ident(struct pdsc *pdsc);
+void pdsc_debugfs_add_viftype(struct pdsc *pdsc);
+void pdsc_debugfs_add_irqs(struct pdsc *pdsc);
+void pdsc_debugfs_add_qcq(struct pdsc *pdsc, struct pdsc_qcq *qcq);
+void pdsc_debugfs_del_qcq(struct pdsc_qcq *qcq);
+
+int pdsc_err_to_errno(enum pds_core_status_code code);
+bool pdsc_is_fw_running(struct pdsc *pdsc);
+bool pdsc_is_fw_good(struct pdsc *pdsc);
+int pdsc_devcmd(struct pdsc *pdsc, union pds_core_dev_cmd *cmd,
+ union pds_core_dev_comp *comp, int max_seconds);
+int pdsc_devcmd_locked(struct pdsc *pdsc, union pds_core_dev_cmd *cmd,
+ union pds_core_dev_comp *comp, int max_seconds);
+int pdsc_devcmd_init(struct pdsc *pdsc);
+int pdsc_devcmd_reset(struct pdsc *pdsc);
+int pdsc_dev_reinit(struct pdsc *pdsc);
+int pdsc_dev_init(struct pdsc *pdsc);
+
+int pdsc_intr_alloc(struct pdsc *pdsc, char *name,
+ irq_handler_t handler, void *data);
+void pdsc_intr_free(struct pdsc *pdsc, int index);
+void pdsc_qcq_free(struct pdsc *pdsc, struct pdsc_qcq *qcq);
+int pdsc_qcq_alloc(struct pdsc *pdsc, unsigned int type, unsigned int index,
+ const char *name, unsigned int flags, unsigned int num_descs,
+ unsigned int desc_size, unsigned int cq_desc_size,
+ unsigned int pid, struct pdsc_qcq *qcq);
+int pdsc_setup(struct pdsc *pdsc, bool init);
+void pdsc_teardown(struct pdsc *pdsc, bool removing);
+int pdsc_start(struct pdsc *pdsc);
+void pdsc_stop(struct pdsc *pdsc);
+void pdsc_health_thread(struct work_struct *work);
+
+int pdsc_register_notify(struct notifier_block *nb);
+void pdsc_unregister_notify(struct notifier_block *nb);
+void pdsc_notify(unsigned long event, void *data);
+int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf);
+int pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf);
+
+void pdsc_process_adminq(struct pdsc_qcq *qcq);
+void pdsc_work_thread(struct work_struct *work);
+irqreturn_t pdsc_adminq_isr(int irq, void *data);
+
+int pdsc_firmware_update(struct pdsc *pdsc, const struct firmware *fw,
+ struct netlink_ext_ack *extack);
+#endif /* _PDSC_H_ */
diff --git a/drivers/net/ethernet/amd/pds_core/debugfs.c b/drivers/net/ethernet/amd/pds_core/debugfs.c
new file mode 100644
index 000000000000..8ec392299b7d
--- /dev/null
+++ b/drivers/net/ethernet/amd/pds_core/debugfs.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2023 Advanced Micro Devices, Inc */
+
+#include <linux/pci.h>
+
+#include "core.h"
+
+static struct dentry *pdsc_dir;
+
+void pdsc_debugfs_create(void)
+{
+ pdsc_dir = debugfs_create_dir(PDS_CORE_DRV_NAME, NULL);
+}
+
+void pdsc_debugfs_destroy(void)
+{
+ debugfs_remove_recursive(pdsc_dir);
+}
+
+void pdsc_debugfs_add_dev(struct pdsc *pdsc)
+{
+ pdsc->dentry = debugfs_create_dir(pci_name(pdsc->pdev), pdsc_dir);
+
+ debugfs_create_ulong("state", 0400, pdsc->dentry, &pdsc->state);
+}
+
+void pdsc_debugfs_del_dev(struct pdsc *pdsc)
+{
+ debugfs_remove_recursive(pdsc->dentry);
+ pdsc->dentry = NULL;
+}
+
+static int identity_show(struct seq_file *seq, void *v)
+{
+ struct pdsc *pdsc = seq->private;
+ struct pds_core_dev_identity *ident;
+ int vt;
+
+ ident = &pdsc->dev_ident;
+
+ seq_printf(seq, "fw_heartbeat: 0x%x\n",
+ ioread32(&pdsc->info_regs->fw_heartbeat));
+
+ seq_printf(seq, "nlifs: %d\n",
+ le32_to_cpu(ident->nlifs));
+ seq_printf(seq, "nintrs: %d\n",
+ le32_to_cpu(ident->nintrs));
+ seq_printf(seq, "ndbpgs_per_lif: %d\n",
+ le32_to_cpu(ident->ndbpgs_per_lif));
+ seq_printf(seq, "intr_coal_mult: %d\n",
+ le32_to_cpu(ident->intr_coal_mult));
+ seq_printf(seq, "intr_coal_div: %d\n",
+ le32_to_cpu(ident->intr_coal_div));
+
+ seq_puts(seq, "vif_types: ");
+ for (vt = 0; vt < PDS_DEV_TYPE_MAX; vt++)
+ seq_printf(seq, "%d ",
+ le16_to_cpu(pdsc->dev_ident.vif_types[vt]));
+ seq_puts(seq, "\n");
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(identity);
+
+void pdsc_debugfs_add_ident(struct pdsc *pdsc)
+{
+ debugfs_create_file("identity", 0400, pdsc->dentry,
+ pdsc, &identity_fops);
+}
+
+static int viftype_show(struct seq_file *seq, void *v)
+{
+ struct pdsc *pdsc = seq->private;
+ int vt;
+
+ for (vt = 0; vt < PDS_DEV_TYPE_MAX; vt++) {
+ if (!pdsc->viftype_status[vt].name)
+ continue;
+
+ seq_printf(seq, "%s\t%d supported %d enabled\n",
+ pdsc->viftype_status[vt].name,
+ pdsc->viftype_status[vt].supported,
+ pdsc->viftype_status[vt].enabled);
+ }
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(viftype);
+
+void pdsc_debugfs_add_viftype(struct pdsc *pdsc)
+{
+ debugfs_create_file("viftypes", 0400, pdsc->dentry,
+ pdsc, &viftype_fops);
+}
+
+static const struct debugfs_reg32 intr_ctrl_regs[] = {
+ { .name = "coal_init", .offset = 0, },
+ { .name = "mask", .offset = 4, },
+ { .name = "credits", .offset = 8, },
+ { .name = "mask_on_assert", .offset = 12, },
+ { .name = "coal_timer", .offset = 16, },
+};
+
+void pdsc_debugfs_add_qcq(struct pdsc *pdsc, struct pdsc_qcq *qcq)
+{
+ struct dentry *qcq_dentry, *q_dentry, *cq_dentry;
+ struct dentry *intr_dentry;
+ struct debugfs_regset32 *intr_ctrl_regset;
+ struct pdsc_intr_info *intr = &pdsc->intr_info[qcq->intx];
+ struct pdsc_queue *q = &qcq->q;
+ struct pdsc_cq *cq = &qcq->cq;
+
+ qcq_dentry = debugfs_create_dir(q->name, pdsc->dentry);
+ if (IS_ERR_OR_NULL(qcq_dentry))
+ return;
+ qcq->dentry = qcq_dentry;
+
+ debugfs_create_x64("q_base_pa", 0400, qcq_dentry, &qcq->q_base_pa);
+ debugfs_create_x32("q_size", 0400, qcq_dentry, &qcq->q_size);
+ debugfs_create_x64("cq_base_pa", 0400, qcq_dentry, &qcq->cq_base_pa);
+ debugfs_create_x32("cq_size", 0400, qcq_dentry, &qcq->cq_size);
+ debugfs_create_x32("accum_work", 0400, qcq_dentry, &qcq->accum_work);
+
+ q_dentry = debugfs_create_dir("q", qcq->dentry);
+ if (IS_ERR_OR_NULL(q_dentry))
+ return;
+
+ debugfs_create_u32("index", 0400, q_dentry, &q->index);
+ debugfs_create_u32("num_descs", 0400, q_dentry, &q->num_descs);
+ debugfs_create_u32("desc_size", 0400, q_dentry, &q->desc_size);
+ debugfs_create_u32("pid", 0400, q_dentry, &q->pid);
+
+ debugfs_create_u16("tail", 0400, q_dentry, &q->tail_idx);
+ debugfs_create_u16("head", 0400, q_dentry, &q->head_idx);
+
+ cq_dentry = debugfs_create_dir("cq", qcq->dentry);
+ if (IS_ERR_OR_NULL(cq_dentry))
+ return;
+
+ debugfs_create_x64("base_pa", 0400, cq_dentry, &cq->base_pa);
+ debugfs_create_u32("num_descs", 0400, cq_dentry, &cq->num_descs);
+ debugfs_create_u32("desc_size", 0400, cq_dentry, &cq->desc_size);
+ debugfs_create_bool("done_color", 0400, cq_dentry, &cq->done_color);
+ debugfs_create_u16("tail", 0400, cq_dentry, &cq->tail_idx);
+
+ if (qcq->flags & PDS_CORE_QCQ_F_INTR) {
+ intr_dentry = debugfs_create_dir("intr", qcq->dentry);
+ if (IS_ERR_OR_NULL(intr_dentry))
+ return;
+
+ debugfs_create_u32("index", 0400, intr_dentry, &intr->index);
+ debugfs_create_u32("vector", 0400, intr_dentry, &intr->vector);
+
+ intr_ctrl_regset = kzalloc(sizeof(*intr_ctrl_regset),
+ GFP_KERNEL);
+ if (!intr_ctrl_regset)
+ return;
+ intr_ctrl_regset->regs = intr_ctrl_regs;
+ intr_ctrl_regset->nregs = ARRAY_SIZE(intr_ctrl_regs);
+ intr_ctrl_regset->base = &pdsc->intr_ctrl[intr->index];
+
+ debugfs_create_regset32("intr_ctrl", 0400, intr_dentry,
+ intr_ctrl_regset);
+ }
+}
+
+void pdsc_debugfs_del_qcq(struct pdsc_qcq *qcq)
+{
+ debugfs_remove_recursive(qcq->dentry);
+ qcq->dentry = NULL;
+}
diff --git a/drivers/net/ethernet/amd/pds_core/dev.c b/drivers/net/ethernet/amd/pds_core/dev.c
new file mode 100644
index 000000000000..f7c597ea5daf
--- /dev/null
+++ b/drivers/net/ethernet/amd/pds_core/dev.c
@@ -0,0 +1,351 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2023 Advanced Micro Devices, Inc */
+
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/utsname.h>
+
+#include "core.h"
+
+int pdsc_err_to_errno(enum pds_core_status_code code)
+{
+ switch (code) {
+ case PDS_RC_SUCCESS:
+ return 0;
+ case PDS_RC_EVERSION:
+ case PDS_RC_EQTYPE:
+ case PDS_RC_EQID:
+ case PDS_RC_EINVAL:
+ case PDS_RC_ENOSUPP:
+ return -EINVAL;
+ case PDS_RC_EPERM:
+ return -EPERM;
+ case PDS_RC_ENOENT:
+ return -ENOENT;
+ case PDS_RC_EAGAIN:
+ return -EAGAIN;
+ case PDS_RC_ENOMEM:
+ return -ENOMEM;
+ case PDS_RC_EFAULT:
+ return -EFAULT;
+ case PDS_RC_EBUSY:
+ return -EBUSY;
+ case PDS_RC_EEXIST:
+ return -EEXIST;
+ case PDS_RC_EVFID:
+ return -ENODEV;
+ case PDS_RC_ECLIENT:
+ return -ECHILD;
+ case PDS_RC_ENOSPC:
+ return -ENOSPC;
+ case PDS_RC_ERANGE:
+ return -ERANGE;
+ case PDS_RC_BAD_ADDR:
+ return -EFAULT;
+ case PDS_RC_EOPCODE:
+ case PDS_RC_EINTR:
+ case PDS_RC_DEV_CMD:
+ case PDS_RC_ERROR:
+ case PDS_RC_ERDMA:
+ case PDS_RC_EIO:
+ default:
+ return -EIO;
+ }
+}
+
+bool pdsc_is_fw_running(struct pdsc *pdsc)
+{
+ pdsc->fw_status = ioread8(&pdsc->info_regs->fw_status);
+ pdsc->last_fw_time = jiffies;
+ pdsc->last_hb = ioread32(&pdsc->info_regs->fw_heartbeat);
+
+ /* Firmware is useful only if the running bit is set and
+ * fw_status != 0xff (bad PCI read)
+ */
+ return (pdsc->fw_status != 0xff) &&
+ (pdsc->fw_status & PDS_CORE_FW_STS_F_RUNNING);
+}
+
+bool pdsc_is_fw_good(struct pdsc *pdsc)
+{
+ u8 gen = pdsc->fw_status & PDS_CORE_FW_STS_F_GENERATION;
+
+ return pdsc_is_fw_running(pdsc) && gen == pdsc->fw_generation;
+}
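+
+/* A sketch of how the generation bits are used (see pdsc_health_thread()):
+ * when the FW restarts it changes the PDS_CORE_FW_STS_F_GENERATION bits
+ * in fw_status, so pdsc_is_fw_good() returns false even while RUNNING is
+ * set, which sends the health thread through the fw_down/fw_up recovery
+ * path.
+ */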
+
+static u8 pdsc_devcmd_status(struct pdsc *pdsc)
+{
+ return ioread8(&pdsc->cmd_regs->comp.status);
+}
+
+static bool pdsc_devcmd_done(struct pdsc *pdsc)
+{
+ return ioread32(&pdsc->cmd_regs->done) & PDS_CORE_DEV_CMD_DONE;
+}
+
+static void pdsc_devcmd_dbell(struct pdsc *pdsc)
+{
+ iowrite32(0, &pdsc->cmd_regs->done);
+ iowrite32(1, &pdsc->cmd_regs->doorbell);
+}
+
+static void pdsc_devcmd_clean(struct pdsc *pdsc)
+{
+ iowrite32(0, &pdsc->cmd_regs->doorbell);
+ memset_io(&pdsc->cmd_regs->cmd, 0, sizeof(pdsc->cmd_regs->cmd));
+}
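+
+/* Summary of the dev_cmd handshake as used by pdsc_devcmd_locked() below,
+ * not an additional interface:
+ *
+ * memcpy_toio(&cmd_regs->cmd, cmd, ...) write the command
+ * pdsc_devcmd_dbell(pdsc) clear done, ring doorbell
+ * pdsc_devcmd_wait(pdsc, ...) poll done, with timeout
+ * memcpy_fromio(comp, &cmd_regs->comp, ...) read the completion
+ */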
+
+static const char *pdsc_devcmd_str(int opcode)
+{
+ switch (opcode) {
+ case PDS_CORE_CMD_NOP:
+ return "PDS_CORE_CMD_NOP";
+ case PDS_CORE_CMD_IDENTIFY:
+ return "PDS_CORE_CMD_IDENTIFY";
+ case PDS_CORE_CMD_RESET:
+ return "PDS_CORE_CMD_RESET";
+ case PDS_CORE_CMD_INIT:
+ return "PDS_CORE_CMD_INIT";
+ case PDS_CORE_CMD_FW_DOWNLOAD:
+ return "PDS_CORE_CMD_FW_DOWNLOAD";
+ case PDS_CORE_CMD_FW_CONTROL:
+ return "PDS_CORE_CMD_FW_CONTROL";
+ default:
+ return "PDS_CORE_CMD_UNKNOWN";
+ }
+}
+
+static int pdsc_devcmd_wait(struct pdsc *pdsc, int max_seconds)
+{
+ struct device *dev = pdsc->dev;
+ unsigned long start_time;
+ unsigned long max_wait;
+ unsigned long duration;
+ int timeout = 0;
+ int done = 0;
+ int err = 0;
+ int status;
+ int opcode;
+
+ opcode = ioread8(&pdsc->cmd_regs->cmd.opcode);
+
+ start_time = jiffies;
+ max_wait = start_time + (max_seconds * HZ);
+
+ while (!done && !timeout) {
+ done = pdsc_devcmd_done(pdsc);
+ if (done)
+ break;
+
+ timeout = time_after(jiffies, max_wait);
+ if (timeout)
+ break;
+
+ usleep_range(100, 200);
+ }
+ duration = jiffies - start_time;
+
+ if (done && duration > HZ)
+ dev_dbg(dev, "DEVCMD %d %s after %ld secs\n",
+ opcode, pdsc_devcmd_str(opcode), duration / HZ);
+
+ if (!done || timeout) {
+ dev_err(dev, "DEVCMD %d %s timeout, done %d timeout %d max_seconds=%d\n",
+ opcode, pdsc_devcmd_str(opcode), done, timeout,
+ max_seconds);
+ pdsc_devcmd_clean(pdsc);
+ /* Return the timeout directly rather than letting the
+ * completion status below overwrite it
+ */
+ return -ETIMEDOUT;
+ }
+
+ status = pdsc_devcmd_status(pdsc);
+ err = pdsc_err_to_errno(status);
+ if (err && err != -EAGAIN)
+ dev_err(dev, "DEVCMD %d %s failed, status=%d err %d %pe\n",
+ opcode, pdsc_devcmd_str(opcode), status, err,
+ ERR_PTR(err));
+
+ return err;
+}
+
+int pdsc_devcmd_locked(struct pdsc *pdsc, union pds_core_dev_cmd *cmd,
+ union pds_core_dev_comp *comp, int max_seconds)
+{
+ int err;
+
+ memcpy_toio(&pdsc->cmd_regs->cmd, cmd, sizeof(*cmd));
+ pdsc_devcmd_dbell(pdsc);
+ err = pdsc_devcmd_wait(pdsc, max_seconds);
+ memcpy_fromio(comp, &pdsc->cmd_regs->comp, sizeof(*comp));
+
+ if (err == -ENXIO || err == -ETIMEDOUT)
+ queue_work(pdsc->wq, &pdsc->health_work);
+
+ return err;
+}
+
+int pdsc_devcmd(struct pdsc *pdsc, union pds_core_dev_cmd *cmd,
+ union pds_core_dev_comp *comp, int max_seconds)
+{
+ int err;
+
+ mutex_lock(&pdsc->devcmd_lock);
+ err = pdsc_devcmd_locked(pdsc, cmd, comp, max_seconds);
+ mutex_unlock(&pdsc->devcmd_lock);
+
+ return err;
+}
+
+int pdsc_devcmd_init(struct pdsc *pdsc)
+{
+ union pds_core_dev_comp comp = {};
+ union pds_core_dev_cmd cmd = {
+ .opcode = PDS_CORE_CMD_INIT,
+ };
+
+ return pdsc_devcmd(pdsc, &cmd, &comp, pdsc->devcmd_timeout);
+}
+
+int pdsc_devcmd_reset(struct pdsc *pdsc)
+{
+ union pds_core_dev_comp comp = {};
+ union pds_core_dev_cmd cmd = {
+ .reset.opcode = PDS_CORE_CMD_RESET,
+ };
+
+ return pdsc_devcmd(pdsc, &cmd, &comp, pdsc->devcmd_timeout);
+}
+
+static int pdsc_devcmd_identify_locked(struct pdsc *pdsc)
+{
+ union pds_core_dev_comp comp = {};
+ union pds_core_dev_cmd cmd = {
+ .identify.opcode = PDS_CORE_CMD_IDENTIFY,
+ .identify.ver = PDS_CORE_IDENTITY_VERSION_1,
+ };
+
+ return pdsc_devcmd_locked(pdsc, &cmd, &comp, pdsc->devcmd_timeout);
+}
+
+static void pdsc_init_devinfo(struct pdsc *pdsc)
+{
+ pdsc->dev_info.asic_type = ioread8(&pdsc->info_regs->asic_type);
+ pdsc->dev_info.asic_rev = ioread8(&pdsc->info_regs->asic_rev);
+ pdsc->fw_generation = PDS_CORE_FW_STS_F_GENERATION &
+ ioread8(&pdsc->info_regs->fw_status);
+
+ memcpy_fromio(pdsc->dev_info.fw_version,
+ pdsc->info_regs->fw_version,
+ PDS_CORE_DEVINFO_FWVERS_BUFLEN);
+ pdsc->dev_info.fw_version[PDS_CORE_DEVINFO_FWVERS_BUFLEN] = 0;
+
+ memcpy_fromio(pdsc->dev_info.serial_num,
+ pdsc->info_regs->serial_num,
+ PDS_CORE_DEVINFO_SERIAL_BUFLEN);
+ pdsc->dev_info.serial_num[PDS_CORE_DEVINFO_SERIAL_BUFLEN] = 0;
+
+ dev_dbg(pdsc->dev, "fw_version %s\n", pdsc->dev_info.fw_version);
+}
+
+static int pdsc_identify(struct pdsc *pdsc)
+{
+ struct pds_core_drv_identity drv = {};
+ size_t sz;
+ int err;
+
+ drv.drv_type = cpu_to_le32(PDS_DRIVER_LINUX);
+ snprintf(drv.driver_ver_str, sizeof(drv.driver_ver_str),
+ "%s %s", PDS_CORE_DRV_NAME, utsname()->release);
+
+ /* Next let's get some info about the device.
+ * We use the devcmd_lock at this level in order to
+ * get safe access to the cmd_regs->data before anyone
+ * else can mess it up.
+ */
+ mutex_lock(&pdsc->devcmd_lock);
+
+ sz = min_t(size_t, sizeof(drv), sizeof(pdsc->cmd_regs->data));
+ memcpy_toio(&pdsc->cmd_regs->data, &drv, sz);
+
+ err = pdsc_devcmd_identify_locked(pdsc);
+ if (!err) {
+ sz = min_t(size_t, sizeof(pdsc->dev_ident),
+ sizeof(pdsc->cmd_regs->data));
+ memcpy_fromio(&pdsc->dev_ident, &pdsc->cmd_regs->data, sz);
+ }
+ mutex_unlock(&pdsc->devcmd_lock);
+
+ if (err) {
+ dev_err(pdsc->dev, "Cannot identify device: %pe\n",
+ ERR_PTR(err));
+ return err;
+ }
+
+ if (isprint(pdsc->dev_info.fw_version[0]) &&
+ isascii(pdsc->dev_info.fw_version[0]))
+ dev_info(pdsc->dev, "FW: %.*s\n",
+ (int)(sizeof(pdsc->dev_info.fw_version) - 1),
+ pdsc->dev_info.fw_version);
+ else
+ dev_info(pdsc->dev, "FW: (invalid string) 0x%02x 0x%02x 0x%02x 0x%02x ...\n",
+ (u8)pdsc->dev_info.fw_version[0],
+ (u8)pdsc->dev_info.fw_version[1],
+ (u8)pdsc->dev_info.fw_version[2],
+ (u8)pdsc->dev_info.fw_version[3]);
+
+ return 0;
+}
+
+int pdsc_dev_reinit(struct pdsc *pdsc)
+{
+ pdsc_init_devinfo(pdsc);
+
+ return pdsc_identify(pdsc);
+}
+
+int pdsc_dev_init(struct pdsc *pdsc)
+{
+ unsigned int nintrs;
+ int err;
+
+ /* Initial init and reset of device */
+ pdsc_init_devinfo(pdsc);
+ pdsc->devcmd_timeout = PDS_CORE_DEVCMD_TIMEOUT;
+
+ err = pdsc_devcmd_reset(pdsc);
+ if (err)
+ return err;
+
+ err = pdsc_identify(pdsc);
+ if (err)
+ return err;
+
+ pdsc_debugfs_add_ident(pdsc);
+
+ /* Now we can reserve interrupts */
+ nintrs = le32_to_cpu(pdsc->dev_ident.nintrs);
+ nintrs = min_t(unsigned int, num_online_cpus(), nintrs);
+
+ /* Get intr_info struct array for tracking */
+ pdsc->intr_info = kcalloc(nintrs, sizeof(*pdsc->intr_info), GFP_KERNEL);
+ if (!pdsc->intr_info) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ err = pci_alloc_irq_vectors(pdsc->pdev, nintrs, nintrs, PCI_IRQ_MSIX);
+ if (err != nintrs) {
+ dev_err(pdsc->dev, "Can't get %d intrs from OS: %pe\n",
+ nintrs, ERR_PTR(err));
+ err = -ENOSPC;
+ goto err_out;
+ }
+ pdsc->nintrs = nintrs;
+
+ return 0;
+
+err_out:
+ kfree(pdsc->intr_info);
+ pdsc->intr_info = NULL;
+
+ return err;
+}
diff --git a/drivers/net/ethernet/amd/pds_core/devlink.c b/drivers/net/ethernet/amd/pds_core/devlink.c
new file mode 100644
index 000000000000..9c6b3653c1c7
--- /dev/null
+++ b/drivers/net/ethernet/amd/pds_core/devlink.c
@@ -0,0 +1,183 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2023 Advanced Micro Devices, Inc */
+
+#include "core.h"
+#include <linux/pds/pds_auxbus.h>
+
+static struct
+pdsc_viftype *pdsc_dl_find_viftype_by_id(struct pdsc *pdsc,
+ enum devlink_param_type dl_id)
+{
+ int vt;
+
+ for (vt = 0; vt < PDS_DEV_TYPE_MAX; vt++) {
+ if (pdsc->viftype_status[vt].dl_id == dl_id)
+ return &pdsc->viftype_status[vt];
+ }
+
+ return NULL;
+}
+
+int pdsc_dl_enable_get(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct pdsc *pdsc = devlink_priv(dl);
+ struct pdsc_viftype *vt_entry;
+
+ vt_entry = pdsc_dl_find_viftype_by_id(pdsc, id);
+ if (!vt_entry)
+ return -ENOENT;
+
+ ctx->val.vbool = vt_entry->enabled;
+
+ return 0;
+}
+
+int pdsc_dl_enable_set(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct pdsc *pdsc = devlink_priv(dl);
+ struct pdsc_viftype *vt_entry;
+ int err = 0;
+ int vf_id;
+
+ vt_entry = pdsc_dl_find_viftype_by_id(pdsc, id);
+ if (!vt_entry || !vt_entry->supported)
+ return -EOPNOTSUPP;
+
+ if (vt_entry->enabled == ctx->val.vbool)
+ return 0;
+
+ vt_entry->enabled = ctx->val.vbool;
+ for (vf_id = 0; vf_id < pdsc->num_vfs; vf_id++) {
+ struct pdsc *vf = pdsc->vfs[vf_id].vf;
+
+ err = ctx->val.vbool ? pdsc_auxbus_dev_add(vf, pdsc) :
+ pdsc_auxbus_dev_del(vf, pdsc);
+ }
+
+ return err;
+}
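+
+/* A sketch of the expected userspace flow for this param, with a
+ * hypothetical PCI address:
+ *
+ * devlink dev param set pci/0000:2b:00.0 \
+ * name enable_vnet value true cmode runtime
+ */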
+
+int pdsc_dl_enable_validate(struct devlink *dl, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct pdsc *pdsc = devlink_priv(dl);
+ struct pdsc_viftype *vt_entry;
+
+ vt_entry = pdsc_dl_find_viftype_by_id(pdsc, id);
+ if (!vt_entry || !vt_entry->supported)
+ return -EOPNOTSUPP;
+
+ if (!pdsc->viftype_status[vt_entry->vif_id].supported)
+ return -ENODEV;
+
+ return 0;
+}
+
+int pdsc_dl_flash_update(struct devlink *dl,
+ struct devlink_flash_update_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct pdsc *pdsc = devlink_priv(dl);
+
+ return pdsc_firmware_update(pdsc, params->fw, extack);
+}
+
+static char *fw_slotnames[] = {
+ "fw.goldfw",
+ "fw.mainfwa",
+ "fw.mainfwb",
+};
+
+int pdsc_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ union pds_core_dev_cmd cmd = {
+ .fw_control.opcode = PDS_CORE_CMD_FW_CONTROL,
+ .fw_control.oper = PDS_CORE_FW_GET_LIST,
+ };
+ struct pds_core_fw_list_info fw_list;
+ struct pdsc *pdsc = devlink_priv(dl);
+ union pds_core_dev_comp comp;
+ char buf[16];
+ int listlen;
+ int err;
+ int i;
+
+ mutex_lock(&pdsc->devcmd_lock);
+ err = pdsc_devcmd_locked(pdsc, &cmd, &comp, pdsc->devcmd_timeout * 2);
+ memcpy_fromio(&fw_list, pdsc->cmd_regs->data, sizeof(fw_list));
+ mutex_unlock(&pdsc->devcmd_lock);
+ if (err && err != -EIO)
+ return err;
+
+ listlen = fw_list.num_fw_slots;
+ for (i = 0; i < listlen; i++) {
+ if (i < ARRAY_SIZE(fw_slotnames))
+ strscpy(buf, fw_slotnames[i], sizeof(buf));
+ else
+ snprintf(buf, sizeof(buf), "fw.slot_%d", i);
+ err = devlink_info_version_stored_put(req, buf,
+ fw_list.fw_names[i].fw_version);
+ if (err)
+ return err;
+ }
+
+ err = devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW,
+ pdsc->dev_info.fw_version);
+ if (err)
+ return err;
+
+ snprintf(buf, sizeof(buf), "0x%x", pdsc->dev_info.asic_type);
+ err = devlink_info_version_fixed_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_ASIC_ID,
+ buf);
+ if (err)
+ return err;
+
+ snprintf(buf, sizeof(buf), "0x%x", pdsc->dev_info.asic_rev);
+ err = devlink_info_version_fixed_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_ASIC_REV,
+ buf);
+ if (err)
+ return err;
+
+ return devlink_info_serial_number_put(req, pdsc->dev_info.serial_num);
+}
+
+int pdsc_fw_reporter_diagnose(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg,
+ struct netlink_ext_ack *extack)
+{
+ struct pdsc *pdsc = devlink_health_reporter_priv(reporter);
+ int err;
+
+ mutex_lock(&pdsc->config_lock);
+
+ if (test_bit(PDSC_S_FW_DEAD, &pdsc->state))
+ err = devlink_fmsg_string_pair_put(fmsg, "Status", "dead");
+ else if (!pdsc_is_fw_good(pdsc))
+ err = devlink_fmsg_string_pair_put(fmsg, "Status", "unhealthy");
+ else
+ err = devlink_fmsg_string_pair_put(fmsg, "Status", "healthy");
+
+ mutex_unlock(&pdsc->config_lock);
+
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "State",
+ pdsc->fw_status &
+ ~PDS_CORE_FW_STS_F_GENERATION);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "Generation",
+ pdsc->fw_generation >> 4);
+ if (err)
+ return err;
+
+ return devlink_fmsg_u32_pair_put(fmsg, "Recoveries",
+ pdsc->fw_recoveries);
+}
diff --git a/drivers/net/ethernet/amd/pds_core/fw.c b/drivers/net/ethernet/amd/pds_core/fw.c
new file mode 100644
index 000000000000..90a811f3878a
--- /dev/null
+++ b/drivers/net/ethernet/amd/pds_core/fw.c
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2023 Advanced Micro Devices, Inc */
+
+#include "core.h"
+
+/* The worst case wait for the install activity is about 25 minutes when
+ * installing a new CPLD, which happens very seldom. Normal is about 30-35
+ * seconds. Since the driver can't tell whether a CPLD update will happen,
+ * we set the timeout for the ugly case.
+ */
+#define PDSC_FW_INSTALL_TIMEOUT (25 * 60)
+#define PDSC_FW_SELECT_TIMEOUT 30
+
+/* Number of periodic log updates during fw file download */
+#define PDSC_FW_INTERVAL_FRACTION 32
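+/* e.g. with a 32 MB firmware image (illustrative) this gives a progress
+ * update about every 1 MB of downloaded data
+ */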
+
+static int pdsc_devcmd_fw_download_locked(struct pdsc *pdsc, u64 addr,
+ u32 offset, u32 length)
+{
+ union pds_core_dev_cmd cmd = {
+ .fw_download.opcode = PDS_CORE_CMD_FW_DOWNLOAD,
+ .fw_download.offset = cpu_to_le32(offset),
+ .fw_download.addr = cpu_to_le64(addr),
+ .fw_download.length = cpu_to_le32(length),
+ };
+ union pds_core_dev_comp comp;
+
+ return pdsc_devcmd_locked(pdsc, &cmd, &comp, pdsc->devcmd_timeout);
+}
+
+static int pdsc_devcmd_fw_install(struct pdsc *pdsc)
+{
+ union pds_core_dev_cmd cmd = {
+ .fw_control.opcode = PDS_CORE_CMD_FW_CONTROL,
+ .fw_control.oper = PDS_CORE_FW_INSTALL_ASYNC
+ };
+ union pds_core_dev_comp comp;
+ int err;
+
+ err = pdsc_devcmd(pdsc, &cmd, &comp, pdsc->devcmd_timeout);
+ if (err < 0)
+ return err;
+
+ return comp.fw_control.slot;
+}
+
+static int pdsc_devcmd_fw_activate(struct pdsc *pdsc,
+ enum pds_core_fw_slot slot)
+{
+ union pds_core_dev_cmd cmd = {
+ .fw_control.opcode = PDS_CORE_CMD_FW_CONTROL,
+ .fw_control.oper = PDS_CORE_FW_ACTIVATE_ASYNC,
+ .fw_control.slot = slot
+ };
+ union pds_core_dev_comp comp;
+
+ return pdsc_devcmd(pdsc, &cmd, &comp, pdsc->devcmd_timeout);
+}
+
+static int pdsc_fw_status_long_wait(struct pdsc *pdsc,
+ const char *label,
+ unsigned long timeout,
+ u8 fw_cmd,
+ struct netlink_ext_ack *extack)
+{
+ union pds_core_dev_cmd cmd = {
+ .fw_control.opcode = PDS_CORE_CMD_FW_CONTROL,
+ .fw_control.oper = fw_cmd,
+ };
+ union pds_core_dev_comp comp;
+ unsigned long start_time;
+ unsigned long end_time;
+ int err;
+
+ /* Poll the status of the long-running async install
+ * command. We get -EAGAIN while the command is still
+ * running; otherwise we get the final command status.
+ */
+ start_time = jiffies;
+ end_time = start_time + (timeout * HZ);
+ do {
+ err = pdsc_devcmd(pdsc, &cmd, &comp, pdsc->devcmd_timeout);
+ msleep(20);
+ } while (time_before(jiffies, end_time) &&
+ (err == -EAGAIN || err == -ETIMEDOUT));
+
+ if (err == -EAGAIN || err == -ETIMEDOUT) {
+ NL_SET_ERR_MSG_MOD(extack, "Firmware wait timed out");
+ dev_err(pdsc->dev, "DEV_CMD firmware wait %s timed out\n",
+ label);
+ } else if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Firmware wait failed");
+ }
+
+ return err;
+}
+
+int pdsc_firmware_update(struct pdsc *pdsc, const struct firmware *fw,
+ struct netlink_ext_ack *extack)
+{
+ u32 buf_sz, copy_sz, offset;
+ struct devlink *dl;
+ int next_interval;
+ u64 data_addr;
+ int err = 0;
+ int fw_slot;
+
+ dev_info(pdsc->dev, "Installing firmware\n");
+
+ dl = priv_to_devlink(pdsc);
+ devlink_flash_update_status_notify(dl, "Preparing to flash",
+ NULL, 0, 0);
+
+ buf_sz = sizeof(pdsc->cmd_regs->data);
+
+ dev_dbg(pdsc->dev,
+ "downloading firmware - size %d part_sz %d nparts %lu\n",
+ (int)fw->size, buf_sz, DIV_ROUND_UP(fw->size, buf_sz));
+
+ offset = 0;
+ next_interval = 0;
+ data_addr = offsetof(struct pds_core_dev_cmd_regs, data);
+ while (offset < fw->size) {
+ if (offset >= next_interval) {
+ devlink_flash_update_status_notify(dl, "Downloading",
+ NULL, offset,
+ fw->size);
+ next_interval = offset +
+ (fw->size / PDSC_FW_INTERVAL_FRACTION);
+ }
+
+ copy_sz = min_t(unsigned int, buf_sz, fw->size - offset);
+ mutex_lock(&pdsc->devcmd_lock);
+ memcpy_toio(&pdsc->cmd_regs->data, fw->data + offset, copy_sz);
+ err = pdsc_devcmd_fw_download_locked(pdsc, data_addr,
+ offset, copy_sz);
+ mutex_unlock(&pdsc->devcmd_lock);
+ if (err) {
+ dev_err(pdsc->dev,
+ "download failed offset 0x%x addr 0x%llx len 0x%x: %pe\n",
+ offset, data_addr, copy_sz, ERR_PTR(err));
+ NL_SET_ERR_MSG_MOD(extack, "Segment download failed");
+ goto err_out;
+ }
+ offset += copy_sz;
+ }
+ devlink_flash_update_status_notify(dl, "Downloading", NULL,
+ fw->size, fw->size);
+
+ devlink_flash_update_timeout_notify(dl, "Installing", NULL,
+ PDSC_FW_INSTALL_TIMEOUT);
+
+ fw_slot = pdsc_devcmd_fw_install(pdsc);
+ if (fw_slot < 0) {
+ err = fw_slot;
+ dev_err(pdsc->dev, "install failed: %pe\n", ERR_PTR(err));
+ NL_SET_ERR_MSG_MOD(extack, "Failed to start firmware install");
+ goto err_out;
+ }
+
+ err = pdsc_fw_status_long_wait(pdsc, "Installing",
+ PDSC_FW_INSTALL_TIMEOUT,
+ PDS_CORE_FW_INSTALL_STATUS,
+ extack);
+ if (err)
+ goto err_out;
+
+ devlink_flash_update_timeout_notify(dl, "Selecting", NULL,
+ PDSC_FW_SELECT_TIMEOUT);
+
+ err = pdsc_devcmd_fw_activate(pdsc, fw_slot);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to start firmware select");
+ goto err_out;
+ }
+
+ err = pdsc_fw_status_long_wait(pdsc, "Selecting",
+ PDSC_FW_SELECT_TIMEOUT,
+ PDS_CORE_FW_ACTIVATE_STATUS,
+ extack);
+ if (err)
+ goto err_out;
+
+ dev_info(pdsc->dev, "Firmware update completed, slot %d\n", fw_slot);
+
+err_out:
+ if (err)
+ devlink_flash_update_status_notify(dl, "Flash failed",
+ NULL, 0, 0);
+ else
+ devlink_flash_update_status_notify(dl, "Flash done",
+ NULL, 0, 0);
+ return err;
+}
diff --git a/drivers/net/ethernet/amd/pds_core/main.c b/drivers/net/ethernet/amd/pds_core/main.c
new file mode 100644
index 000000000000..e2d14b1ca471
--- /dev/null
+++ b/drivers/net/ethernet/amd/pds_core/main.c
@@ -0,0 +1,475 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2023 Advanced Micro Devices, Inc */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/pci.h>
+
+#include <linux/pds/pds_common.h>
+
+#include "core.h"
+
+MODULE_DESCRIPTION(PDSC_DRV_DESCRIPTION);
+MODULE_AUTHOR("Advanced Micro Devices, Inc");
+MODULE_LICENSE("GPL");
+
+/* Supported devices */
+static const struct pci_device_id pdsc_id_table[] = {
+ { PCI_VDEVICE(PENSANDO, PCI_DEVICE_ID_PENSANDO_CORE_PF) },
+ { PCI_VDEVICE(PENSANDO, PCI_DEVICE_ID_PENSANDO_VDPA_VF) },
+ { 0, } /* end of table */
+};
+MODULE_DEVICE_TABLE(pci, pdsc_id_table);
+
+static void pdsc_wdtimer_cb(struct timer_list *t)
+{
+ struct pdsc *pdsc = from_timer(pdsc, t, wdtimer);
+
+ dev_dbg(pdsc->dev, "%s: jiffies %ld\n", __func__, jiffies);
+ mod_timer(&pdsc->wdtimer,
+ round_jiffies(jiffies + pdsc->wdtimer_period));
+
+ queue_work(pdsc->wq, &pdsc->health_work);
+}
+
+static void pdsc_unmap_bars(struct pdsc *pdsc)
+{
+ struct pdsc_dev_bar *bars = pdsc->bars;
+ unsigned int i;
+
+ for (i = 0; i < PDS_CORE_BARS_MAX; i++) {
+ if (bars[i].vaddr)
+ pci_iounmap(pdsc->pdev, bars[i].vaddr);
+ }
+}
+
+static int pdsc_map_bars(struct pdsc *pdsc)
+{
+ struct pdsc_dev_bar *bar = pdsc->bars;
+ struct pci_dev *pdev = pdsc->pdev;
+ struct device *dev = pdsc->dev;
+ struct pdsc_dev_bar *bars;
+ unsigned int i, j;
+ int num_bars = 0;
+ int err;
+ u32 sig;
+
+ bars = pdsc->bars;
+
+ /* Since the PCI interface in the hardware is configurable,
+ * we need to poke into all the bars to find the set we're
+ * expecting.
+ */
+ for (i = 0, j = 0; i < PDS_CORE_BARS_MAX; i++) {
+ if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM))
+ continue;
+
+ bars[j].len = pci_resource_len(pdev, i);
+ bars[j].bus_addr = pci_resource_start(pdev, i);
+ bars[j].res_index = i;
+
+ /* Map all of BAR 0 only; other BARs are mapped on demand later */
+ if (j > 0) {
+ bars[j].vaddr = NULL;
+ } else {
+ bars[j].vaddr = pci_iomap(pdev, i, bars[j].len);
+ if (!bars[j].vaddr) {
+ dev_err(dev, "Cannot map BAR %d, aborting\n", i);
+ return -ENODEV;
+ }
+ }
+
+ j++;
+ }
+ num_bars = j;
+
+ /* BAR0: dev_cmd and interrupts */
+ if (num_bars < 1) {
+ dev_err(dev, "No bars found\n");
+ err = -EFAULT;
+ goto err_out;
+ }
+
+ if (bar->len < PDS_CORE_BAR0_SIZE) {
+ dev_err(dev, "Resource bar size %lu too small\n", bar->len);
+ err = -EFAULT;
+ goto err_out;
+ }
+
+ pdsc->info_regs = bar->vaddr + PDS_CORE_BAR0_DEV_INFO_REGS_OFFSET;
+ pdsc->cmd_regs = bar->vaddr + PDS_CORE_BAR0_DEV_CMD_REGS_OFFSET;
+ pdsc->intr_status = bar->vaddr + PDS_CORE_BAR0_INTR_STATUS_OFFSET;
+ pdsc->intr_ctrl = bar->vaddr + PDS_CORE_BAR0_INTR_CTRL_OFFSET;
+
+ sig = ioread32(&pdsc->info_regs->signature);
+ if (sig != PDS_CORE_DEV_INFO_SIGNATURE) {
+ dev_err(dev, "Incompatible firmware signature %x", sig);
+ err = -EFAULT;
+ goto err_out;
+ }
+
+ /* BAR1: doorbells */
+ bar++;
+ if (num_bars < 2) {
+ dev_err(dev, "Doorbell bar missing\n");
+ err = -EFAULT;
+ goto err_out;
+ }
+
+ pdsc->db_pages = bar->vaddr;
+ pdsc->phy_db_pages = bar->bus_addr;
+
+ return 0;
+
+err_out:
+ pdsc_unmap_bars(pdsc);
+ return err;
+}
+
+void __iomem *pdsc_map_dbpage(struct pdsc *pdsc, int page_num)
+{
+ return pci_iomap_range(pdsc->pdev,
+ pdsc->bars[PDS_CORE_PCI_BAR_DBELL].res_index,
+ (u64)page_num << PAGE_SHIFT, PAGE_SIZE);
+}
+
+static int pdsc_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ struct pdsc *pdsc = pci_get_drvdata(pdev);
+ struct device *dev = pdsc->dev;
+ int ret = 0;
+
+ if (num_vfs > 0) {
+ pdsc->vfs = kcalloc(num_vfs, sizeof(struct pdsc_vf),
+ GFP_KERNEL);
+ if (!pdsc->vfs)
+ return -ENOMEM;
+ pdsc->num_vfs = num_vfs;
+
+ ret = pci_enable_sriov(pdev, num_vfs);
+ if (ret) {
+ dev_err(dev, "Cannot enable SRIOV: %pe\n",
+ ERR_PTR(ret));
+ goto no_vfs;
+ }
+
+ return num_vfs;
+ }
+
+no_vfs:
+ pci_disable_sriov(pdev);
+
+ kfree(pdsc->vfs);
+ pdsc->vfs = NULL;
+ pdsc->num_vfs = 0;
+
+ return ret;
+}
+
+static int pdsc_init_vf(struct pdsc *vf)
+{
+ struct devlink *dl;
+ struct pdsc *pf;
+ int err;
+
+ pf = pdsc_get_pf_struct(vf->pdev);
+ if (IS_ERR_OR_NULL(pf))
+ return PTR_ERR(pf) ?: -ENODEV;
+
+ vf->vf_id = pci_iov_vf_id(vf->pdev);
+
+ dl = priv_to_devlink(vf);
+ devl_lock(dl);
+ devl_register(dl);
+ devl_unlock(dl);
+
+ pf->vfs[vf->vf_id].vf = vf;
+ err = pdsc_auxbus_dev_add(vf, pf);
+ if (err) {
+ devl_lock(dl);
+ devl_unregister(dl);
+ devl_unlock(dl);
+ }
+
+ return err;
+}
+
+static const struct devlink_health_reporter_ops pdsc_fw_reporter_ops = {
+ .name = "fw",
+ .diagnose = pdsc_fw_reporter_diagnose,
+};
+
+static const struct devlink_param pdsc_dl_params[] = {
+ DEVLINK_PARAM_GENERIC(ENABLE_VNET,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ pdsc_dl_enable_get,
+ pdsc_dl_enable_set,
+ pdsc_dl_enable_validate),
+};
+
+#define PDSC_WQ_NAME_LEN 24
+
+static int pdsc_init_pf(struct pdsc *pdsc)
+{
+ struct devlink_health_reporter *hr;
+ char wq_name[PDSC_WQ_NAME_LEN];
+ struct devlink *dl;
+ int err;
+
+ pcie_print_link_status(pdsc->pdev);
+
+ err = pci_request_regions(pdsc->pdev, PDS_CORE_DRV_NAME);
+ if (err) {
+ dev_err(pdsc->dev, "Cannot request PCI regions: %pe\n",
+ ERR_PTR(err));
+ return err;
+ }
+
+ err = pdsc_map_bars(pdsc);
+ if (err)
+ goto err_out_release_regions;
+
+ /* General workqueue and timer, but don't start timer yet */
+ snprintf(wq_name, sizeof(wq_name), "%s.%d", PDS_CORE_DRV_NAME, pdsc->uid);
+ pdsc->wq = create_singlethread_workqueue(wq_name);
+ INIT_WORK(&pdsc->health_work, pdsc_health_thread);
+ timer_setup(&pdsc->wdtimer, pdsc_wdtimer_cb, 0);
+ pdsc->wdtimer_period = PDSC_WATCHDOG_SECS * HZ;
+
+ mutex_init(&pdsc->devcmd_lock);
+ mutex_init(&pdsc->config_lock);
+ spin_lock_init(&pdsc->adminq_lock);
+
+ mutex_lock(&pdsc->config_lock);
+ set_bit(PDSC_S_FW_DEAD, &pdsc->state);
+
+ err = pdsc_setup(pdsc, PDSC_SETUP_INIT);
+ if (err)
+ goto err_out_unmap_bars;
+ err = pdsc_start(pdsc);
+ if (err)
+ goto err_out_teardown;
+
+ mutex_unlock(&pdsc->config_lock);
+
+ dl = priv_to_devlink(pdsc);
+ devl_lock(dl);
+ err = devl_params_register(dl, pdsc_dl_params,
+ ARRAY_SIZE(pdsc_dl_params));
+ if (err) {
+ dev_warn(pdsc->dev, "Failed to register devlink params: %pe\n",
+ ERR_PTR(err));
+ goto err_out_unlock_dl;
+ }
+
+ hr = devl_health_reporter_create(dl, &pdsc_fw_reporter_ops, 0, pdsc);
+ if (IS_ERR(hr)) {
+ dev_warn(pdsc->dev, "Failed to create fw reporter: %pe\n", hr);
+ err = PTR_ERR(hr);
+ goto err_out_unreg_params;
+ }
+ pdsc->fw_reporter = hr;
+
+ devl_register(dl);
+ devl_unlock(dl);
+
+ /* Lastly, start the health check timer */
+ mod_timer(&pdsc->wdtimer, round_jiffies(jiffies + pdsc->wdtimer_period));
+
+ return 0;
+
+err_out_unreg_params:
+ devl_params_unregister(dl, pdsc_dl_params,
+ ARRAY_SIZE(pdsc_dl_params));
+err_out_unlock_dl:
+ devl_unlock(dl);
+ pdsc_stop(pdsc);
+err_out_teardown:
+ pdsc_teardown(pdsc, PDSC_TEARDOWN_REMOVING);
+err_out_unmap_bars:
+ mutex_unlock(&pdsc->config_lock);
+ del_timer_sync(&pdsc->wdtimer);
+ if (pdsc->wq)
+ destroy_workqueue(pdsc->wq);
+ mutex_destroy(&pdsc->config_lock);
+ mutex_destroy(&pdsc->devcmd_lock);
+ pci_free_irq_vectors(pdsc->pdev);
+ pdsc_unmap_bars(pdsc);
+err_out_release_regions:
+ pci_release_regions(pdsc->pdev);
+
+ return err;
+}
+
+static const struct devlink_ops pdsc_dl_ops = {
+ .info_get = pdsc_dl_info_get,
+ .flash_update = pdsc_dl_flash_update,
+};
+
+static const struct devlink_ops pdsc_dl_vf_ops = {
+};
+
+static DEFINE_IDA(pdsc_ida);
+
+static int pdsc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct device *dev = &pdev->dev;
+ const struct devlink_ops *ops;
+ struct devlink *dl;
+ struct pdsc *pdsc;
+ bool is_pf;
+ int err;
+
+ is_pf = !pdev->is_virtfn;
+ ops = is_pf ? &pdsc_dl_ops : &pdsc_dl_vf_ops;
+ dl = devlink_alloc(ops, sizeof(struct pdsc), dev);
+ if (!dl)
+ return -ENOMEM;
+ pdsc = devlink_priv(dl);
+
+ pdsc->pdev = pdev;
+ pdsc->dev = &pdev->dev;
+ set_bit(PDSC_S_INITING_DRIVER, &pdsc->state);
+ pci_set_drvdata(pdev, pdsc);
+ pdsc_debugfs_add_dev(pdsc);
+
+ err = ida_alloc(&pdsc_ida, GFP_KERNEL);
+ if (err < 0) {
+ dev_err(pdsc->dev, "%s: id alloc failed: %pe\n",
+ __func__, ERR_PTR(err));
+ goto err_out_free_devlink;
+ }
+ pdsc->uid = err;
+
+ /* Query system for DMA addressing limitation for the device. */
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(PDS_CORE_ADDR_LEN));
+ if (err) {
+ dev_err(dev, "Unable to obtain 64-bit DMA for consistent allocations, aborting: %pe\n",
+ ERR_PTR(err));
+ goto err_out_free_ida;
+ }
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Cannot enable PCI device: %pe\n", ERR_PTR(err));
+ goto err_out_free_ida;
+ }
+ pci_set_master(pdev);
+
+ if (is_pf)
+ err = pdsc_init_pf(pdsc);
+ else
+ err = pdsc_init_vf(pdsc);
+ if (err) {
+ dev_err(dev, "Cannot init device: %pe\n", ERR_PTR(err));
+ goto err_out_clear_master;
+ }
+
+ clear_bit(PDSC_S_INITING_DRIVER, &pdsc->state);
+ return 0;
+
+err_out_clear_master:
+ pci_clear_master(pdev);
+ pci_disable_device(pdev);
+err_out_free_ida:
+ ida_free(&pdsc_ida, pdsc->uid);
+err_out_free_devlink:
+ pdsc_debugfs_del_dev(pdsc);
+ devlink_free(dl);
+
+ return err;
+}
+
+static void pdsc_remove(struct pci_dev *pdev)
+{
+ struct pdsc *pdsc = pci_get_drvdata(pdev);
+ struct devlink *dl;
+
+ /* Unhook the registrations first to be sure there
+ * are no requests while we're stopping.
+ */
+ dl = priv_to_devlink(pdsc);
+ devl_lock(dl);
+ devl_unregister(dl);
+ if (!pdev->is_virtfn) {
+ if (pdsc->fw_reporter) {
+ devl_health_reporter_destroy(pdsc->fw_reporter);
+ pdsc->fw_reporter = NULL;
+ }
+ devl_params_unregister(dl, pdsc_dl_params,
+ ARRAY_SIZE(pdsc_dl_params));
+ }
+ devl_unlock(dl);
+
+ if (pdev->is_virtfn) {
+ struct pdsc *pf;
+
+ pf = pdsc_get_pf_struct(pdsc->pdev);
+ if (!IS_ERR(pf)) {
+ pdsc_auxbus_dev_del(pdsc, pf);
+ pf->vfs[pdsc->vf_id].vf = NULL;
+ }
+ } else {
+ /* Remove the VFs and their aux_bus connections before other
+ * cleanup so that the clients can use the AdminQ to cleanly
+ * shut themselves down.
+ */
+ pdsc_sriov_configure(pdev, 0);
+
+ del_timer_sync(&pdsc->wdtimer);
+ if (pdsc->wq)
+ destroy_workqueue(pdsc->wq);
+
+ mutex_lock(&pdsc->config_lock);
+ set_bit(PDSC_S_STOPPING_DRIVER, &pdsc->state);
+
+ pdsc_stop(pdsc);
+ pdsc_teardown(pdsc, PDSC_TEARDOWN_REMOVING);
+ mutex_unlock(&pdsc->config_lock);
+ mutex_destroy(&pdsc->config_lock);
+ mutex_destroy(&pdsc->devcmd_lock);
+
+ pci_free_irq_vectors(pdev);
+ pdsc_unmap_bars(pdsc);
+ pci_release_regions(pdev);
+ }
+
+ pci_clear_master(pdev);
+ pci_disable_device(pdev);
+
+ ida_free(&pdsc_ida, pdsc->uid);
+ pdsc_debugfs_del_dev(pdsc);
+ devlink_free(dl);
+}
+
+static struct pci_driver pdsc_driver = {
+ .name = PDS_CORE_DRV_NAME,
+ .id_table = pdsc_id_table,
+ .probe = pdsc_probe,
+ .remove = pdsc_remove,
+ .sriov_configure = pdsc_sriov_configure,
+};
+
+void *pdsc_get_pf_struct(struct pci_dev *vf_pdev)
+{
+ return pci_iov_get_pf_drvdata(vf_pdev, &pdsc_driver);
+}
+EXPORT_SYMBOL_GPL(pdsc_get_pf_struct);
+
+static int __init pdsc_init_module(void)
+{
+ if (strcmp(KBUILD_MODNAME, PDS_CORE_DRV_NAME))
+ return -EINVAL;
+
+ pdsc_debugfs_create();
+ return pci_register_driver(&pdsc_driver);
+}
+
+static void __exit pdsc_cleanup_module(void)
+{
+ pci_unregister_driver(&pdsc_driver);
+ pdsc_debugfs_destroy();
+}
+
+module_init(pdsc_init_module);
+module_exit(pdsc_cleanup_module);
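
[Editor's note] The pds_core probe/remove pair above follows the usual devlink ownership pattern: the driver private struct is carved out of the devlink allocation, registration happens last in probe, and unregistration happens first in remove so no devlink request can race the teardown. A minimal sketch of that skeleton, with the pdsc-specific steps elided (my_probe, my_hw_init and friends are illustrative names, not part of the driver):

    static int my_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
    {
    	struct my_priv *priv;
    	struct devlink *dl;
    	int err;

    	/* priv lives inside the devlink object; freed by devlink_free() */
    	dl = devlink_alloc(&my_dl_ops, sizeof(struct my_priv), &pdev->dev);
    	if (!dl)
    		return -ENOMEM;
    	priv = devlink_priv(dl);

    	err = my_hw_init(priv);		/* all fallible setup first */
    	if (err)
    		goto err_free;

    	devl_lock(dl);
    	devl_register(dl);		/* make it visible only when ready */
    	devl_unlock(dl);
    	return 0;

    err_free:
    	devlink_free(dl);
    	return err;
    }

    static void my_remove(struct pci_dev *pdev)
    {
    	struct devlink *dl = priv_to_devlink(pci_get_drvdata(pdev));

    	devl_lock(dl);
    	devl_unregister(dl);		/* unhook before tearing down */
    	devl_unlock(dl);
    	my_hw_teardown(devlink_priv(dl));
    	devlink_free(dl);
    }
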
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c b/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c
index d3526cd38f3d..414b2e448d59 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c
@@ -124,7 +124,7 @@ static const struct hwmon_channel_info aq_hwmon_temp = {
.config = aq_hwmon_temp_config,
};
-static const struct hwmon_channel_info *aq_hwmon_info[] = {
+static const struct hwmon_channel_info * const aq_hwmon_info[] = {
&aq_hwmon_temp,
NULL,
};
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 8ff5a4f98d6f..dcd9367f05af 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -56,6 +56,7 @@
#include <linux/hwmon-sysfs.h>
#include <net/page_pool.h>
#include <linux/align.h>
+#include <net/netdev_queues.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
@@ -331,26 +332,6 @@ static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
txr->kick_pending = 0;
}
-static bool bnxt_txr_netif_try_stop_queue(struct bnxt *bp,
- struct bnxt_tx_ring_info *txr,
- struct netdev_queue *txq)
-{
- netif_tx_stop_queue(txq);
-
- /* netif_tx_stop_queue() must be done before checking
- * tx index in bnxt_tx_avail() below, because in
- * bnxt_tx_int(), we update tx index before checking for
- * netif_tx_queue_stopped().
- */
- smp_mb();
- if (bnxt_tx_avail(bp, txr) >= bp->tx_wake_thresh) {
- netif_tx_wake_queue(txq);
- return false;
- }
-
- return true;
-}
-
static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct bnxt *bp = netdev_priv(dev);
@@ -384,7 +365,8 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (net_ratelimit() && txr->kick_pending)
netif_warn(bp, tx_err, dev,
"bnxt: ring busy w/ flush pending!\n");
- if (bnxt_txr_netif_try_stop_queue(bp, txr, txq))
+ if (!netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr),
+ bp->tx_wake_thresh))
return NETDEV_TX_BUSY;
}
@@ -490,7 +472,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
prod = NEXT_TX(prod);
tx_push->doorbell =
cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
- txr->tx_prod = prod;
+ WRITE_ONCE(txr->tx_prod, prod);
tx_buf->is_push = 1;
netdev_tx_sent_queue(txq, skb->len);
@@ -601,7 +583,7 @@ normal_tx:
wmb();
prod = NEXT_TX(prod);
- txr->tx_prod = prod;
+ WRITE_ONCE(txr->tx_prod, prod);
if (!netdev_xmit_more() || netif_xmit_stopped(txq))
bnxt_txr_db_kick(bp, txr, prod);
@@ -614,7 +596,8 @@ tx_done:
if (netdev_xmit_more() && !tx_buf->is_push)
bnxt_txr_db_kick(bp, txr, prod);
- bnxt_txr_netif_try_stop_queue(bp, txr, txq);
+ netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr),
+ bp->tx_wake_thresh);
}
return NETDEV_TX_OK;
@@ -705,20 +688,11 @@ next_tx_int:
dev_kfree_skb_any(skb);
}
- netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
- txr->tx_cons = cons;
-
- /* Need to make the tx_cons update visible to bnxt_start_xmit()
- * before checking for netif_tx_queue_stopped(). Without the
- * memory barrier, there is a small possibility that bnxt_start_xmit()
- * will miss it and cause the queue to be stopped forever.
- */
- smp_mb();
+ WRITE_ONCE(txr->tx_cons, cons);
- if (unlikely(netif_tx_queue_stopped(txq)) &&
- bnxt_tx_avail(bp, txr) >= bp->tx_wake_thresh &&
- READ_ONCE(txr->dev_state) != BNXT_DEV_STATE_CLOSING)
- netif_tx_wake_queue(txq);
+ __netif_txq_completed_wake(txq, nr_pkts, tx_bytes,
+ bnxt_tx_avail(bp, txr), bp->tx_wake_thresh,
+ READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING);
}
static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
@@ -2387,7 +2361,7 @@ static int bnxt_async_event_process(struct bnxt *bp,
case ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE: {
switch (BNXT_EVENT_PHC_EVENT_TYPE(data1)) {
case ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE:
- if (bp->fw_cap & BNXT_FW_CAP_PTP_RTC) {
+ if (BNXT_PTP_USE_RTC(bp)) {
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
u64 ns;
@@ -3237,6 +3211,7 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
pp.pool_size = bp->rx_ring_size;
pp.nid = dev_to_node(&bp->pdev->dev);
+ pp.napi = &rxr->bnapi->napi;
pp.dev = &bp->pdev->dev;
pp.dma_dir = DMA_BIDIRECTIONAL;
@@ -7626,7 +7601,7 @@ static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
u8 flags;
int rc;
- if (bp->hwrm_spec_code < 0x10801) {
+ if (bp->hwrm_spec_code < 0x10801 || !BNXT_CHIP_P5_THOR(bp)) {
rc = -ENODEV;
goto no_ptp;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 18cac98ba58e..080e73496066 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -2231,13 +2231,12 @@ struct bnxt {
#define SFF_MODULE_ID_QSFP28 0x11
#define BNXT_MAX_PHY_I2C_RESP_SIZE 64
-static inline u32 bnxt_tx_avail(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
+static inline u32 bnxt_tx_avail(struct bnxt *bp,
+ const struct bnxt_tx_ring_info *txr)
{
- /* Tell compiler to fetch tx indices from memory. */
- barrier();
+ u32 used = READ_ONCE(txr->tx_prod) - READ_ONCE(txr->tx_cons);
- return bp->tx_ring_size -
- ((txr->tx_prod - txr->tx_cons) & bp->tx_ring_mask);
+ return bp->tx_ring_size - (used & bp->tx_ring_mask);
}
static inline void bnxt_writeq(struct bnxt *bp, u64 val,
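
[Editor's note] The bnxt conversion above is an instance of the generic lockless stop/wake protocol that net/netdev_queues.h now centralizes: the producer publishes tx_prod with WRITE_ONCE() and calls netif_txq_try_stop() when the ring looks full; the completion path publishes tx_cons and calls __netif_txq_completed_wake(), whose last argument is a "down" condition that suppresses the wake. A hedged sketch of both halves for a hypothetical ring (my_ring and my_avail are illustrative):

    #include <net/netdev_queues.h>

    struct my_ring {
    	u32 prod;
    	u32 cons;
    	u32 size;
    };

    static u32 my_avail(const struct my_ring *r)
    {
    	/* paired READ_ONCE()s replace the old explicit barrier() */
    	return r->size - (READ_ONCE(r->prod) - READ_ONCE(r->cons));
    }

    /* xmit path: publish the new producer index, then maybe stop */
    static void my_xmit_done(struct my_ring *r, struct netdev_queue *txq,
    			 u32 prod, u32 wake_thresh)
    {
    	WRITE_ONCE(r->prod, prod);
    	/* stops the queue, re-checks availability, may re-wake */
    	netif_txq_try_stop(txq, my_avail(r), wake_thresh);
    }

    /* completion path: publish consumer index, report to BQL, maybe wake */
    static void my_tx_int(struct my_ring *r, struct netdev_queue *txq,
    		      u32 cons, int pkts, int bytes, u32 wake_thresh,
    		      bool closing)
    {
    	WRITE_ONCE(r->cons, cons);
    	/* last argument is the "down" condition: true suppresses the wake */
    	__netif_txq_completed_wake(txq, pkts, bytes, my_avail(r),
    				   wake_thresh, closing);
    }

The helpers embed the memory barriers that the open-coded bnxt version carried in comments, which is why the smp_mb() calls disappear from the diff.
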
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 3ed3a2b3b3a9..dde327f2c57e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -825,8 +825,24 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
if (rc)
goto err_out2;
+ if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
+ return 0;
+
+ /* Create representors for VFs in switchdev mode */
+ devl_lock(bp->dl);
+ rc = bnxt_vf_reps_create(bp);
+ devl_unlock(bp->dl);
+ if (rc) {
+ netdev_info(bp->dev, "Cannot enable VFs as representors cannot be created\n");
+ goto err_out3;
+ }
+
return 0;
+err_out3:
+ /* Disable SR-IOV */
+ pci_disable_sriov(bp->pdev);
+
err_out2:
/* Free the resources reserved for various VF's */
bnxt_hwrm_func_vf_resource_free(bp, *num_vfs);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index e7b5e28ee29f..852eb449ccae 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -304,7 +304,7 @@ void bnxt_rdma_aux_device_uninit(struct bnxt *bp)
struct auxiliary_device *adev;
/* Skip if no auxiliary device init was done. */
- if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
+ if (!bp->aux_priv)
return;
aux_priv = bp->aux_priv;
@@ -324,6 +324,7 @@ static void bnxt_aux_dev_release(struct device *dev)
bp->edev = NULL;
kfree(aux_priv->edev);
kfree(aux_priv);
+ bp->aux_priv = NULL;
}
static void bnxt_set_edev_info(struct bnxt_en_dev *edev, struct bnxt *bp)
@@ -359,19 +360,18 @@ void bnxt_rdma_aux_device_init(struct bnxt *bp)
if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
return;
- bp->aux_priv = kzalloc(sizeof(*bp->aux_priv), GFP_KERNEL);
- if (!bp->aux_priv)
+ aux_priv = kzalloc(sizeof(*bp->aux_priv), GFP_KERNEL);
+ if (!aux_priv)
goto exit;
- bp->aux_priv->id = ida_alloc(&bnxt_aux_dev_ids, GFP_KERNEL);
- if (bp->aux_priv->id < 0) {
+ aux_priv->id = ida_alloc(&bnxt_aux_dev_ids, GFP_KERNEL);
+ if (aux_priv->id < 0) {
netdev_warn(bp->dev,
"ida alloc failed for ROCE auxiliary device\n");
- kfree(bp->aux_priv);
+ kfree(aux_priv);
goto exit;
}
- aux_priv = bp->aux_priv;
aux_dev = &aux_priv->aux_dev;
aux_dev->id = aux_priv->id;
aux_dev->name = "rdma";
@@ -380,10 +380,11 @@ void bnxt_rdma_aux_device_init(struct bnxt *bp)
rc = auxiliary_device_init(aux_dev);
if (rc) {
- ida_free(&bnxt_aux_dev_ids, bp->aux_priv->id);
- kfree(bp->aux_priv);
+ ida_free(&bnxt_aux_dev_ids, aux_priv->id);
+ kfree(aux_priv);
goto exit;
}
+ bp->aux_priv = aux_priv;
/* From this point, all cleanup will happen via the .release callback &
* any error unwinding will need to include a call to
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
index fcc65890820a..2f1a1f2d2157 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
@@ -356,10 +356,15 @@ void bnxt_vf_reps_destroy(struct bnxt *bp)
/* un-publish cfa_code_map so that RX path can't see it anymore */
kfree(bp->cfa_code_map);
bp->cfa_code_map = NULL;
- bp->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY;
- if (closed)
+ if (closed) {
+ /* Temporarily set legacy mode to avoid re-opening the
+ * representors, then restore switchdev mode afterwards.
+ */
+ bp->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY;
bnxt_open_nic(bp, false, false);
+ bp->eswitch_mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
+ }
rtnl_unlock();
/* Need to call vf_reps_destroy() outside of rtnl_lock
@@ -482,7 +487,7 @@ static void bnxt_vf_rep_netdev_init(struct bnxt *bp, struct bnxt_vf_rep *vf_rep,
dev->min_mtu = ETH_ZLEN;
}
-static int bnxt_vf_reps_create(struct bnxt *bp)
+int bnxt_vf_reps_create(struct bnxt *bp)
{
u16 *cfa_code_map = NULL, num_vfs = pci_num_vf(bp->pdev);
struct bnxt_vf_rep *vf_rep;
@@ -535,7 +540,6 @@ static int bnxt_vf_reps_create(struct bnxt *bp)
/* publish cfa_code_map only after all VF-reps have been initialized */
bp->cfa_code_map = cfa_code_map;
- bp->eswitch_mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
netif_keep_dst(bp->dev);
return 0;
@@ -559,6 +563,7 @@ int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode,
struct netlink_ext_ack *extack)
{
struct bnxt *bp = bnxt_get_bp_from_dl(devlink);
+ int ret = 0;
if (bp->eswitch_mode == mode) {
netdev_info(bp->dev, "already in %s eswitch mode\n",
@@ -570,7 +575,7 @@ int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode,
switch (mode) {
case DEVLINK_ESWITCH_MODE_LEGACY:
bnxt_vf_reps_destroy(bp);
- return 0;
+ break;
case DEVLINK_ESWITCH_MODE_SWITCHDEV:
if (bp->hwrm_spec_code < 0x10803) {
@@ -578,15 +583,19 @@ int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode,
return -ENOTSUPP;
}
- if (pci_num_vf(bp->pdev) == 0) {
- netdev_info(bp->dev, "Enable VFs before setting switchdev mode\n");
- return -EPERM;
- }
- return bnxt_vf_reps_create(bp);
+ /* Create representors for existing VFs */
+ if (pci_num_vf(bp->pdev) > 0)
+ ret = bnxt_vf_reps_create(bp);
+ break;
default:
return -EINVAL;
}
+
+ if (!ret)
+ bp->eswitch_mode = mode;
+
+ return ret;
}
#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h
index 5637a84884d7..33a965631d0b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h
@@ -14,6 +14,7 @@
#define MAX_CFA_CODE 65536
+int bnxt_vf_reps_create(struct bnxt *bp);
void bnxt_vf_reps_destroy(struct bnxt *bp);
void bnxt_vf_reps_close(struct bnxt *bp);
void bnxt_vf_reps_open(struct bnxt *bp);
@@ -37,6 +38,11 @@ int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode,
#else
+static inline int bnxt_vf_reps_create(struct bnxt *bp)
+{
+ return 0;
+}
+
static inline void bnxt_vf_reps_close(struct bnxt *bp)
{
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index 5843c93b1711..4efa5fe6972b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -64,7 +64,7 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
int frag_len;
prod = NEXT_TX(prod);
- txr->tx_prod = prod;
+ WRITE_ONCE(txr->tx_prod, prod);
/* first fill up the first buffer */
frag_tx_buf = &txr->tx_buf_ring[prod];
@@ -94,7 +94,7 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
/* Sync TX BD */
wmb();
prod = NEXT_TX(prod);
- txr->tx_prod = prod;
+ WRITE_ONCE(txr->tx_prod, prod);
return tx_buf;
}
@@ -161,7 +161,7 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
}
tx_cons = NEXT_TX(tx_cons);
}
- txr->tx_cons = tx_cons;
+ WRITE_ONCE(txr->tx_cons, tx_cons);
if (rx_doorbell_needed) {
tx_buf = &txr->tx_buf_ring[last_tx_cons];
bnxt_db_write(bp, &rxr->rx_db, tx_buf->rx_prod);
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index c1fc91c97cee..cfbdd0022764 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -95,6 +95,8 @@
#define GEM_SA4B 0x00A0 /* Specific4 Bottom */
#define GEM_SA4T 0x00A4 /* Specific4 Top */
#define GEM_WOL 0x00b8 /* Wake on LAN */
+#define GEM_RXPTPUNI 0x00D4 /* PTP RX Unicast address */
+#define GEM_TXPTPUNI 0x00D8 /* PTP TX Unicast address */
#define GEM_EFTSH 0x00e8 /* PTP Event Frame Transmitted Seconds Register 47:32 */
#define GEM_EFRSH 0x00ec /* PTP Event Frame Received Seconds Register 47:32 */
#define GEM_PEFTSH 0x00f0 /* PTP Peer Event Frame Transmitted Seconds Register 47:32 */
@@ -245,6 +247,8 @@
#define MACB_TZQ_OFFSET 12 /* Transmit zero quantum pause frame */
#define MACB_TZQ_SIZE 1
#define MACB_SRTSM_OFFSET 15 /* Store Receive Timestamp to Memory */
+#define MACB_PTPUNI_OFFSET 20 /* PTP Unicast packet enable */
+#define MACB_PTPUNI_SIZE 1
#define MACB_OSSMODE_OFFSET 24 /* Enable One Step Synchro Mode */
#define MACB_OSSMODE_SIZE 1
#define MACB_MIIONRGMII_OFFSET 28 /* MII Usage on RGMII Interface */
@@ -1363,7 +1367,7 @@ static inline bool macb_is_gem(struct macb *bp)
static inline bool gem_has_ptp(struct macb *bp)
{
- return !!(bp->caps & MACB_CAPS_GEM_HAS_PTP);
+ return IS_ENABLED(CONFIG_MACB_USE_HWSTAMP) && (bp->caps & MACB_CAPS_GEM_HAS_PTP);
}
/**
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index f77bd1223c8f..29a1199dad14 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -287,6 +287,11 @@ static void macb_set_hwaddr(struct macb *bp)
top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
macb_or_gem_writel(bp, SA1T, top);
+ if (gem_has_ptp(bp)) {
+ gem_writel(bp, RXPTPUNI, bottom);
+ gem_writel(bp, TXPTPUNI, bottom);
+ }
+
/* Clear unused address register sets */
macb_or_gem_writel(bp, SA2B, 0);
macb_or_gem_writel(bp, SA2T, 0);
@@ -773,8 +778,12 @@ static void macb_mac_link_up(struct phylink_config *config,
spin_unlock_irqrestore(&bp->lock, flags);
- /* Enable Rx and Tx */
- macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE));
+ /* Enable Rx and Tx; Enable PTP unicast */
+ ctrl = macb_readl(bp, NCR);
+ if (gem_has_ptp(bp))
+ ctrl |= MACB_BIT(PTPUNI);
+
+ macb_writel(bp, NCR, ctrl | MACB_BIT(RE) | MACB_BIT(TE));
netif_tx_wake_all_queues(ndev);
}
@@ -1063,6 +1072,10 @@ static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc)
}
#endif
addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
+#ifdef CONFIG_MACB_USE_HWSTAMP
+ if (bp->hw_dma_cap & HW_DMA_CAP_PTP)
+ addr &= ~GEM_BIT(DMA_RXVALID);
+#endif
return addr;
}
@@ -3889,17 +3902,17 @@ static void macb_configure_caps(struct macb *bp,
dcfg = gem_readl(bp, DCFG2);
if ((dcfg & (GEM_BIT(RX_PKT_BUFF) | GEM_BIT(TX_PKT_BUFF))) == 0)
bp->caps |= MACB_CAPS_FIFO_MODE;
-#ifdef CONFIG_MACB_USE_HWSTAMP
if (gem_has_ptp(bp)) {
if (!GEM_BFEXT(TSU, gem_readl(bp, DCFG5)))
dev_err(&bp->pdev->dev,
"GEM doesn't support hardware ptp.\n");
else {
+#ifdef CONFIG_MACB_USE_HWSTAMP
bp->hw_dma_cap |= HW_DMA_CAP_PTP;
bp->ptp_info = &gem_ptp_info;
+#endif
}
}
-#endif
}
dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps);
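
[Editor's note] The gem_has_ptp() change relies on IS_ENABLED() so that the PTP paths are always type-checked by the compiler but folded away as dead code when CONFIG_MACB_USE_HWSTAMP is off, shrinking the #ifdef footprint. A small illustration of the idiom (my_has_feature and the MY_* names are hypothetical):

    /* IS_ENABLED() is an ordinary constant expression, so the branch is
     * compiled (catching bit-rot) and then eliminated when the option
     * is disabled.
     */
    static inline bool my_has_feature(const struct my_dev *dev)
    {
    	return IS_ENABLED(CONFIG_MY_FEATURE) && (dev->caps & MY_CAP_FEATURE);
    }

Note the caveat visible in the diff: symbols that only exist under the config option (here hw_dma_cap and gem_ptp_info) must still sit behind a real #ifdef.
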
diff --git a/drivers/net/ethernet/cadence/macb_ptp.c b/drivers/net/ethernet/cadence/macb_ptp.c
index f962a95068a0..51d26fa190d7 100644
--- a/drivers/net/ethernet/cadence/macb_ptp.c
+++ b/drivers/net/ethernet/cadence/macb_ptp.c
@@ -258,6 +258,8 @@ static int gem_hw_timestamp(struct macb *bp, u32 dma_desc_ts_1,
*/
gem_tsu_get_time(&bp->ptp_clock_info, &tsu, NULL);
+ ts->tv_sec |= ((~GEM_DMA_SEC_MASK) & tsu.tv_sec);
+
/* If the top bit is set in the timestamp,
* but not in 1588 timer, it has rolled over,
* so subtract max size
@@ -266,8 +268,6 @@ static int gem_hw_timestamp(struct macb *bp, u32 dma_desc_ts_1,
!(tsu.tv_sec & (GEM_DMA_SEC_TOP >> 1)))
ts->tv_sec -= GEM_DMA_SEC_TOP;
- ts->tv_sec += ((~GEM_DMA_SEC_MASK) & tsu.tv_sec);
-
return 0;
}
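
[Editor's note] The macb_ptp.c fix reorders the reconstruction: the upper TSU seconds bits must be merged into the partial descriptor timestamp before the rollover comparison, otherwise the subtraction can act on an incomplete value. The logic, restated as a hedged standalone sketch (field widths follow the GEM_DMA_SEC_* masks in the driver):

    /* ts_sec carries only the low descriptor-resident seconds bits;
     * tsu_sec is the full TSU time read near the event.
     */
    static u64 reconstruct_seconds(u64 ts_sec, u64 tsu_sec)
    {
    	/* fill in the high bits from the TSU first */
    	ts_sec |= (~GEM_DMA_SEC_MASK) & tsu_sec;

    	/* if the descriptor's top bit is ahead of the TSU, the low
    	 * counter rolled over between the event and the TSU read
    	 */
    	if ((ts_sec & (GEM_DMA_SEC_TOP >> 1)) &&
    	    !(tsu_sec & (GEM_DMA_SEC_TOP >> 1)))
    		ts_sec -= GEM_DMA_SEC_TOP;

    	return ts_sec;
    }
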
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
index dd9be229819a..d3541159487d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -1135,7 +1135,7 @@ void cxgb4_cleanup_tc_flower(struct adapter *adap)
return;
if (adap->flower_stats_timer.function)
- del_timer_sync(&adap->flower_stats_timer);
+ timer_shutdown_sync(&adap->flower_stats_timer);
cancel_work_sync(&adap->flower_stats_work);
rhashtable_destroy(&adap->flower_tbl);
adap->tc_flower_initialized = false;
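
[Editor's note] The cxgb4 change swaps del_timer_sync() for timer_shutdown_sync(), which additionally marks the timer as shut down so any later mod_timer()/add_timer() on it becomes a no-op; that is the right choice on teardown paths where a concurrent work item might still try to re-arm the timer. A hedged sketch of the ordering (my_adapter and its fields are illustrative):

    static void my_cleanup(struct my_adapter *adap)
    {
    	/* shut down first: a still-running work item can no longer
    	 * re-arm the timer behind our back
    	 */
    	timer_shutdown_sync(&adap->stats_timer);
    	cancel_work_sync(&adap->stats_work);
    	/* now safe to free structures the timer callback used */
    }
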
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index f1e80d6996ef..1c78f66a89da 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -71,6 +71,7 @@ config FSL_XGMAC_MDIO
tristate "Freescale XGMAC MDIO"
select PHYLIB
depends on OF
+ select MDIO_DEVRES
select OF_MDIO
help
This driver supports the MDIO bus on the Fman 10G Ethernet MACs, and
diff --git a/drivers/net/ethernet/freescale/enetc/Kconfig b/drivers/net/ethernet/freescale/enetc/Kconfig
index 9bc099cf3cb1..4d75e6807e92 100644
--- a/drivers/net/ethernet/freescale/enetc/Kconfig
+++ b/drivers/net/ethernet/freescale/enetc/Kconfig
@@ -10,6 +10,7 @@ config FSL_ENETC_CORE
config FSL_ENETC
tristate "ENETC PF driver"
depends on PCI_MSI
+ select MDIO_DEVRES
select FSL_ENETC_CORE
select FSL_ENETC_IERB
select FSL_ENETC_MDIO
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 2fc712b24d12..3c4fa26f0f9b 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -25,6 +25,13 @@ void enetc_port_mac_wr(struct enetc_si *si, u32 reg, u32 val)
}
EXPORT_SYMBOL_GPL(enetc_port_mac_wr);
+static void enetc_change_preemptible_tcs(struct enetc_ndev_priv *priv,
+ u8 preemptible_tcs)
+{
+ priv->preemptible_tcs = preemptible_tcs;
+ enetc_mm_commit_preemptible_tcs(priv);
+}
+
static int enetc_num_stack_tx_queues(struct enetc_ndev_priv *priv)
{
int num_tx_rings = priv->num_tx_rings;
@@ -2640,16 +2647,19 @@ static void enetc_reset_tc_mqprio(struct net_device *ndev)
}
enetc_debug_tx_ring_prios(priv);
+
+ enetc_change_preemptible_tcs(priv, 0);
}
int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
{
+ struct tc_mqprio_qopt_offload *mqprio = type_data;
struct enetc_ndev_priv *priv = netdev_priv(ndev);
- struct tc_mqprio_qopt *mqprio = type_data;
+ struct tc_mqprio_qopt *qopt = &mqprio->qopt;
struct enetc_hw *hw = &priv->si->hw;
int num_stack_tx_queues = 0;
- u8 num_tc = mqprio->num_tc;
struct enetc_bdr *tx_ring;
+ u8 num_tc = qopt->num_tc;
int offset, count;
int err, tc, q;
@@ -2663,8 +2673,8 @@ int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
return err;
for (tc = 0; tc < num_tc; tc++) {
- offset = mqprio->offset[tc];
- count = mqprio->count[tc];
+ offset = qopt->offset[tc];
+ count = qopt->count[tc];
num_stack_tx_queues += count;
err = netdev_set_tc_queue(ndev, tc, count, offset);
@@ -2693,6 +2703,8 @@ int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
enetc_debug_tx_ring_prios(priv);
+ enetc_change_preemptible_tcs(priv, mqprio->preemptible_tcs);
+
return 0;
err_reset_tc:
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
index 8010f31cd10d..c97a8e3d7a7f 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -355,6 +355,9 @@ struct enetc_ndev_priv {
u16 rx_bd_count, tx_bd_count;
u16 msg_enable;
+
+ u8 preemptible_tcs;
+
enum enetc_active_offloads active_offloads;
u32 speed; /* store speed for compare update pspeed */
@@ -433,6 +436,7 @@ int enetc_xdp_xmit(struct net_device *ndev, int num_frames,
/* ethtool */
void enetc_set_ethtool_ops(struct net_device *ndev);
void enetc_mm_link_state_update(struct enetc_ndev_priv *priv, bool link);
+void enetc_mm_commit_preemptible_tcs(struct enetc_ndev_priv *priv);
/* control buffer descriptor ring (CBDR) */
int enetc_setup_cbdr(struct device *dev, struct enetc_hw *hw, int bd_count,
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
index da9d4b310fcd..e993ed04ab57 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
@@ -32,6 +32,12 @@ static const u32 enetc_port_regs[] = {
ENETC_PM0_CMD_CFG, ENETC_PM0_MAXFRM, ENETC_PM0_IF_MODE
};
+static const u32 enetc_port_mm_regs[] = {
+ ENETC_MMCSR, ENETC_PFPMR, ENETC_PTCFPR(0), ENETC_PTCFPR(1),
+ ENETC_PTCFPR(2), ENETC_PTCFPR(3), ENETC_PTCFPR(4), ENETC_PTCFPR(5),
+ ENETC_PTCFPR(6), ENETC_PTCFPR(7),
+};
+
static int enetc_get_reglen(struct net_device *ndev)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
@@ -45,6 +51,9 @@ static int enetc_get_reglen(struct net_device *ndev)
if (hw->port)
len += ARRAY_SIZE(enetc_port_regs);
+ if (hw->port && !!(priv->si->hw_features & ENETC_SI_F_QBU))
+ len += ARRAY_SIZE(enetc_port_mm_regs);
+
len *= sizeof(u32) * 2; /* store 2 entries per reg: addr and value */
return len;
@@ -90,6 +99,14 @@ static void enetc_get_regs(struct net_device *ndev, struct ethtool_regs *regs,
*buf++ = addr;
*buf++ = enetc_rd(hw, addr);
}
+
+ if (priv->si->hw_features & ENETC_SI_F_QBU) {
+ for (i = 0; i < ARRAY_SIZE(enetc_port_mm_regs); i++) {
+ addr = ENETC_PORT_BASE + enetc_port_mm_regs[i];
+ *buf++ = addr;
+ *buf++ = enetc_rd(hw, addr);
+ }
+ }
}
static const struct {
@@ -976,7 +993,9 @@ static int enetc_get_mm(struct net_device *ndev, struct ethtool_mm_state *state)
lafs = ENETC_MMCSR_GET_LAFS(val);
state->rx_min_frag_size = ethtool_mm_frag_size_add_to_min(lafs);
state->tx_enabled = !!(val & ENETC_MMCSR_LPE); /* mirror of MMCSR_ME */
- state->tx_active = !!(val & ENETC_MMCSR_LPA);
+ state->tx_active = state->tx_enabled &&
+ (state->verify_status == ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED ||
+ state->verify_status == ETHTOOL_MM_VERIFY_STATUS_DISABLED);
state->verify_enabled = !(val & ENETC_MMCSR_VDIS);
state->verify_time = ENETC_MMCSR_GET_VT(val);
/* A verifyTime of 128 ms would exceed the 7 bit width
@@ -989,6 +1008,78 @@ static int enetc_get_mm(struct net_device *ndev, struct ethtool_mm_state *state)
return 0;
}
+static int enetc_mm_wait_tx_active(struct enetc_hw *hw, int verify_time)
+{
+ int timeout = verify_time * USEC_PER_MSEC * ENETC_MM_VERIFY_RETRIES;
+ u32 val;
+
+ /* This will time out after the standard value of 3 verification
+ * attempts. To avoid sleeping forever, it relies on a non-zero
+ * verify_time, a guarantee provided by the ethtool nlattr policy.
+ */
+ return read_poll_timeout(enetc_port_rd, val,
+ ENETC_MMCSR_GET_VSTS(val) == 3,
+ ENETC_MM_VERIFY_SLEEP_US, timeout,
+ true, hw, ENETC_MMCSR);
+}
+
+static void enetc_set_ptcfpr(struct enetc_hw *hw, u8 preemptible_tcs)
+{
+ u32 val;
+ int tc;
+
+ for (tc = 0; tc < 8; tc++) {
+ val = enetc_port_rd(hw, ENETC_PTCFPR(tc));
+
+ if (preemptible_tcs & BIT(tc))
+ val |= ENETC_PTCFPR_FPE;
+ else
+ val &= ~ENETC_PTCFPR_FPE;
+
+ enetc_port_wr(hw, ENETC_PTCFPR(tc), val);
+ }
+}
+
+/* ENETC does not have an IRQ to notify changes to the MAC Merge TX status
+ * (active/inactive), but the preemptible traffic classes should only be
+ * committed to hardware once TX is active. Resort to polling.
+ */
+void enetc_mm_commit_preemptible_tcs(struct enetc_ndev_priv *priv)
+{
+ struct enetc_hw *hw = &priv->si->hw;
+ u8 preemptible_tcs = 0;
+ u32 val;
+ int err;
+
+ val = enetc_port_rd(hw, ENETC_MMCSR);
+ if (!(val & ENETC_MMCSR_ME))
+ goto out;
+
+ if (!(val & ENETC_MMCSR_VDIS)) {
+ err = enetc_mm_wait_tx_active(hw, ENETC_MMCSR_GET_VT(val));
+ if (err)
+ goto out;
+ }
+
+ preemptible_tcs = priv->preemptible_tcs;
+out:
+ enetc_set_ptcfpr(hw, preemptible_tcs);
+}
+
+/* FIXME: Workaround for the link partner's verification failing if ENETC
+ * previously received too much express traffic. The documentation doesn't
+ * suggest this is needed.
+ */
+static void enetc_restart_emac_rx(struct enetc_si *si)
+{
+ u32 val = enetc_port_rd(&si->hw, ENETC_PM0_CMD_CFG);
+
+ enetc_port_wr(&si->hw, ENETC_PM0_CMD_CFG, val & ~ENETC_PM0_RX_EN);
+
+ if (val & ENETC_PM0_RX_EN)
+ enetc_port_wr(&si->hw, ENETC_PM0_CMD_CFG, val);
+}
+
static int enetc_set_mm(struct net_device *ndev, struct ethtool_mm_cfg *cfg,
struct netlink_ext_ack *extack)
{
@@ -1027,10 +1118,13 @@ static int enetc_set_mm(struct net_device *ndev, struct ethtool_mm_cfg *cfg,
else
priv->active_offloads &= ~ENETC_F_QBU;
- /* If link is up, enable MAC Merge right away */
- if (!!(priv->active_offloads & ENETC_F_QBU) &&
- !(val & ENETC_MMCSR_LINK_FAIL))
- val |= ENETC_MMCSR_ME;
+ /* If link is up, enable/disable MAC Merge right away */
+ if (!(val & ENETC_MMCSR_LINK_FAIL)) {
+ if (!!(priv->active_offloads & ENETC_F_QBU))
+ val |= ENETC_MMCSR_ME;
+ else
+ val &= ~ENETC_MMCSR_ME;
+ }
val &= ~ENETC_MMCSR_VT_MASK;
val |= ENETC_MMCSR_VT(cfg->verify_time);
@@ -1040,6 +1134,10 @@ static int enetc_set_mm(struct net_device *ndev, struct ethtool_mm_cfg *cfg,
enetc_port_wr(hw, ENETC_MMCSR, val);
+ enetc_restart_emac_rx(priv->si);
+
+ enetc_mm_commit_preemptible_tcs(priv);
+
mutex_unlock(&priv->mm_lock);
return 0;
@@ -1073,6 +1171,8 @@ void enetc_mm_link_state_update(struct enetc_ndev_priv *priv, bool link)
enetc_port_wr(hw, ENETC_MMCSR, val);
+ enetc_mm_commit_preemptible_tcs(priv);
+
mutex_unlock(&priv->mm_lock);
}
EXPORT_SYMBOL_GPL(enetc_mm_link_state_update);
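
[Editor's note] enetc_mm_wait_tx_active() above builds on read_poll_timeout(), which repeatedly evaluates an accessor until a condition on its result holds or the timeout elapses. Its general shape, shown against a hypothetical register reader my_rd() and status bit MY_STATUS_READY:

    #include <linux/iopoll.h>

    static int wait_for_ready(struct my_hw *hw)
    {
    	u32 val;

    	/* evaluate my_rd(hw, MY_STATUS) every 1000 us, for at most
    	 * 100000 us, sleeping before the first read (the bool arg)
    	 */
    	return read_poll_timeout(my_rd, val, val & MY_STATUS_READY,
    				 1000, 100000, true, hw, MY_STATUS);
    }

It returns 0 on success and -ETIMEDOUT otherwise, which is why the enetc caller can simply bail out of committing the preemptible TCs on error.
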
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
index de2e0ee8cdcb..1619943fb263 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
@@ -3,6 +3,9 @@
#include <linux/bitops.h>
+#define ENETC_MM_VERIFY_SLEEP_US USEC_PER_MSEC
+#define ENETC_MM_VERIFY_RETRIES 3
+
/* ENETC device IDs */
#define ENETC_DEV_ID_PF 0xe100
#define ENETC_DEV_ID_VF 0xef00
@@ -965,6 +968,10 @@ static inline u32 enetc_usecs_to_cycles(u32 usecs)
return (u32)div_u64(usecs * ENETC_CLK, 1000000ULL);
}
+/* Port traffic class frame preemption register */
+#define ENETC_PTCFPR(n) (0x1910 + (n) * 4) /* n = [0 ..7] */
+#define ENETC_PTCFPR_FPE BIT(31)
+
/* port time gating control register */
#define ENETC_PTGCR 0x11a00
#define ENETC_PTGCR_TGE BIT(31)
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 5ba1e0d71c68..9939ccafb556 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -507,6 +507,11 @@ struct bufdesc_ex {
/* i.MX6Q adds pm_qos support */
#define FEC_QUIRK_HAS_PMQOS BIT(23)
+/* Not all FEC hardware block MDIOs support accesses in C45 mode.
+ * Older blocks in the ColdFire parts do not support it.
+ */
+#define FEC_QUIRK_HAS_MDIO_C45 BIT(24)
+
struct bufdesc_prop {
int qid;
/* Address of Rx and Tx buffers */
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index f3b16a6673e2..160c1b3525f5 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -100,18 +100,19 @@ struct fec_devinfo {
static const struct fec_devinfo fec_imx25_info = {
.quirks = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR |
- FEC_QUIRK_HAS_FRREG,
+ FEC_QUIRK_HAS_FRREG | FEC_QUIRK_HAS_MDIO_C45,
};
static const struct fec_devinfo fec_imx27_info = {
- .quirks = FEC_QUIRK_MIB_CLEAR | FEC_QUIRK_HAS_FRREG,
+ .quirks = FEC_QUIRK_MIB_CLEAR | FEC_QUIRK_HAS_FRREG |
+ FEC_QUIRK_HAS_MDIO_C45,
};
static const struct fec_devinfo fec_imx28_info = {
.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC |
FEC_QUIRK_HAS_FRREG | FEC_QUIRK_CLEAR_SETUP_MII |
- FEC_QUIRK_NO_HARD_RESET,
+ FEC_QUIRK_NO_HARD_RESET | FEC_QUIRK_HAS_MDIO_C45,
};
static const struct fec_devinfo fec_imx6q_info = {
@@ -119,11 +120,12 @@ static const struct fec_devinfo fec_imx6q_info = {
FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 |
FEC_QUIRK_HAS_RACC | FEC_QUIRK_CLEAR_SETUP_MII |
- FEC_QUIRK_HAS_PMQOS,
+ FEC_QUIRK_HAS_PMQOS | FEC_QUIRK_HAS_MDIO_C45,
};
static const struct fec_devinfo fec_mvf600_info = {
- .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_RACC,
+ .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_RACC |
+ FEC_QUIRK_HAS_MDIO_C45,
};
static const struct fec_devinfo fec_imx6x_info = {
@@ -132,7 +134,8 @@ static const struct fec_devinfo fec_imx6x_info = {
FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
- FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES,
+ FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES |
+ FEC_QUIRK_HAS_MDIO_C45,
};
static const struct fec_devinfo fec_imx6ul_info = {
@@ -140,7 +143,8 @@ static const struct fec_devinfo fec_imx6ul_info = {
FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR007885 |
FEC_QUIRK_BUG_CAPTURE | FEC_QUIRK_HAS_RACC |
- FEC_QUIRK_HAS_COALESCE | FEC_QUIRK_CLEAR_SETUP_MII,
+ FEC_QUIRK_HAS_COALESCE | FEC_QUIRK_CLEAR_SETUP_MII |
+ FEC_QUIRK_HAS_MDIO_C45,
};
static const struct fec_devinfo fec_imx8mq_info = {
@@ -150,7 +154,8 @@ static const struct fec_devinfo fec_imx8mq_info = {
FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES |
- FEC_QUIRK_HAS_EEE | FEC_QUIRK_WAKEUP_FROM_INT2,
+ FEC_QUIRK_HAS_EEE | FEC_QUIRK_WAKEUP_FROM_INT2 |
+ FEC_QUIRK_HAS_MDIO_C45,
};
static const struct fec_devinfo fec_imx8qm_info = {
@@ -160,14 +165,15 @@ static const struct fec_devinfo fec_imx8qm_info = {
FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES |
- FEC_QUIRK_DELAYED_CLKS_SUPPORT,
+ FEC_QUIRK_DELAYED_CLKS_SUPPORT | FEC_QUIRK_HAS_MDIO_C45,
};
static const struct fec_devinfo fec_s32v234_info = {
.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
- FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE,
+ FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
+ FEC_QUIRK_HAS_MDIO_C45,
};
static struct platform_device_id fec_devtype[] = {
@@ -2434,8 +2440,10 @@ static int fec_enet_mii_init(struct platform_device *pdev)
fep->mii_bus->name = "fec_enet_mii_bus";
fep->mii_bus->read = fec_enet_mdio_read_c22;
fep->mii_bus->write = fec_enet_mdio_write_c22;
- fep->mii_bus->read_c45 = fec_enet_mdio_read_c45;
- fep->mii_bus->write_c45 = fec_enet_mdio_write_c45;
+ if (fep->quirks & FEC_QUIRK_HAS_MDIO_C45) {
+ fep->mii_bus->read_c45 = fec_enet_mdio_read_c45;
+ fep->mii_bus->write_c45 = fec_enet_mdio_write_c45;
+ }
snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
pdev->name, fep->dev_id + 1);
fep->mii_bus->priv = fep;
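
[Editor's note] The FEC change gates the clause-45 accessors on a quirk bit: leaving read_c45/write_c45 NULL tells the MDIO core the bus cannot do C45 transfers, so phylib will not attempt them on the older ColdFire-era blocks. A condensed sketch of the registration pattern (names prefixed my_ are illustrative):

    static int my_mii_init(struct my_priv *priv, struct platform_device *pdev)
    {
    	struct mii_bus *bus;

    	bus = mdiobus_alloc();
    	if (!bus)
    		return -ENOMEM;

    	bus->name = "my_mii_bus";
    	bus->read = my_mdio_read_c22;
    	bus->write = my_mdio_write_c22;
    	/* only advertise C45 when the hardware supports it; NULL hooks
    	 * make the MDIO core reject C45 accesses cleanly
    	 */
    	if (priv->quirks & MY_QUIRK_HAS_MDIO_C45) {
    		bus->read_c45 = my_mdio_read_c45;
    		bus->write_c45 = my_mdio_write_c45;
    	}
    	snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", pdev->name);
    	bus->priv = priv;

    	return mdiobus_register(bus);
    }
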
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index e214b51d3c8b..98eb78d98e9f 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -49,7 +49,7 @@
#define GVE_XDP_ACTIONS 5
-#define GVE_TX_MAX_HEADER_SIZE 182
+#define GVE_GQ_TX_MIN_PKT_DESC_BYTES 182
/* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */
struct gve_rx_desc_queue {
diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
index e50510b8e784..813da572abca 100644
--- a/drivers/net/ethernet/google/gve/gve_tx.c
+++ b/drivers/net/ethernet/google/gve/gve_tx.c
@@ -351,8 +351,8 @@ static inline int gve_skb_fifo_bytes_required(struct gve_tx_ring *tx,
int bytes;
int hlen;
- hlen = skb_is_gso(skb) ? skb_checksum_start_offset(skb) +
- tcp_hdrlen(skb) : skb_headlen(skb);
+ hlen = skb_is_gso(skb) ? skb_checksum_start_offset(skb) + tcp_hdrlen(skb) :
+ min_t(int, GVE_GQ_TX_MIN_PKT_DESC_BYTES, skb->len);
pad_bytes = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo,
hlen);
@@ -522,13 +522,11 @@ static int gve_tx_add_skb_copy(struct gve_priv *priv, struct gve_tx_ring *tx, st
pkt_desc = &tx->desc[idx];
l4_hdr_offset = skb_checksum_start_offset(skb);
- /* If the skb is gso, then we want the tcp header in the first segment
- * otherwise we want the linear portion of the skb (which will contain
- * the checksum because skb->csum_start and skb->csum_offset are given
- * relative to skb->head) in the first segment.
+ /* If the skb is gso, then we want the tcp header alone in the first segment;
+ * otherwise we want the minimum required by the gVNIC spec.
*/
hlen = is_gso ? l4_hdr_offset + tcp_hdrlen(skb) :
- skb_headlen(skb);
+ min_t(int, GVE_GQ_TX_MIN_PKT_DESC_BYTES, skb->len);
info->skb = skb;
/* We don't want to split the header, so if necessary, pad to the end
@@ -732,7 +730,7 @@ static int gve_tx_fill_xdp(struct gve_priv *priv, struct gve_tx_ring *tx,
u32 reqi = tx->req;
pad = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo, len);
- if (pad >= GVE_TX_MAX_HEADER_SIZE)
+ if (pad >= GVE_GQ_TX_MIN_PKT_DESC_BYTES)
pad = 0;
info = &tx->info[reqi & tx->mask];
info->xdp_frame = frame_p;
@@ -812,7 +810,7 @@ int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx,
{
int nsegs;
- if (!gve_can_tx(tx, len + GVE_TX_MAX_HEADER_SIZE - 1))
+ if (!gve_can_tx(tx, len + GVE_GQ_TX_MIN_PKT_DESC_BYTES - 1))
return -EBUSY;
nsegs = gve_tx_fill_xdp(priv, tx, data, len, frame_p, false);
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 6f5c16aebcbf..bd7ef59b1f2e 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -5287,31 +5287,6 @@ static void e1000_watchdog_task(struct work_struct *work)
ew32(TARC(0), tarc0);
}
- /* disable TSO for pcie and 10/100 speeds, to avoid
- * some hardware issues
- */
- if (!(adapter->flags & FLAG_TSO_FORCE)) {
- switch (adapter->link_speed) {
- case SPEED_10:
- case SPEED_100:
- e_info("10/100 speed: disabling TSO\n");
- netdev->features &= ~NETIF_F_TSO;
- netdev->features &= ~NETIF_F_TSO6;
- break;
- case SPEED_1000:
- netdev->features |= NETIF_F_TSO;
- netdev->features |= NETIF_F_TSO6;
- break;
- default:
- /* oops */
- break;
- }
- if (hw->mac.type == e1000_pch_spt) {
- netdev->features &= ~NETIF_F_TSO;
- netdev->features &= ~NETIF_F_TSO6;
- }
- }
-
/* enable transmits in the hardware, need to do this
* after setting TARC(0)
*/
@@ -7525,6 +7500,32 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_RXCSUM |
NETIF_F_HW_CSUM);
+ /* disable TSO for pcie and 10/100 speeds to avoid
+ * some hardware issues and for i219 to fix transfer
+ * speed being capped at 60%
+ */
+ if (!(adapter->flags & FLAG_TSO_FORCE)) {
+ switch (adapter->link_speed) {
+ case SPEED_10:
+ case SPEED_100:
+ e_info("10/100 speed: disabling TSO\n");
+ netdev->features &= ~NETIF_F_TSO;
+ netdev->features &= ~NETIF_F_TSO6;
+ break;
+ case SPEED_1000:
+ netdev->features |= NETIF_F_TSO;
+ netdev->features |= NETIF_F_TSO6;
+ break;
+ default:
+ /* oops */
+ break;
+ }
+ if (hw->mac.type == e1000_pch_spt) {
+ netdev->features &= ~NETIF_F_TSO;
+ netdev->features &= ~NETIF_F_TSO6;
+ }
+ }
+
/* Set user-changeable features (subset of all device features) */
netdev->hw_features = netdev->features;
netdev->hw_features |= NETIF_F_RXFCS;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index c8ff5675b29d..b847bd105b16 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -11072,8 +11072,11 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
pf->hw.aq.asq_last_status));
}
/* reinit the misc interrupt */
- if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
ret = i40e_setup_misc_vector(pf);
+ if (ret)
+ goto end_unlock;
+ }
/* Add a filter to drop all Flow control frames from any VSI from being
* transmitted. By doing so we stop a malicious VF from sending out
@@ -14147,15 +14150,15 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
vsi->id = ctxt.vsi_number;
}
- vsi->active_filters = 0;
- clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
spin_lock_bh(&vsi->mac_filter_hash_lock);
+ vsi->active_filters = 0;
/* If macvlan filters already exist, force them to get loaded */
hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
f->state = I40E_FILTER_NEW;
f_count++;
}
spin_unlock_bh(&vsi->mac_filter_hash_lock);
+ clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
if (f_count) {
vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index 2cdce251472c..9abaff1f2aff 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -58,8 +58,6 @@ enum iavf_vsi_state_t {
struct iavf_vsi {
struct iavf_adapter *back;
struct net_device *netdev;
- unsigned long active_cvlans[BITS_TO_LONGS(VLAN_N_VID)];
- unsigned long active_svlans[BITS_TO_LONGS(VLAN_N_VID)];
u16 seid;
u16 id;
DECLARE_BITMAP(state, __IAVF_VSI_STATE_SIZE__);
@@ -157,15 +155,20 @@ struct iavf_vlan {
u16 tpid;
};
+enum iavf_vlan_state_t {
+ IAVF_VLAN_INVALID,
+ IAVF_VLAN_ADD, /* filter needs to be added */
+ IAVF_VLAN_IS_NEW, /* filter is new, wait for PF answer */
+ IAVF_VLAN_ACTIVE, /* filter is accepted by PF */
+ IAVF_VLAN_DISABLE, /* filter needs to be deleted by PF, then marked INACTIVE */
+ IAVF_VLAN_INACTIVE, /* filter is inactive, we are in IFF_DOWN */
+ IAVF_VLAN_REMOVE, /* filter needs to be removed from list */
+};
+
struct iavf_vlan_filter {
struct list_head list;
struct iavf_vlan vlan;
- struct {
- u8 is_new_vlan:1; /* filter is new, wait for PF answer */
- u8 remove:1; /* filter needs to be removed */
- u8 add:1; /* filter needs to be added */
- u8 padding:5;
- };
+ enum iavf_vlan_state_t state;
};
#define IAVF_MAX_TRAFFIC_CLASS 4
@@ -257,6 +260,7 @@ struct iavf_adapter {
wait_queue_head_t vc_waitqueue;
struct iavf_q_vector *q_vectors;
struct list_head vlan_filter_list;
+ int num_vlan_filters;
struct list_head mac_filter_list;
struct mutex crit_lock;
struct mutex client_lock;
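
[Editor's note] The iavf rework replaces three independent bits with a single enum, which makes the legal transitions explicit: ADD → IS_NEW (request sent to the PF) → ACTIVE (acked), with DISABLE → INACTIVE covering IFF_DOWN and REMOVE covering deletion. A hedged sketch of how such a per-filter state machine is typically advanced (the real code walks its filter list under mac_vlan_list_lock; these helpers are illustrative):

    /* PF acknowledged the add request */
    static void vlan_filter_ack_add(struct iavf_vlan_filter *f)
    {
    	if (f->state == IAVF_VLAN_IS_NEW)
    		f->state = IAVF_VLAN_ACTIVE;
    }

    /* interface going down: keep the filter, have the PF delete it */
    static void vlan_filter_ifdown(struct iavf_vlan_filter *f)
    {
    	f->state = IAVF_VLAN_DISABLE;	/* becomes INACTIVE after del */
    }

    /* interface coming back up: INACTIVE filters are queued for re-add */
    static void vlan_filter_restore(struct iavf_vlan_filter *f)
    {
    	if (f->state == IAVF_VLAN_INACTIVE)
    		f->state = IAVF_VLAN_ADD;
    }

Keeping disabled filters on the list (instead of freeing them, as the old code did) is what allows iavf_restore_filters() to drop the two VLAN_N_VID-sized bitmaps.
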
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 095201e83c9d..2de4baff4c20 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -791,7 +791,8 @@ iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter,
f->vlan = vlan;
list_add_tail(&f->list, &adapter->vlan_filter_list);
- f->add = true;
+ f->state = IAVF_VLAN_ADD;
+ adapter->num_vlan_filters++;
adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
}
@@ -813,7 +814,7 @@ static void iavf_del_vlan(struct iavf_adapter *adapter, struct iavf_vlan vlan)
f = iavf_find_vlan(adapter, vlan);
if (f) {
- f->remove = true;
+ f->state = IAVF_VLAN_REMOVE;
adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
}
@@ -828,14 +829,18 @@ static void iavf_del_vlan(struct iavf_adapter *adapter, struct iavf_vlan vlan)
**/
static void iavf_restore_filters(struct iavf_adapter *adapter)
{
- u16 vid;
+ struct iavf_vlan_filter *f;
/* re-add all VLAN filters */
- for_each_set_bit(vid, adapter->vsi.active_cvlans, VLAN_N_VID)
- iavf_add_vlan(adapter, IAVF_VLAN(vid, ETH_P_8021Q));
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
- for_each_set_bit(vid, adapter->vsi.active_svlans, VLAN_N_VID)
- iavf_add_vlan(adapter, IAVF_VLAN(vid, ETH_P_8021AD));
+ list_for_each_entry(f, &adapter->vlan_filter_list, list) {
+ if (f->state == IAVF_VLAN_INACTIVE)
+ f->state = IAVF_VLAN_ADD;
+ }
+
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+ adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
}
/**
@@ -844,8 +849,7 @@ static void iavf_restore_filters(struct iavf_adapter *adapter)
*/
u16 iavf_get_num_vlans_added(struct iavf_adapter *adapter)
{
- return bitmap_weight(adapter->vsi.active_cvlans, VLAN_N_VID) +
- bitmap_weight(adapter->vsi.active_svlans, VLAN_N_VID);
+ return adapter->num_vlan_filters;
}
/**
@@ -928,11 +932,6 @@ static int iavf_vlan_rx_kill_vid(struct net_device *netdev,
return 0;
iavf_del_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto)));
- if (proto == cpu_to_be16(ETH_P_8021Q))
- clear_bit(vid, adapter->vsi.active_cvlans);
- else
- clear_bit(vid, adapter->vsi.active_svlans);
-
return 0;
}
@@ -1293,16 +1292,11 @@ static void iavf_clear_mac_vlan_filters(struct iavf_adapter *adapter)
}
}
- /* remove all VLAN filters */
+ /* disable all VLAN filters */
list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
- list) {
- if (vlf->add) {
- list_del(&vlf->list);
- kfree(vlf);
- } else {
- vlf->remove = true;
- }
- }
+ list)
+ vlf->state = IAVF_VLAN_DISABLE;
+
spin_unlock_bh(&adapter->mac_vlan_list_lock);
}
@@ -2914,6 +2908,7 @@ static void iavf_disable_vf(struct iavf_adapter *adapter)
list_del(&fv->list);
kfree(fv);
}
+ adapter->num_vlan_filters = 0;
spin_unlock_bh(&adapter->mac_vlan_list_lock);
@@ -3131,9 +3126,6 @@ continue_reset:
adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
iavf_misc_irq_enable(adapter);
- bitmap_clear(adapter->vsi.active_cvlans, 0, VLAN_N_VID);
- bitmap_clear(adapter->vsi.active_svlans, 0, VLAN_N_VID);
-
mod_delayed_work(adapter->wq, &adapter->watchdog_task, 2);
/* We were running when the reset started, so we need to restore some
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index 4e17d006c52d..9afbbdac3590 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -642,16 +642,10 @@ static void iavf_vlan_add_reject(struct iavf_adapter *adapter)
spin_lock_bh(&adapter->mac_vlan_list_lock);
list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
- if (f->is_new_vlan) {
- if (f->vlan.tpid == ETH_P_8021Q)
- clear_bit(f->vlan.vid,
- adapter->vsi.active_cvlans);
- else
- clear_bit(f->vlan.vid,
- adapter->vsi.active_svlans);
-
+ if (f->state == IAVF_VLAN_IS_NEW) {
list_del(&f->list);
kfree(f);
+ adapter->num_vlan_filters--;
}
}
spin_unlock_bh(&adapter->mac_vlan_list_lock);
@@ -679,7 +673,7 @@ void iavf_add_vlans(struct iavf_adapter *adapter)
spin_lock_bh(&adapter->mac_vlan_list_lock);
list_for_each_entry(f, &adapter->vlan_filter_list, list) {
- if (f->add)
+ if (f->state == IAVF_VLAN_ADD)
count++;
}
if (!count || !VLAN_FILTERING_ALLOWED(adapter)) {
@@ -710,11 +704,10 @@ void iavf_add_vlans(struct iavf_adapter *adapter)
vvfl->vsi_id = adapter->vsi_res->vsi_id;
vvfl->num_elements = count;
list_for_each_entry(f, &adapter->vlan_filter_list, list) {
- if (f->add) {
+ if (f->state == IAVF_VLAN_ADD) {
vvfl->vlan_id[i] = f->vlan.vid;
i++;
- f->add = false;
- f->is_new_vlan = true;
+ f->state = IAVF_VLAN_IS_NEW;
if (i == count)
break;
}
@@ -760,7 +753,7 @@ void iavf_add_vlans(struct iavf_adapter *adapter)
vvfl_v2->vport_id = adapter->vsi_res->vsi_id;
vvfl_v2->num_elements = count;
list_for_each_entry(f, &adapter->vlan_filter_list, list) {
- if (f->add) {
+ if (f->state == IAVF_VLAN_ADD) {
struct virtchnl_vlan_supported_caps *filtering_support =
&adapter->vlan_v2_caps.filtering.filtering_support;
struct virtchnl_vlan *vlan;
@@ -778,8 +771,7 @@ void iavf_add_vlans(struct iavf_adapter *adapter)
vlan->tpid = f->vlan.tpid;
i++;
- f->add = false;
- f->is_new_vlan = true;
+ f->state = IAVF_VLAN_IS_NEW;
}
}
@@ -822,10 +814,16 @@ void iavf_del_vlans(struct iavf_adapter *adapter)
* filters marked for removal to enable bailing out before
* sending a virtchnl message
*/
- if (f->remove && !VLAN_FILTERING_ALLOWED(adapter)) {
+ if (f->state == IAVF_VLAN_REMOVE &&
+ !VLAN_FILTERING_ALLOWED(adapter)) {
list_del(&f->list);
kfree(f);
- } else if (f->remove) {
+ adapter->num_vlan_filters--;
+ } else if (f->state == IAVF_VLAN_DISABLE &&
+ !VLAN_FILTERING_ALLOWED(adapter)) {
+ f->state = IAVF_VLAN_INACTIVE;
+ } else if (f->state == IAVF_VLAN_REMOVE ||
+ f->state == IAVF_VLAN_DISABLE) {
count++;
}
}
@@ -857,11 +855,18 @@ void iavf_del_vlans(struct iavf_adapter *adapter)
vvfl->vsi_id = adapter->vsi_res->vsi_id;
vvfl->num_elements = count;
list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
- if (f->remove) {
+ if (f->state == IAVF_VLAN_DISABLE) {
vvfl->vlan_id[i] = f->vlan.vid;
+ f->state = IAVF_VLAN_INACTIVE;
i++;
+ if (i == count)
+ break;
+ } else if (f->state == IAVF_VLAN_REMOVE) {
+ vvfl->vlan_id[i] = f->vlan.vid;
list_del(&f->list);
kfree(f);
+ adapter->num_vlan_filters--;
+ i++;
if (i == count)
break;
}
@@ -901,7 +906,8 @@ void iavf_del_vlans(struct iavf_adapter *adapter)
vvfl_v2->vport_id = adapter->vsi_res->vsi_id;
vvfl_v2->num_elements = count;
list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
- if (f->remove) {
+ if (f->state == IAVF_VLAN_DISABLE ||
+ f->state == IAVF_VLAN_REMOVE) {
struct virtchnl_vlan_supported_caps *filtering_support =
&adapter->vlan_v2_caps.filtering.filtering_support;
struct virtchnl_vlan *vlan;
@@ -915,8 +921,13 @@ void iavf_del_vlans(struct iavf_adapter *adapter)
vlan->tci = f->vlan.vid;
vlan->tpid = f->vlan.tpid;
- list_del(&f->list);
- kfree(f);
+ if (f->state == IAVF_VLAN_DISABLE) {
+ f->state = IAVF_VLAN_INACTIVE;
+ } else {
+ list_del(&f->list);
+ kfree(f);
+ adapter->num_vlan_filters--;
+ }
i++;
if (i == count)
break;
@@ -2192,7 +2203,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
list_for_each_entry(vlf,
&adapter->vlan_filter_list,
list)
- vlf->add = true;
+ vlf->state = IAVF_VLAN_ADD;
adapter->aq_required |=
IAVF_FLAG_AQ_ADD_VLAN_FILTER;
@@ -2260,7 +2271,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
list_for_each_entry(vlf,
&adapter->vlan_filter_list,
list)
- vlf->add = true;
+ vlf->state = IAVF_VLAN_ADD;
aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
}
@@ -2444,15 +2455,8 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
spin_lock_bh(&adapter->mac_vlan_list_lock);
list_for_each_entry(f, &adapter->vlan_filter_list, list) {
- if (f->is_new_vlan) {
- f->is_new_vlan = false;
- if (f->vlan.tpid == ETH_P_8021Q)
- set_bit(f->vlan.vid,
- adapter->vsi.active_cvlans);
- else
- set_bit(f->vlan.vid,
- adapter->vsi.active_svlans);
- }
+ if (f->state == IAVF_VLAN_IS_NEW)
+ f->state = IAVF_VLAN_ACTIVE;
}
spin_unlock_bh(&adapter->mac_vlan_list_lock);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
index 5fd75e75772e..daa6a1e894cf 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
@@ -542,6 +542,21 @@ static void ice_vc_fdir_rem_prof_all(struct ice_vf *vf)
}
/**
+ * ice_vc_fdir_reset_cnt_all - reset all FDIR counters for this VF FDIR
+ * @fdir: pointer to the VF FDIR structure
+ */
+static void ice_vc_fdir_reset_cnt_all(struct ice_vf_fdir *fdir)
+{
+ enum ice_fltr_ptype flow;
+
+ for (flow = ICE_FLTR_PTYPE_NONF_NONE;
+ flow < ICE_FLTR_PTYPE_MAX; flow++) {
+ fdir->fdir_fltr_cnt[flow][0] = 0;
+ fdir->fdir_fltr_cnt[flow][1] = 0;
+ }
+}
+
+/**
* ice_vc_fdir_has_prof_conflict
* @vf: pointer to the VF structure
* @conf: FDIR configuration for each filter
@@ -1871,7 +1886,7 @@ int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
v_ret = VIRTCHNL_STATUS_SUCCESS;
stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
dev_dbg(dev, "VF %d: set FDIR context failed\n", vf->vf_id);
- goto err_free_conf;
+ goto err_rem_entry;
}
ret = ice_vc_fdir_write_fltr(vf, conf, true, is_tun);
@@ -1880,15 +1895,16 @@ int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n",
vf->vf_id, ret);
- goto err_rem_entry;
+ goto err_clr_irq;
}
exit:
kfree(stat);
return ret;
-err_rem_entry:
+err_clr_irq:
ice_vc_fdir_clear_irq_ctx(vf);
+err_rem_entry:
ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
err_free_conf:
devm_kfree(dev, conf);
@@ -1997,6 +2013,7 @@ void ice_vf_fdir_init(struct ice_vf *vf)
spin_lock_init(&fdir->ctx_lock);
fdir->ctx_irq.flags = 0;
fdir->ctx_done.flags = 0;
+ ice_vc_fdir_reset_cnt_all(fdir);
}
/**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 773c35fecace..f2604fc05991 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -36,6 +36,7 @@
#include <net/tc_act/tc_mirred.h>
#include <net/vxlan.h>
#include <net/mpls.h>
+#include <net/netdev_queues.h>
#include <net/xdp_sock_drv.h>
#include <net/xfrm.h>
@@ -1119,6 +1120,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
unsigned int total_bytes = 0, total_packets = 0, total_ipsec = 0;
unsigned int budget = q_vector->tx.work_limit;
unsigned int i = tx_ring->next_to_clean;
+ struct netdev_queue *txq;
if (test_bit(__IXGBE_DOWN, &adapter->state))
return true;
@@ -1249,24 +1251,14 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
if (ring_is_xdp(tx_ring))
return !!budget;
- netdev_tx_completed_queue(txring_txq(tx_ring),
- total_packets, total_bytes);
-
#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
- if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
- (ixgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
- /* Make sure that anybody stopping the queue after this
- * sees the new next_to_clean.
- */
- smp_mb();
- if (__netif_subqueue_stopped(tx_ring->netdev,
- tx_ring->queue_index)
- && !test_bit(__IXGBE_DOWN, &adapter->state)) {
- netif_wake_subqueue(tx_ring->netdev,
- tx_ring->queue_index);
- ++tx_ring->tx_stats.restart_queue;
- }
- }
+ txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index);
+ if (!__netif_txq_completed_wake(txq, total_packets, total_bytes,
+ ixgbe_desc_unused(tx_ring),
+ TX_WAKE_THRESHOLD,
+ !netif_carrier_ok(tx_ring->netdev) ||
+ test_bit(__IXGBE_DOWN, &adapter->state)))
+ ++tx_ring->tx_stats.restart_queue;
return !!budget;
}
@@ -8270,22 +8262,10 @@ static void ixgbe_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
{
- netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
-
- /* Herbert's original patch had:
- * smp_mb__after_netif_stop_queue();
- * but since that doesn't exist yet, just open code it.
- */
- smp_mb();
-
- /* We need to check again in a case another CPU has just
- * made room available.
- */
- if (likely(ixgbe_desc_unused(tx_ring) < size))
+ if (!netif_subqueue_try_stop(tx_ring->netdev, tx_ring->queue_index,
+ ixgbe_desc_unused(tx_ring), size))
return -EBUSY;
- /* A reprieve! - use start_queue because it doesn't call schedule */
- netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
++tx_ring->tx_stats.restart_queue;
return 0;
}
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index f58a1c0144ba..884d64114bff 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -34,6 +34,7 @@ config MV643XX_ETH
config MVMDIO
tristate "Marvell MDIO interface support"
depends on HAS_IOMEM
+ select MDIO_DEVRES
select PHYLIB
help
This driver supports the MDIO interface found in the network
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 9fe9ac3b23c3..9e948d091a69 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -729,6 +729,7 @@ static void mtk_mac_link_up(struct phylink_config *config,
MAC_MCR_FORCE_RX_FC);
/* Configure speed */
+ mac->speed = speed;
switch (speed) {
case SPEED_2500:
case SPEED_1000:
@@ -3232,6 +3233,9 @@ found:
if (dp->index >= MTK_QDMA_NUM_QUEUES)
return NOTIFY_DONE;
+ if (mac->speed > 0 && mac->speed <= s.base.speed)
+ s.base.speed = 0;
+
mtk_set_queue_speed(eth, dp->index + 3, s.base.speed);
return NOTIFY_DONE;
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 23c7abeb5c14..cdcf8534283e 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -1276,6 +1276,9 @@ int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id);
int mtk_eth_offload_init(struct mtk_eth *eth);
int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data);
+int mtk_flow_offload_cmd(struct mtk_eth *eth, struct flow_cls_offload *cls,
+ int ppe_index);
+void mtk_flow_offload_cleanup(struct mtk_eth *eth, struct list_head *list);
void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev);
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
index f9c9f2ea4206..dd9581334b05 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
@@ -635,10 +635,20 @@ void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
static int
mtk_foe_entry_commit_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
+ struct mtk_flow_entry *prev;
+
entry->type = MTK_FLOW_TYPE_L2;
- return rhashtable_insert_fast(&ppe->l2_flows, &entry->l2_node,
- mtk_flow_l2_ht_params);
+ prev = rhashtable_lookup_get_insert_fast(&ppe->l2_flows, &entry->l2_node,
+ mtk_flow_l2_ht_params);
+ if (likely(!prev))
+ return 0;
+
+ if (IS_ERR(prev))
+ return PTR_ERR(prev);
+
+ return rhashtable_replace_fast(&ppe->l2_flows, &prev->l2_node,
+ &entry->l2_node, mtk_flow_l2_ht_params);
}
int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
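
[Editor's note] The mtk_ppe change avoids spurious failures when an L2 flow already exists: rhashtable_lookup_get_insert_fast() either inserts the new node or returns the existing one (or an ERR_PTR), and an existing entry is then swapped out with rhashtable_replace_fast(). A hedged sketch of the insert-or-replace idiom for a hypothetical table:

    static int insert_or_replace(struct rhashtable *ht, struct my_entry *e,
    			     const struct rhashtable_params params)
    {
    	struct my_entry *prev;

    	prev = rhashtable_lookup_get_insert_fast(ht, &e->node, params);
    	if (likely(!prev))
    		return 0;		/* plain insert succeeded */
    	if (IS_ERR(prev))
    		return PTR_ERR(prev);	/* allocation or table error */

    	/* a duplicate exists: atomically swap it for the new node */
    	return rhashtable_replace_fast(ht, &prev->node, &e->node, params);
    }
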
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
index 53cf87e9acbb..316fe2e70fea 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
@@ -47,7 +47,7 @@ static const char *mtk_foe_pkt_type_str(int type)
static void
mtk_print_addr(struct seq_file *m, u32 *addr, bool ipv6)
{
- u32 n_addr[4];
+ __be32 n_addr[4];
int i;
if (!ipv6) {
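
[Editor's note] The one-line mtk_ppe_debugfs change is a sparse endianness fix: values holding network byte order belong in __be32 so that conversions are forced through the cpu_to_be32()/be32_to_cpu() helpers and mixed-endianness bugs show up at build time with `make C=1`. For instance:

    #include <linux/types.h>
    #include <asm/byteorder.h>

    static void store_ipv4(__be32 *dst, u32 host_addr)
    {
    	/* sparse flags a bare assignment of u32 to __be32; the helper
    	 * documents (and performs) the byte swap explicitly
    	 */
    	*dst = cpu_to_be32(host_addr);
    }
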
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
index 46634dc29d2f..02eebff02d45 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -235,7 +235,8 @@ out:
}
static int
-mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
+mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f,
+ int ppe_index)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
struct flow_action_entry *act;
@@ -452,6 +453,7 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
entry->cookie = f->cookie;
memcpy(&entry->data, &foe, sizeof(entry->data));
entry->wed_index = wed_index;
+ entry->ppe_index = ppe_index;
err = mtk_foe_entry_commit(eth->ppe[entry->ppe_index], entry);
if (err < 0)
@@ -520,25 +522,15 @@ mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f)
static DEFINE_MUTEX(mtk_flow_offload_mutex);
-static int
-mtk_eth_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
+int mtk_flow_offload_cmd(struct mtk_eth *eth, struct flow_cls_offload *cls,
+ int ppe_index)
{
- struct flow_cls_offload *cls = type_data;
- struct net_device *dev = cb_priv;
- struct mtk_mac *mac = netdev_priv(dev);
- struct mtk_eth *eth = mac->hw;
int err;
- if (!tc_can_offload(dev))
- return -EOPNOTSUPP;
-
- if (type != TC_SETUP_CLSFLOWER)
- return -EOPNOTSUPP;
-
mutex_lock(&mtk_flow_offload_mutex);
switch (cls->command) {
case FLOW_CLS_REPLACE:
- err = mtk_flow_offload_replace(eth, cls);
+ err = mtk_flow_offload_replace(eth, cls, ppe_index);
break;
case FLOW_CLS_DESTROY:
err = mtk_flow_offload_destroy(eth, cls);
@@ -556,6 +548,26 @@ mtk_eth_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_pri
}
static int
+mtk_eth_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
+{
+ struct flow_cls_offload *cls = type_data;
+ struct net_device *dev = cb_priv;
+ struct mtk_mac *mac;
+ struct mtk_eth *eth;
+
+ mac = netdev_priv(dev);
+ eth = mac->hw;
+
+ if (!tc_can_offload(dev))
+ return -EOPNOTSUPP;
+
+ if (type != TC_SETUP_CLSFLOWER)
+ return -EOPNOTSUPP;
+
+ return mtk_flow_offload_cmd(eth, cls, 0);
+}
+
+static int
mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f)
{
struct mtk_mac *mac = netdev_priv(dev);
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
index 95d890870984..4c205afbd230 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -13,6 +13,8 @@
#include <linux/mfd/syscon.h>
#include <linux/debugfs.h>
#include <linux/soc/mediatek/mtk_wed.h>
+#include <net/flow_offload.h>
+#include <net/pkt_cls.h>
#include "mtk_eth_soc.h"
#include "mtk_wed_regs.h"
#include "mtk_wed.h"
@@ -41,6 +43,11 @@
static struct mtk_wed_hw *hw_list[2];
static DEFINE_MUTEX(hw_lock);
+struct mtk_wed_flow_block_priv {
+ struct mtk_wed_hw *hw;
+ struct net_device *dev;
+};
+
static void
wed_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val)
{
@@ -1745,6 +1752,99 @@ out:
mutex_unlock(&hw_lock);
}
+static int
+mtk_wed_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
+{
+ struct mtk_wed_flow_block_priv *priv = cb_priv;
+ struct flow_cls_offload *cls = type_data;
+ struct mtk_wed_hw *hw = priv->hw;
+
+ if (!tc_can_offload(priv->dev))
+ return -EOPNOTSUPP;
+
+ if (type != TC_SETUP_CLSFLOWER)
+ return -EOPNOTSUPP;
+
+ return mtk_flow_offload_cmd(hw->eth, cls, hw->index);
+}
+
+static int
+mtk_wed_setup_tc_block(struct mtk_wed_hw *hw, struct net_device *dev,
+ struct flow_block_offload *f)
+{
+ struct mtk_wed_flow_block_priv *priv;
+ static LIST_HEAD(block_cb_list);
+ struct flow_block_cb *block_cb;
+ struct mtk_eth *eth = hw->eth;
+ flow_setup_cb_t *cb;
+
+ if (!eth->soc->offload_version)
+ return -EOPNOTSUPP;
+
+ if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ return -EOPNOTSUPP;
+
+ cb = mtk_wed_setup_tc_block_cb;
+ f->driver_block_list = &block_cb_list;
+
+ switch (f->command) {
+ case FLOW_BLOCK_BIND:
+ block_cb = flow_block_cb_lookup(f->block, cb, dev);
+ if (block_cb) {
+ flow_block_cb_incref(block_cb);
+ return 0;
+ }
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->hw = hw;
+ priv->dev = dev;
+ block_cb = flow_block_cb_alloc(cb, dev, priv, NULL);
+ if (IS_ERR(block_cb)) {
+ kfree(priv);
+ return PTR_ERR(block_cb);
+ }
+
+ flow_block_cb_incref(block_cb);
+ flow_block_cb_add(block_cb, f);
+ list_add_tail(&block_cb->driver_list, &block_cb_list);
+ return 0;
+ case FLOW_BLOCK_UNBIND:
+ block_cb = flow_block_cb_lookup(f->block, cb, dev);
+ if (!block_cb)
+ return -ENOENT;
+
+ if (!flow_block_cb_decref(block_cb)) {
+ flow_block_cb_remove(block_cb, f);
+ list_del(&block_cb->driver_list);
+ kfree(block_cb->cb_priv);
+ }
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int
+mtk_wed_setup_tc(struct mtk_wed_device *wed, struct net_device *dev,
+ enum tc_setup_type type, void *type_data)
+{
+ struct mtk_wed_hw *hw = wed->hw;
+
+ if (hw->version < 2)
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ case TC_SETUP_FT:
+ return mtk_wed_setup_tc_block(hw, dev, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
void __iomem *wdma, phys_addr_t wdma_phy,
int index)
@@ -1764,6 +1864,7 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
.irq_set_mask = mtk_wed_irq_set_mask,
.detach = mtk_wed_detach,
.ppe_check = mtk_wed_ppe_check,
+ .setup_tc = mtk_wed_setup_tc,
};
struct device_node *eth_np = eth->dev->of_node;
struct platform_device *pdev;
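
[Editor's note: one ownership detail in the BIND/UNBIND pair above: the driver passes a NULL release callback to flow_block_cb_alloc() and therefore frees cb_priv by hand on the last UNBIND. A hedged alternative sketch lets the flow-offload core free it via the release callback that flow_block_cb_free() invokes; the function name here is hypothetical.]

static void my_wed_block_release(void *cb_priv)
{
	kfree(cb_priv);		/* called from flow_block_cb_free() */
}

	/* in the FLOW_BLOCK_BIND path */
	block_cb = flow_block_cb_alloc(cb, dev, priv, my_wed_block_release);
	if (IS_ERR(block_cb)) {
		kfree(priv);	/* release cb is not called on alloc failure */
		return PTR_ERR(block_cb);
	}

	/* The UNBIND path must then drop its explicit
	 * kfree(block_cb->cb_priv), otherwise cb_priv is freed twice.
	 */
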
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 4b5e459b6d49..332472fe4990 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -681,14 +681,32 @@ int mlx4_en_xdp_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
return 0;
}
-int mlx4_en_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash)
+int mlx4_en_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash,
+ enum xdp_rss_hash_type *rss_type)
{
struct mlx4_en_xdp_buff *_ctx = (void *)ctx;
+ struct mlx4_cqe *cqe = _ctx->cqe;
+ enum xdp_rss_hash_type xht = 0;
+ __be16 status;
if (unlikely(!(_ctx->dev->features & NETIF_F_RXHASH)))
return -ENODATA;
- *hash = be32_to_cpu(_ctx->cqe->immed_rss_invalid);
+ *hash = be32_to_cpu(cqe->immed_rss_invalid);
+ status = cqe->status;
+ if (status & cpu_to_be16(MLX4_CQE_STATUS_TCP))
+ xht = XDP_RSS_L4_TCP;
+ if (status & cpu_to_be16(MLX4_CQE_STATUS_UDP))
+ xht = XDP_RSS_L4_UDP;
+ if (status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 | MLX4_CQE_STATUS_IPV4F))
+ xht |= XDP_RSS_L3_IPV4;
+ if (status & cpu_to_be16(MLX4_CQE_STATUS_IPV6)) {
+ xht |= XDP_RSS_L3_IPV6;
+ if (cqe->ipv6_ext_mask)
+ xht |= XDP_RSS_L3_DYNHDR;
+ }
+ *rss_type = xht;
+
return 0;
}
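
[Editor's note: for context, a minimal sketch of the consumer side of this new kfunc signature, as an XDP program requesting the RX hash plus its type. This assumes a libbpf-style build and a device-bound program (metadata kfuncs are, to my understanding, only reachable from dev-bound XDP programs); it is illustrative, not taken from this series.]

/* SPDX-License-Identifier: GPL-2.0 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

extern int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, __u32 *hash,
				    enum xdp_rss_hash_type *rss_type) __ksym;

SEC("xdp")
int sample_rx_hash(struct xdp_md *ctx)
{
	enum xdp_rss_hash_type rss_type;
	__u32 hash;

	/* Fails with -ENODATA when NETIF_F_RXHASH is off or no hash exists */
	if (bpf_xdp_metadata_rx_hash(ctx, &hash, &rss_type))
		return XDP_PASS;

	if (rss_type & XDP_RSS_L4)	/* hash covered an L4 header */
		bpf_printk("L4-based hash 0x%x", hash);

	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";
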
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 2f79378fbf6e..65cb63f6c465 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -228,7 +228,9 @@ void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
static inline bool mlx4_en_is_tx_ring_full(struct mlx4_en_tx_ring *ring)
{
- return ring->prod - ring->cons > ring->full_size;
+ u32 used = READ_ONCE(ring->prod) - READ_ONCE(ring->cons);
+
+ return used > ring->full_size;
}
static void mlx4_en_stamp_wqe(struct mlx4_en_priv *priv,
@@ -1083,7 +1085,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
op_own |= cpu_to_be32(MLX4_WQE_CTRL_IIP);
}
- ring->prod += nr_txbb;
+ WRITE_ONCE(ring->prod, ring->prod + nr_txbb);
/* If we used a bounce buffer then copy descriptor back into place */
if (unlikely(bounce))
@@ -1214,7 +1216,7 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring,
rx_ring->xdp_tx++;
- ring->prod += MLX4_EN_XDP_TX_NRTXBB;
+ WRITE_ONCE(ring->prod, ring->prod + MLX4_EN_XDP_TX_NRTXBB);
/* Ensure new descriptor hits memory
* before setting ownership of this descriptor to HW
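
[Editor's note: the three hunks above all follow one pairing rule: a counter read locklessly must be read with READ_ONCE() and written with WRITE_ONCE(), so the compiler can neither tear nor refetch it. A self-contained sketch of the idiom with illustrative names:]

struct ring_ctr {
	u32 prod;	/* written only by the TX path */
	u32 cons;	/* written only by the completion path */
	u32 full_size;
};

static bool ring_is_full(const struct ring_ctr *r)
{
	/* Unsigned subtraction stays correct across 32-bit wraparound. */
	u32 used = READ_ONCE(r->prod) - READ_ONCE(r->cons);

	return used > r->full_size;
}

static void ring_post(struct ring_ctr *r, u32 n)
{
	/* Single writer: the plain read of r->prod is safe; the marked
	 * write publishes the new value to lockless readers.
	 */
	WRITE_ONCE(r->prod, r->prod + n);
}
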
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 034733b13b1a..321f801c1d7c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -798,7 +798,8 @@ int mlx4_en_netdev_event(struct notifier_block *this,
struct xdp_md;
int mlx4_en_xdp_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp);
-int mlx4_en_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash);
+int mlx4_en_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash,
+ enum xdp_rss_hash_type *rss_type);
/*
* Functions for time stamping
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 6c2f1d4a58ab..ca3c66cd47ec 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -75,7 +75,7 @@ mlx5_core-$(CONFIG_MLX5_ESWITCH) += esw/acl/helper.o \
esw/acl/egress_lgcy.o esw/acl/egress_ofld.o \
esw/acl/ingress_lgcy.o esw/acl/ingress_ofld.o
-mlx5_core-$(CONFIG_MLX5_BRIDGE) += esw/bridge.o en/rep/bridge.o
+mlx5_core-$(CONFIG_MLX5_BRIDGE) += esw/bridge.o esw/bridge_mcast.o en/rep/bridge.o
mlx5_core-$(CONFIG_THERMAL) += thermal.o
mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o
@@ -112,8 +112,8 @@ mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/dr_domain.o steering/dr_table.o
steering/dr_ste_v2.o \
steering/dr_cmd.o steering/dr_fw.o \
steering/dr_action.o steering/fs_dr.o \
- steering/dr_definer.o \
- steering/dr_dbg.o lib/smfs.o
+ steering/dr_definer.o steering/dr_ptrn.o \
+ steering/dr_arg.o steering/dr_dbg.o lib/smfs.o
#
# SF device
#
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index b00e33ed05e9..d53de39539a8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -1802,7 +1802,7 @@ static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
if (in_size <= 16)
goto cache_miss;
- for (i = 0; i < MLX5_NUM_COMMAND_CACHES; i++) {
+ for (i = 0; i < dev->profile.num_cmd_caches; i++) {
ch = &cmd->cache[i];
if (in_size > ch->max_inbox_size)
continue;
@@ -2097,7 +2097,7 @@ static void destroy_msg_cache(struct mlx5_core_dev *dev)
struct mlx5_cmd_msg *n;
int i;
- for (i = 0; i < MLX5_NUM_COMMAND_CACHES; i++) {
+ for (i = 0; i < dev->profile.num_cmd_caches; i++) {
ch = &dev->cmd.cache[i];
list_for_each_entry_safe(msg, n, &ch->head, list) {
list_del(&msg->list);
@@ -2127,7 +2127,7 @@ static void create_msg_cache(struct mlx5_core_dev *dev)
int k;
/* Initialize and fill the caches with initial entries */
- for (k = 0; k < MLX5_NUM_COMMAND_CACHES; k++) {
+ for (k = 0; k < dev->profile.num_cmd_caches; k++) {
ch = &cmd->cache[k];
spin_lock_init(&ch->lock);
INIT_LIST_HEAD(&ch->head);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index e7739acc926e..1b33533b15de 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -60,9 +60,6 @@ bool mlx5_eth_supported(struct mlx5_core_dev *dev)
if (!IS_ENABLED(CONFIG_MLX5_CORE_EN))
return false;
- if (mlx5_core_is_management_pf(dev))
- return false;
-
if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
return false;
@@ -191,9 +188,6 @@ bool mlx5_rdma_supported(struct mlx5_core_dev *dev)
if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
return false;
- if (mlx5_core_is_management_pf(dev))
- return false;
-
if (dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_IB_ADEV)
return false;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
index 7c9c4e40c019..d000236ddbac 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
@@ -75,10 +75,6 @@ int mlx5_ec_init(struct mlx5_core_dev *dev)
if (!mlx5_core_is_ecpf(dev))
return 0;
- /* Management PF don't have a peer PF */
- if (mlx5_core_is_management_pf(dev))
- return 0;
-
return mlx5_host_pf_init(dev);
}
@@ -89,10 +85,6 @@ void mlx5_ec_cleanup(struct mlx5_core_dev *dev)
if (!mlx5_core_is_ecpf(dev))
return;
- /* Management PF don't have a peer PF */
- if (mlx5_core_is_management_pf(dev))
- return;
-
mlx5_host_pf_cleanup(dev);
err = mlx5_wait_for_pages(dev, &dev->priv.page_counters[MLX5_HOST_PF]);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index ba615b74bb8e..b8987a404d75 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -475,59 +475,18 @@ struct mlx5e_txqsq {
cqe_ts_to_ns ptp_cyc2time;
} ____cacheline_aligned_in_smp;
-/* XDP packets can be transmitted in different ways. On completion, we need to
- * distinguish between them to clean up things in a proper way.
- */
-enum mlx5e_xdp_xmit_mode {
- /* An xdp_frame was transmitted due to either XDP_REDIRECT from another
- * device or XDP_TX from an XSK RQ. The frame has to be unmapped and
- * returned.
- */
- MLX5E_XDP_XMIT_MODE_FRAME,
-
- /* The xdp_frame was created in place as a result of XDP_TX from a
- * regular RQ. No DMA remapping happened, and the page belongs to us.
- */
- MLX5E_XDP_XMIT_MODE_PAGE,
-
- /* No xdp_frame was created at all, the transmit happened from a UMEM
- * page. The UMEM Completion Ring producer pointer has to be increased.
- */
- MLX5E_XDP_XMIT_MODE_XSK,
-};
-
-struct mlx5e_xdp_info {
- enum mlx5e_xdp_xmit_mode mode;
- union {
- struct {
- struct xdp_frame *xdpf;
- dma_addr_t dma_addr;
- } frame;
- struct {
- struct mlx5e_rq *rq;
- struct page *page;
- } page;
- };
-};
-
-struct mlx5e_xmit_data {
- dma_addr_t dma_addr;
- void *data;
- u32 len;
-};
-
struct mlx5e_xdp_info_fifo {
- struct mlx5e_xdp_info *xi;
+ union mlx5e_xdp_info *xi;
u32 *cc;
u32 *pc;
u32 mask;
};
struct mlx5e_xdpsq;
+struct mlx5e_xmit_data;
typedef int (*mlx5e_fp_xmit_xdp_frame_check)(struct mlx5e_xdpsq *);
typedef bool (*mlx5e_fp_xmit_xdp_frame)(struct mlx5e_xdpsq *,
struct mlx5e_xmit_data *,
- struct skb_shared_info *,
int);
struct mlx5e_xdpsq {
@@ -628,6 +587,7 @@ union mlx5e_alloc_units {
struct mlx5e_mpw_info {
u16 consumed_strides;
DECLARE_BITMAP(skip_release_bitmap, MLX5_MPWRQ_MAX_PAGES_PER_WQE);
+ struct mlx5e_frag_page linear_page;
union mlx5e_alloc_units alloc_units;
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index 31f3c6e51d9e..ef546ed8b4d9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -253,17 +253,20 @@ static u32 mlx5e_rx_get_linear_stride_sz(struct mlx5_core_dev *mdev,
struct mlx5e_xsk_param *xsk,
bool mpwqe)
{
+ u32 sz;
+
/* XSK frames are mapped as individual pages, because frames may come in
* an arbitrary order from random locations in the UMEM.
*/
if (xsk)
return mpwqe ? 1 << mlx5e_mpwrq_page_shift(mdev, xsk) : PAGE_SIZE;
- /* XDP in mlx5e doesn't support multiple packets per page. */
- if (params->xdp_prog)
- return PAGE_SIZE;
+ sz = roundup_pow_of_two(mlx5e_rx_get_linear_sz_skb(params, false));
- return roundup_pow_of_two(mlx5e_rx_get_linear_sz_skb(params, false));
+ /* XDP in mlx5e doesn't support multiple packets per page.
+ * Do not assume sz <= PAGE_SIZE if params->xdp_prog is set.
+ */
+ return params->xdp_prog && sz < PAGE_SIZE ? PAGE_SIZE : sz;
}
static u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5_core_dev *mdev,
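
[Editor's note: a hedged worked example of the new sizing rule above, assuming 4K pages and a linear SKB size of roughly headroom + MTU + tailroom (roundup_pow_of_two() comes from linux/log2.h); the numbers are illustrative only.]

static u32 linear_stride_sz_example(u32 linear_skb_sz, bool has_xdp_prog)
{
	u32 sz = roundup_pow_of_two(linear_skb_sz);	/* e.g. ~1700 -> 2048 */

	/* XDP keeps whole-page ownership: round a sub-page stride up to
	 * PAGE_SIZE, but keep an already multi-page stride (sz > PAGE_SIZE,
	 * e.g. a 9000-byte MTU rounding to 16K) exactly as computed.
	 */
	return has_xdp_prog && sz < PAGE_SIZE ? PAGE_SIZE : sz;
}
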
@@ -320,6 +323,20 @@ static bool mlx5e_verify_rx_mpwqe_strides(struct mlx5_core_dev *mdev,
return log_num_strides >= MLX5_MPWQE_LOG_NUM_STRIDES_BASE;
}
+bool mlx5e_verify_params_rx_mpwqe_strides(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk)
+{
+ u8 log_wqe_num_of_strides = mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
+ u8 log_wqe_stride_size = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
+ enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
+ u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
+
+ return mlx5e_verify_rx_mpwqe_strides(mdev, log_wqe_stride_size,
+ log_wqe_num_of_strides,
+ page_shift, umr_mode);
+}
+
bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk)
@@ -402,6 +419,10 @@ u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk))
return order_base_2(mlx5e_rx_get_linear_stride_sz(mdev, params, xsk, true));
+ /* XDP in mlx5e doesn't support multiple packets per page. */
+ if (params->xdp_prog)
+ return PAGE_SHIFT;
+
return MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev);
}
@@ -572,9 +593,6 @@ int mlx5e_mpwrq_validate_regular(struct mlx5_core_dev *mdev, struct mlx5e_params
if (!mlx5e_check_fragmented_striding_rq_cap(mdev, page_shift, umr_mode))
return -EOPNOTSUPP;
- if (params->xdp_prog && !mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL))
- return -EINVAL;
-
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
index c9be6eb88012..a5d20f6d6d9c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
@@ -153,6 +153,9 @@ int mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
int mlx5e_validate_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
+bool mlx5e_verify_params_rx_mpwqe_strides(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk);
static inline void mlx5e_params_print_info(struct mlx5_core_dev *mdev,
struct mlx5e_params *params)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index eb5aeba3addf..eb5abd0e55d9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -81,23 +81,23 @@ void mlx5e_skb_cb_hwtstamp_handler(struct sk_buff *skb, int hwtstamp_type,
#define PTP_WQE_CTR2IDX(val) ((val) & ptpsq->ts_cqe_ctr_mask)
-static bool mlx5e_ptp_ts_cqe_drop(struct mlx5e_ptpsq *ptpsq, u16 skb_cc, u16 skb_id)
+static bool mlx5e_ptp_ts_cqe_drop(struct mlx5e_ptpsq *ptpsq, u16 skb_ci, u16 skb_id)
{
- return (ptpsq->ts_cqe_ctr_mask && (skb_cc != skb_id));
+ return (ptpsq->ts_cqe_ctr_mask && (skb_ci != skb_id));
}
static bool mlx5e_ptp_ts_cqe_ooo(struct mlx5e_ptpsq *ptpsq, u16 skb_id)
{
- u16 skb_cc = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_cc);
- u16 skb_pc = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_pc);
+ u16 skb_ci = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_cc);
+ u16 skb_pi = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_pc);
- if (PTP_WQE_CTR2IDX(skb_id - skb_cc) >= PTP_WQE_CTR2IDX(skb_pc - skb_cc))
+ if (PTP_WQE_CTR2IDX(skb_id - skb_ci) >= PTP_WQE_CTR2IDX(skb_pi - skb_ci))
return true;
return false;
}
-static void mlx5e_ptp_skb_fifo_ts_cqe_resync(struct mlx5e_ptpsq *ptpsq, u16 skb_cc,
+static void mlx5e_ptp_skb_fifo_ts_cqe_resync(struct mlx5e_ptpsq *ptpsq, u16 skb_ci,
u16 skb_id, int budget)
{
struct skb_shared_hwtstamps hwts = {};
@@ -105,13 +105,13 @@ static void mlx5e_ptp_skb_fifo_ts_cqe_resync(struct mlx5e_ptpsq *ptpsq, u16 skb_
ptpsq->cq_stats->resync_event++;
- while (skb_cc != skb_id) {
+ while (skb_ci != skb_id) {
skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
hwts.hwtstamp = mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp;
skb_tstamp_tx(skb, &hwts);
ptpsq->cq_stats->resync_cqe++;
napi_consume_skb(skb, budget);
- skb_cc = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_cc);
+ skb_ci = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_cc);
}
}
@@ -120,7 +120,7 @@ static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
int budget)
{
u16 skb_id = PTP_WQE_CTR2IDX(be16_to_cpu(cqe->wqe_counter));
- u16 skb_cc = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_cc);
+ u16 skb_ci = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_cc);
struct mlx5e_txqsq *sq = &ptpsq->txqsq;
struct sk_buff *skb;
ktime_t hwtstamp;
@@ -131,13 +131,13 @@ static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
goto out;
}
- if (mlx5e_ptp_ts_cqe_drop(ptpsq, skb_cc, skb_id)) {
+ if (mlx5e_ptp_ts_cqe_drop(ptpsq, skb_ci, skb_id)) {
if (mlx5e_ptp_ts_cqe_ooo(ptpsq, skb_id)) {
/* already handled by a previous resync */
ptpsq->cq_stats->ooo_cqe_drop++;
return;
}
- mlx5e_ptp_skb_fifo_ts_cqe_resync(ptpsq, skb_cc, skb_id, budget);
+ mlx5e_ptp_skb_fifo_ts_cqe_resync(ptpsq, skb_ci, skb_id, budget);
}
skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
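
[Editor's note: the ooo test renamed above relies on a branch-free window check over wrapping counters: an id is "in flight" iff it falls in [ci, pi) under the same power-of-two mask. A minimal sketch of that check; mlx5e_ptp_ts_cqe_ooo() is its negation, flagging ids outside the window as already handled.]

static bool id_in_window(u16 id, u16 ci, u16 pi, u16 mask)
{
	/* Masked subtraction measures forward distance from ci, so the
	 * comparison holds even when pi has wrapped past ci.
	 */
	return ((id - ci) & mask) < ((pi - ci) & mask);
}
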
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
index ce85b48d327d..fd191925ab4b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
@@ -220,6 +220,7 @@ mlx5_esw_bridge_port_obj_add(struct net_device *dev,
struct netlink_ext_ack *extack = switchdev_notifier_info_to_extack(&port_obj_info->info);
const struct switchdev_obj *obj = port_obj_info->obj;
const struct switchdev_obj_port_vlan *vlan;
+ const struct switchdev_obj_port_mdb *mdb;
u16 vport_num, esw_owner_vhca_id;
int err;
@@ -235,6 +236,11 @@ mlx5_esw_bridge_port_obj_add(struct net_device *dev,
err = mlx5_esw_bridge_port_vlan_add(vport_num, esw_owner_vhca_id, vlan->vid,
vlan->flags, br_offloads, extack);
break;
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
+ err = mlx5_esw_bridge_port_mdb_add(dev, vport_num, esw_owner_vhca_id, mdb->addr,
+ mdb->vid, br_offloads, extack);
+ break;
default:
return -EOPNOTSUPP;
}
@@ -248,6 +254,7 @@ mlx5_esw_bridge_port_obj_del(struct net_device *dev,
{
const struct switchdev_obj *obj = port_obj_info->obj;
const struct switchdev_obj_port_vlan *vlan;
+ const struct switchdev_obj_port_mdb *mdb;
u16 vport_num, esw_owner_vhca_id;
if (!mlx5_esw_bridge_rep_vport_num_vhca_id_get(dev, br_offloads->esw, &vport_num,
@@ -261,6 +268,11 @@ mlx5_esw_bridge_port_obj_del(struct net_device *dev,
vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
mlx5_esw_bridge_port_vlan_del(vport_num, esw_owner_vhca_id, vlan->vid, br_offloads);
break;
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
+ mlx5_esw_bridge_port_mdb_del(dev, vport_num, esw_owner_vhca_id, mdb->addr, mdb->vid,
+ br_offloads);
+ break;
default:
return -EOPNOTSUPP;
}
@@ -306,6 +318,10 @@ mlx5_esw_bridge_port_obj_attr_set(struct net_device *dev,
attr->u.vlan_protocol,
br_offloads);
break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
+ err = mlx5_esw_bridge_mcast_set(vport_num, esw_owner_vhca_id,
+ !attr->u.mc_disabled, br_offloads);
+ break;
default:
err = -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
index a047a2a4ddac..e8eea9ffd5eb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
@@ -259,10 +259,6 @@ static int mlx5e_health_rq_put_sw_state(struct devlink_fmsg *fmsg, struct mlx5e_
BUILD_BUG_ON_MSG(ARRAY_SIZE(rq_sw_state_type_name) != MLX5E_NUM_RQ_STATES,
"rq_sw_state_type_name string array must be consistent with MLX5E_RQ_STATE_* enum in en.h");
- err = devlink_fmsg_obj_nest_start(fmsg);
- if (err)
- return err;
-
err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SW State");
if (err)
return err;
@@ -274,11 +270,7 @@ static int mlx5e_health_rq_put_sw_state(struct devlink_fmsg *fmsg, struct mlx5e_
return err;
}
- err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
- if (err)
- return err;
-
- return devlink_fmsg_obj_nest_end(fmsg);
+ return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
}
static int
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index 44c1926843a1..b35ff289af49 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -57,10 +57,6 @@ static int mlx5e_health_sq_put_sw_state(struct devlink_fmsg *fmsg, struct mlx5e_
BUILD_BUG_ON_MSG(ARRAY_SIZE(sq_sw_state_type_name) != MLX5E_NUM_SQ_STATES,
"sq_sw_state_type_name string array must be consistent with MLX5E_SQ_STATE_* enum in en.h");
- err = devlink_fmsg_obj_nest_start(fmsg);
- if (err)
- return err;
-
err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SW State");
if (err)
return err;
@@ -72,11 +68,7 @@ static int mlx5e_health_sq_put_sw_state(struct devlink_fmsg *fmsg, struct mlx5e_
return err;
}
- err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
- if (err)
- return err;
-
- return devlink_fmsg_obj_nest_end(fmsg);
+ return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
}
static int mlx5e_tx_reporter_err_cqe_recover(void *ctx)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/accept.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/accept.c
index a278f52d52b0..9db1b5307a8d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/accept.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/accept.c
@@ -4,15 +4,6 @@
#include "act.h"
#include "en/tc_priv.h"
-static bool
-tc_act_can_offload_accept(struct mlx5e_tc_act_parse_state *parse_state,
- const struct flow_action_entry *act,
- int act_index,
- struct mlx5_flow_attr *attr)
-{
- return true;
-}
-
static int
tc_act_parse_accept(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
@@ -26,7 +17,6 @@ tc_act_parse_accept(struct mlx5e_tc_act_parse_state *parse_state,
}
struct mlx5e_tc_act mlx5e_tc_act_accept = {
- .can_offload = tc_act_can_offload_accept,
.parse_action = tc_act_parse_accept,
.is_terminating_action = true,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c
index eba0c8698926..fc923a99b6a4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c
@@ -82,26 +82,6 @@ mlx5e_tc_act_init_parse_state(struct mlx5e_tc_act_parse_state *parse_state,
parse_state->flow_action = flow_action;
}
-void
-mlx5e_tc_act_reorder_flow_actions(struct flow_action *flow_action,
- struct mlx5e_tc_flow_action *flow_action_reorder)
-{
- struct flow_action_entry *act;
- int i, j = 0;
-
- flow_action_for_each(i, act, flow_action) {
- /* Add CT action to be first. */
- if (act->id == FLOW_ACTION_CT)
- flow_action_reorder->entries[j++] = act;
- }
-
- flow_action_for_each(i, act, flow_action) {
- if (act->id == FLOW_ACTION_CT)
- continue;
- flow_action_reorder->entries[j++] = act;
- }
-}
-
int
mlx5e_tc_act_post_parse(struct mlx5e_tc_act_parse_state *parse_state,
struct flow_action *flow_action,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
index 8346557eeaf6..0e6e1872ac62 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
@@ -17,8 +17,6 @@ struct mlx5e_tc_act_parse_state {
struct mlx5e_tc_flow *flow;
struct netlink_ext_ack *extack;
u32 actions;
- bool ct;
- bool ct_clear;
bool encap;
bool decap;
bool mpls_push;
@@ -56,6 +54,8 @@ struct mlx5e_tc_act {
const struct flow_action_entry *act,
struct mlx5_flow_attr *attr);
+ bool (*is_missable)(const struct flow_action_entry *act);
+
int (*offload_action)(struct mlx5e_priv *priv,
struct flow_offload_action *fl_act,
struct flow_action_entry *act);
@@ -110,10 +110,6 @@ mlx5e_tc_act_init_parse_state(struct mlx5e_tc_act_parse_state *parse_state,
struct flow_action *flow_action,
struct netlink_ext_ack *extack);
-void
-mlx5e_tc_act_reorder_flow_actions(struct flow_action *flow_action,
- struct mlx5e_tc_flow_action *flow_action_reorder);
-
int
mlx5e_tc_act_post_parse(struct mlx5e_tc_act_parse_state *parse_state,
struct flow_action *flow_action,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c
index a829c94289c1..92d3952dfa8b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c
@@ -5,53 +5,22 @@
#include "en/tc_priv.h"
#include "en/tc_ct.h"
-static bool
-tc_act_can_offload_ct(struct mlx5e_tc_act_parse_state *parse_state,
- const struct flow_action_entry *act,
- int act_index,
- struct mlx5_flow_attr *attr)
-{
- bool clear_action = act->ct.action & TCA_CT_ACT_CLEAR;
- struct netlink_ext_ack *extack = parse_state->extack;
-
- if (parse_state->ct && !clear_action) {
- NL_SET_ERR_MSG_MOD(extack, "Multiple CT actions are not supported");
- return false;
- }
-
- return true;
-}
-
static int
tc_act_parse_ct(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
- bool clear_action = act->ct.action & TCA_CT_ACT_CLEAR;
int err;
- /* It's redundant to do ct clear more than once. */
- if (clear_action && parse_state->ct_clear)
- return 0;
-
- err = mlx5_tc_ct_parse_action(parse_state->ct_priv, attr,
- &attr->parse_attr->mod_hdr_acts,
- act, parse_state->extack);
+ err = mlx5_tc_ct_parse_action(parse_state->ct_priv, attr, act, parse_state->extack);
if (err)
return err;
-
if (mlx5e_is_eswitch_flow(parse_state->flow))
attr->esw_attr->split_count = attr->esw_attr->out_count;
- if (clear_action) {
- parse_state->ct_clear = true;
- } else {
- attr->flags |= MLX5_ATTR_FLAG_CT;
- flow_flag_set(parse_state->flow, CT);
- parse_state->ct = true;
- }
+ attr->flags |= MLX5_ATTR_FLAG_CT;
return 0;
}
@@ -61,27 +30,10 @@ tc_act_post_parse_ct(struct mlx5e_tc_act_parse_state *parse_state,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
- struct mlx5e_tc_mod_hdr_acts *mod_acts = &attr->parse_attr->mod_hdr_acts;
- int err;
-
- /* If ct action exist, we can ignore previous ct_clear actions */
- if (parse_state->ct)
+ if (!(attr->flags & MLX5_ATTR_FLAG_CT))
return 0;
- if (parse_state->ct_clear) {
- err = mlx5_tc_ct_set_ct_clear_regs(parse_state->ct_priv, mod_acts);
- if (err) {
- NL_SET_ERR_MSG_MOD(parse_state->extack,
- "Failed to set registers for ct clear");
- return err;
- }
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
-
- /* Prevent handling of additional, redundant clear actions */
- parse_state->ct_clear = false;
- }
-
- return 0;
+ return mlx5_tc_ct_flow_offload(parse_state->ct_priv, attr);
}
static bool
@@ -95,10 +47,16 @@ tc_act_is_multi_table_act_ct(struct mlx5e_priv *priv,
return true;
}
+static bool
+tc_act_is_missable_ct(const struct flow_action_entry *act)
+{
+ return !(act->ct.action & TCA_CT_ACT_CLEAR);
+}
+
struct mlx5e_tc_act mlx5e_tc_act_ct = {
- .can_offload = tc_act_can_offload_ct,
.parse_action = tc_act_parse_ct,
- .is_multi_table_act = tc_act_is_multi_table_act_ct,
.post_parse = tc_act_post_parse_ct,
+ .is_multi_table_act = tc_act_is_multi_table_act_ct,
+ .is_missable = tc_act_is_missable_ct,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/drop.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/drop.c
index 7d16aeabb119..5dc81715d625 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/drop.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/drop.c
@@ -4,15 +4,6 @@
#include "act.h"
#include "en/tc_priv.h"
-static bool
-tc_act_can_offload_drop(struct mlx5e_tc_act_parse_state *parse_state,
- const struct flow_action_entry *act,
- int act_index,
- struct mlx5_flow_attr *attr)
-{
- return true;
-}
-
static int
tc_act_parse_drop(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
@@ -25,7 +16,6 @@ tc_act_parse_drop(struct mlx5e_tc_act_parse_state *parse_state,
}
struct mlx5e_tc_act mlx5e_tc_act_drop = {
- .can_offload = tc_act_can_offload_drop,
.parse_action = tc_act_parse_drop,
.is_terminating_action = true,
};
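
[Editor's note: the stub removals in these act files make sense if the parse loop treats a missing .can_offload as "always offloadable". A hedged sketch of that dispatcher shape; the actual mlx5e parse loop is not part of this diff, so the exact code may differ.]

	/* inside the per-action parse loop (sketch) */
	struct mlx5e_tc_act *tc_act = mlx5e_tc_act_get(act->id, ns_type);

	if (!tc_act) {
		NL_SET_ERR_MSG_MOD(extack, "Not implemented offload action");
		return -EOPNOTSUPP;
	}

	/* NULL callback means the action is unconditionally offloadable */
	if (tc_act->can_offload &&
	    !tc_act->can_offload(parse_state, act, i, attr))
		return -EOPNOTSUPP;

	err = tc_act->parse_action(parse_state, act, priv, attr);
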
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c
index 47597c524e59..3b272bbf4c53 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c
@@ -78,15 +78,6 @@ out_err:
return err;
}
-static bool
-tc_act_can_offload_pedit(struct mlx5e_tc_act_parse_state *parse_state,
- const struct flow_action_entry *act,
- int act_index,
- struct mlx5_flow_attr *attr)
-{
- return true;
-}
-
static int
tc_act_parse_pedit(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
@@ -114,6 +105,5 @@ tc_act_parse_pedit(struct mlx5e_tc_act_parse_state *parse_state,
}
struct mlx5e_tc_act mlx5e_tc_act_pedit = {
- .can_offload = tc_act_can_offload_pedit,
.parse_action = tc_act_parse_pedit,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ptype.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ptype.c
index 6454b031ff7a..80b4bc64380a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ptype.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ptype.c
@@ -4,15 +4,6 @@
#include "act.h"
#include "en/tc_priv.h"
-static bool
-tc_act_can_offload_ptype(struct mlx5e_tc_act_parse_state *parse_state,
- const struct flow_action_entry *act,
- int act_index,
- struct mlx5_flow_attr *attr)
-{
- return true;
-}
-
static int
tc_act_parse_ptype(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
@@ -31,6 +22,5 @@ tc_act_parse_ptype(struct mlx5e_tc_act_parse_state *parse_state,
}
struct mlx5e_tc_act mlx5e_tc_act_ptype = {
- .can_offload = tc_act_can_offload_ptype,
.parse_action = tc_act_parse_ptype,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/sample.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/sample.c
index 2c0196431302..2df02f99cecf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/sample.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/sample.c
@@ -6,25 +6,6 @@
#include "en/tc_priv.h"
#include "en/tc/act/sample.h"
-static bool
-tc_act_can_offload_sample(struct mlx5e_tc_act_parse_state *parse_state,
- const struct flow_action_entry *act,
- int act_index,
- struct mlx5_flow_attr *attr)
-{
- struct netlink_ext_ack *extack = parse_state->extack;
- bool ct_nat;
-
- ct_nat = attr->ct_attr.ct_action & TCA_CT_ACT_NAT;
-
- if (flow_flag_test(parse_state->flow, CT) && ct_nat) {
- NL_SET_ERR_MSG_MOD(extack, "Sample action with CT NAT is not supported");
- return false;
- }
-
- return true;
-}
-
static int
tc_act_parse_sample(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
@@ -65,7 +46,6 @@ tc_act_is_multi_table_act_sample(struct mlx5e_priv *priv,
}
struct mlx5e_tc_act mlx5e_tc_act_sample = {
- .can_offload = tc_act_can_offload_sample,
.parse_action = tc_act_parse_sample,
.is_multi_table_act = tc_act_is_multi_table_act_sample,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c
index 915ce201aeb2..1b78bd9c106a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c
@@ -5,15 +5,6 @@
#include "en/tc_priv.h"
#include "eswitch.h"
-static bool
-tc_act_can_offload_trap(struct mlx5e_tc_act_parse_state *parse_state,
- const struct flow_action_entry *act,
- int act_index,
- struct mlx5_flow_attr *attr)
-{
- return true;
-}
-
static int
tc_act_parse_trap(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
@@ -27,6 +18,5 @@ tc_act_parse_trap(struct mlx5e_tc_act_parse_state *parse_state,
}
struct mlx5e_tc_act mlx5e_tc_act_trap = {
- .can_offload = tc_act_can_offload_trap,
.parse_action = tc_act_parse_trap,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/tun.c
index b4fa2de9711d..f1cae21c2c37 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/tun.c
@@ -32,15 +32,6 @@ tc_act_parse_tun_encap(struct mlx5e_tc_act_parse_state *parse_state,
return 0;
}
-static bool
-tc_act_can_offload_tun_decap(struct mlx5e_tc_act_parse_state *parse_state,
- const struct flow_action_entry *act,
- int act_index,
- struct mlx5_flow_attr *attr)
-{
- return true;
-}
-
static int
tc_act_parse_tun_decap(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
@@ -58,6 +49,5 @@ struct mlx5e_tc_act mlx5e_tc_act_tun_encap = {
};
struct mlx5e_tc_act mlx5e_tc_act_tun_decap = {
- .can_offload = tc_act_can_offload_tun_decap,
.parse_action = tc_act_parse_tun_decap,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c
index 2e0d88b513aa..c8a3eaf189f6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c
@@ -141,15 +141,6 @@ mlx5e_tc_act_vlan_add_pop_action(struct mlx5e_priv *priv,
return err;
}
-static bool
-tc_act_can_offload_vlan(struct mlx5e_tc_act_parse_state *parse_state,
- const struct flow_action_entry *act,
- int act_index,
- struct mlx5_flow_attr *attr)
-{
- return true;
-}
-
static int
tc_act_parse_vlan(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
@@ -205,7 +196,6 @@ tc_act_post_parse_vlan(struct mlx5e_tc_act_parse_state *parse_state,
}
struct mlx5e_tc_act mlx5e_tc_act_vlan = {
- .can_offload = tc_act_can_offload_vlan,
.parse_action = tc_act_parse_vlan,
.post_parse = tc_act_post_parse_vlan,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan_mangle.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan_mangle.c
index 9a8a1a6bd99e..310b99230760 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan_mangle.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan_mangle.c
@@ -50,15 +50,6 @@ mlx5e_tc_act_vlan_add_rewrite_action(struct mlx5e_priv *priv, int namespace,
return err;
}
-static bool
-tc_act_can_offload_vlan_mangle(struct mlx5e_tc_act_parse_state *parse_state,
- const struct flow_action_entry *act,
- int act_index,
- struct mlx5_flow_attr *attr)
-{
- return true;
-}
-
static int
tc_act_parse_vlan_mangle(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
@@ -81,6 +72,5 @@ tc_act_parse_vlan_mangle(struct mlx5e_tc_act_parse_state *parse_state,
}
struct mlx5e_tc_act mlx5e_tc_act_vlan_mangle = {
- .can_offload = tc_act_can_offload_vlan_mangle,
.parse_action = tc_act_parse_vlan_mangle,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index 314983bc6f08..24badbaad935 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -83,12 +83,6 @@ struct mlx5_tc_ct_priv {
struct mlx5_tc_ct_debugfs debugfs;
};
-struct mlx5_ct_flow {
- struct mlx5_flow_attr *pre_ct_attr;
- struct mlx5_flow_handle *pre_ct_rule;
- struct mlx5_ct_ft *ft;
-};
-
struct mlx5_ct_zone_rule {
struct mlx5_ct_fs_rule *rule;
struct mlx5e_mod_hdr_handle *mh;
@@ -598,12 +592,6 @@ mlx5_tc_ct_entry_set_registers(struct mlx5_tc_ct_priv *ct_priv,
return 0;
}
-int mlx5_tc_ct_set_ct_clear_regs(struct mlx5_tc_ct_priv *priv,
- struct mlx5e_tc_mod_hdr_acts *mod_acts)
-{
- return mlx5_tc_ct_entry_set_registers(priv, mod_acts, 0, 0, 0, 0);
-}
-
static int
mlx5_tc_ct_parse_mangle_to_mod_act(struct flow_action_entry *act,
char *modact)
@@ -1545,7 +1533,6 @@ mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv,
int
mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv,
struct mlx5_flow_attr *attr,
- struct mlx5e_tc_mod_hdr_acts *mod_acts,
const struct flow_action_entry *act,
struct netlink_ext_ack *extack)
{
@@ -1555,8 +1542,8 @@ mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv,
return -EOPNOTSUPP;
}
+ attr->ct_attr.ct_action |= act->ct.action; /* accumulate, so clear + ct can coexist */

attr->ct_attr.zone = act->ct.zone;
- attr->ct_attr.ct_action = act->ct.action;
attr->ct_attr.nf_ft = act->ct.flow_table;
attr->ct_attr.act_miss_cookie = act->miss_cookie;
@@ -1892,14 +1879,14 @@ mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
/* We translate the tc filter with CT action to the following HW model:
*
- * +---------------------+
- * + ft prio (tc chain) +
- * + original match +
- * +---------------------+
+ * +-----------------------+
+ * + rule (either original +
+ * + or post_act rule) +
+ * +-----------------------+
* | set act_miss_cookie mapping
* | set fte_id
* | set tunnel_id
- * | do decap
+ * | rest of actions before the CT action (for this orig/post_act rule)
* |
* +-------------+
* | Chain 0 |
@@ -1924,32 +1911,21 @@ mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
* | do nat (if needed)
* v
* +--------------+
- * + post_act + original filter actions
+ * + post_act + rest of parsed filter's actions
* + fte_id match +------------------------>
* +--------------+
*
*/
-static struct mlx5_flow_handle *
+static int
__mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
- struct mlx5_flow_spec *orig_spec,
struct mlx5_flow_attr *attr)
{
bool nat = attr->ct_attr.ct_action & TCA_CT_ACT_NAT;
struct mlx5e_priv *priv = netdev_priv(ct_priv->netdev);
- struct mlx5e_tc_mod_hdr_acts *pre_mod_acts;
- u32 attr_sz = ns_to_attr_sz(ct_priv->ns_type);
- struct mlx5_flow_attr *pre_ct_attr;
- struct mlx5_modify_hdr *mod_hdr;
- struct mlx5_ct_flow *ct_flow;
int act_miss_mapping = 0, err;
struct mlx5_ct_ft *ft;
u16 zone;
- ct_flow = kzalloc(sizeof(*ct_flow), GFP_KERNEL);
- if (!ct_flow) {
- return ERR_PTR(-ENOMEM);
- }
-
/* Register for CT established events */
ft = mlx5_tc_ct_add_ft_cb(ct_priv, attr->ct_attr.zone,
attr->ct_attr.nf_ft);
@@ -1958,23 +1934,7 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
ct_dbg("Failed to register to ft callback");
goto err_ft;
}
- ct_flow->ft = ft;
-
- /* Base flow attributes of both rules on original rule attribute */
- ct_flow->pre_ct_attr = mlx5_alloc_flow_attr(ct_priv->ns_type);
- if (!ct_flow->pre_ct_attr) {
- err = -ENOMEM;
- goto err_alloc_pre;
- }
-
- pre_ct_attr = ct_flow->pre_ct_attr;
- memcpy(pre_ct_attr, attr, attr_sz);
- pre_mod_acts = &pre_ct_attr->parse_attr->mod_hdr_acts;
-
- /* Modify the original rule's action to fwd and modify, leave decap */
- pre_ct_attr->action = attr->action & MLX5_FLOW_CONTEXT_ACTION_DECAP;
- pre_ct_attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+ attr->ct_attr.ft = ft;
err = mlx5e_tc_action_miss_mapping_get(ct_priv->priv, attr, attr->ct_attr.act_miss_cookie,
&act_miss_mapping);
@@ -1982,136 +1942,89 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
ct_dbg("Failed to get register mapping for act miss");
goto err_get_act_miss;
}
- attr->ct_attr.act_miss_mapping = act_miss_mapping;
- err = mlx5e_tc_match_to_reg_set(priv->mdev, pre_mod_acts, ct_priv->ns_type,
- MAPPED_OBJ_TO_REG, act_miss_mapping);
+ err = mlx5e_tc_match_to_reg_set(priv->mdev, &attr->parse_attr->mod_hdr_acts,
+ ct_priv->ns_type, MAPPED_OBJ_TO_REG, act_miss_mapping);
if (err) {
ct_dbg("Failed to set act miss register mapping");
goto err_mapping;
}
- /* If original flow is decap, we do it before going into ct table
- * so add a rewrite for the tunnel match_id.
- */
- if ((pre_ct_attr->action & MLX5_FLOW_CONTEXT_ACTION_DECAP) &&
- attr->chain == 0) {
- err = mlx5e_tc_match_to_reg_set(priv->mdev, pre_mod_acts,
- ct_priv->ns_type,
- TUNNEL_TO_REG,
- attr->tunnel_id);
- if (err) {
- ct_dbg("Failed to set tunnel register mapping");
- goto err_mapping;
- }
- }
-
- /* Change original rule point to ct table
- * Chain 0 sets the zone and jumps to ct table
+ /* Chain 0 sets the zone and jumps to ct table
* Other chains jump to pre_ct table to align with act_ct cached logic
*/
- pre_ct_attr->dest_chain = 0;
if (!attr->chain) {
zone = ft->zone & MLX5_CT_ZONE_MASK;
- err = mlx5e_tc_match_to_reg_set(priv->mdev, pre_mod_acts, ct_priv->ns_type,
- ZONE_TO_REG, zone);
+ err = mlx5e_tc_match_to_reg_set(priv->mdev, &attr->parse_attr->mod_hdr_acts,
+ ct_priv->ns_type, ZONE_TO_REG, zone);
if (err) {
ct_dbg("Failed to set zone register mapping");
goto err_mapping;
}
- pre_ct_attr->dest_ft = nat ? ct_priv->ct_nat : ct_priv->ct;
+ attr->dest_ft = nat ? ct_priv->ct_nat : ct_priv->ct;
} else {
- pre_ct_attr->dest_ft = nat ? ft->pre_ct_nat.ft : ft->pre_ct.ft;
- }
-
- mod_hdr = mlx5_modify_header_alloc(priv->mdev, ct_priv->ns_type,
- pre_mod_acts->num_actions,
- pre_mod_acts->actions);
- if (IS_ERR(mod_hdr)) {
- err = PTR_ERR(mod_hdr);
- ct_dbg("Failed to create pre ct mod hdr");
- goto err_mapping;
- }
- pre_ct_attr->modify_hdr = mod_hdr;
- ct_flow->pre_ct_rule = mlx5_tc_rule_insert(priv, orig_spec,
- pre_ct_attr);
- if (IS_ERR(ct_flow->pre_ct_rule)) {
- err = PTR_ERR(ct_flow->pre_ct_rule);
- ct_dbg("Failed to add pre ct rule");
- goto err_insert_orig;
+ attr->dest_ft = nat ? ft->pre_ct_nat.ft : ft->pre_ct.ft;
}
- attr->ct_attr.ct_flow = ct_flow;
- mlx5e_mod_hdr_dealloc(pre_mod_acts);
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+ attr->ct_attr.act_miss_mapping = act_miss_mapping;
- return ct_flow->pre_ct_rule;
+ return 0;
-err_insert_orig:
- mlx5_modify_header_dealloc(priv->mdev, pre_ct_attr->modify_hdr);
err_mapping:
- mlx5e_mod_hdr_dealloc(pre_mod_acts);
mlx5e_tc_action_miss_mapping_put(ct_priv->priv, attr, act_miss_mapping);
err_get_act_miss:
- kfree(ct_flow->pre_ct_attr);
-err_alloc_pre:
mlx5_tc_ct_del_ft_cb(ct_priv, ft);
err_ft:
- kfree(ct_flow);
netdev_warn(priv->netdev, "Failed to offload ct flow, err %d\n", err);
- return ERR_PTR(err);
+ return err;
}
-struct mlx5_flow_handle *
-mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *priv,
- struct mlx5_flow_spec *spec,
- struct mlx5_flow_attr *attr,
- struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
+int
+mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *priv, struct mlx5_flow_attr *attr)
{
- struct mlx5_flow_handle *rule;
+ int err;
if (!priv)
- return ERR_PTR(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
+
+ if (attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR) {
+ err = mlx5_tc_ct_entry_set_registers(priv, &attr->parse_attr->mod_hdr_acts,
+ 0, 0, 0, 0);
+ if (err)
+ return err;
+
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+ }
+
+ if (!attr->ct_attr.nf_ft) /* a lone ct clear action, not ct_clear followed by ct() */
+ return 0;
mutex_lock(&priv->control_lock);
- rule = __mlx5_tc_ct_flow_offload(priv, spec, attr);
+ err = __mlx5_tc_ct_flow_offload(priv, attr);
mutex_unlock(&priv->control_lock);
- return rule;
+ return err;
}
static void
__mlx5_tc_ct_delete_flow(struct mlx5_tc_ct_priv *ct_priv,
- struct mlx5_ct_flow *ct_flow,
struct mlx5_flow_attr *attr)
{
- struct mlx5_flow_attr *pre_ct_attr = ct_flow->pre_ct_attr;
- struct mlx5e_priv *priv = netdev_priv(ct_priv->netdev);
-
- mlx5_tc_rule_delete(priv, ct_flow->pre_ct_rule, pre_ct_attr);
- mlx5_modify_header_dealloc(priv->mdev, pre_ct_attr->modify_hdr);
-
mlx5e_tc_action_miss_mapping_put(ct_priv->priv, attr, attr->ct_attr.act_miss_mapping);
- mlx5_tc_ct_del_ft_cb(ct_priv, ct_flow->ft);
-
- kfree(ct_flow->pre_ct_attr);
- kfree(ct_flow);
+ mlx5_tc_ct_del_ft_cb(ct_priv, attr->ct_attr.ft);
}
void
mlx5_tc_ct_delete_flow(struct mlx5_tc_ct_priv *priv,
struct mlx5_flow_attr *attr)
{
- struct mlx5_ct_flow *ct_flow = attr->ct_attr.ct_flow;
-
- /* We are called on error to clean up stuff from parsing
- * but we don't have anything for now
- */
- if (!ct_flow)
+ if (!attr->ct_attr.nf_ft) /* a lone ct clear action, not ct_clear followed by ct() */
return;
mutex_lock(&priv->control_lock);
- __mlx5_tc_ct_delete_flow(priv, ct_flow, attr);
+ __mlx5_tc_ct_delete_flow(priv, attr);
mutex_unlock(&priv->control_lock);
}
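
[Editor's note: the rework above collapses the old pre-CT rule machinery into the flow attributes: parsing only accumulates ct_attr, and a single post-parse call either resets the CT registers (clear-only) or wires the flow to the CT tables. A hedged sketch of the resulting calling sequence, with error handling trimmed:]

	int err;

	/* per FLOW_ACTION_CT entry, while walking the flow's actions */
	err = mlx5_tc_ct_parse_action(ct_priv, attr, act, extack);
	if (err)
		return err;

	/* once, after parsing: resets CT registers for clear-only flows,
	 * or attaches the flow to the CT tables when nf_ft is present
	 */
	err = mlx5_tc_ct_flow_offload(ct_priv, attr);
	if (err)
		return err;

	/* on teardown; a no-op for clear-only flows (no nf_ft) */
	mlx5_tc_ct_delete_flow(ct_priv, attr);
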
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
index 5c5ddaa83055..8e9316fa46d4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
@@ -25,11 +25,11 @@ struct nf_flowtable;
struct mlx5_ct_attr {
u16 zone;
u16 ct_action;
- struct mlx5_ct_flow *ct_flow;
struct nf_flowtable *nf_ft;
u32 ct_labels_id;
u32 act_miss_mapping;
u64 act_miss_cookie;
+ struct mlx5_ct_ft *ft;
};
#define zone_to_reg_ct {\
@@ -113,15 +113,12 @@ int mlx5_tc_ct_add_no_trk_match(struct mlx5_flow_spec *spec);
int
mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv,
struct mlx5_flow_attr *attr,
- struct mlx5e_tc_mod_hdr_acts *mod_acts,
const struct flow_action_entry *act,
struct netlink_ext_ack *extack);
-struct mlx5_flow_handle *
-mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *priv,
- struct mlx5_flow_spec *spec,
- struct mlx5_flow_attr *attr,
- struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts);
+int
+mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *priv, struct mlx5_flow_attr *attr);
+
void
mlx5_tc_ct_delete_flow(struct mlx5_tc_ct_priv *priv,
struct mlx5_flow_attr *attr);
@@ -130,10 +127,6 @@ bool
mlx5e_tc_ct_restore_flow(struct mlx5_tc_ct_priv *ct_priv,
struct sk_buff *skb, u8 zone_restore_id);
-int
-mlx5_tc_ct_set_ct_clear_regs(struct mlx5_tc_ct_priv *priv,
- struct mlx5e_tc_mod_hdr_acts *mod_acts);
-
#else /* CONFIG_MLX5_TC_CT */
static inline struct mlx5_tc_ct_priv *
@@ -176,16 +169,8 @@ mlx5_tc_ct_add_no_trk_match(struct mlx5_flow_spec *spec)
}
static inline int
-mlx5_tc_ct_set_ct_clear_regs(struct mlx5_tc_ct_priv *priv,
- struct mlx5e_tc_mod_hdr_acts *mod_acts)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int
mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv,
struct mlx5_flow_attr *attr,
- struct mlx5e_tc_mod_hdr_acts *mod_acts,
const struct flow_action_entry *act,
struct netlink_ext_ack *extack)
{
@@ -193,13 +178,11 @@ mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv,
return -EOPNOTSUPP;
}
-static inline struct mlx5_flow_handle *
+static inline int
mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *priv,
- struct mlx5_flow_spec *spec,
- struct mlx5_flow_attr *attr,
- struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
+ struct mlx5_flow_attr *attr)
{
- return ERR_PTR(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
}
static inline void
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
index 451fd4342a5a..ba2b1f24ff14 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
@@ -25,12 +25,11 @@ enum {
MLX5E_TC_FLOW_FLAG_DUP = MLX5E_TC_FLOW_BASE + 4,
MLX5E_TC_FLOW_FLAG_NOT_READY = MLX5E_TC_FLOW_BASE + 5,
MLX5E_TC_FLOW_FLAG_DELETED = MLX5E_TC_FLOW_BASE + 6,
- MLX5E_TC_FLOW_FLAG_CT = MLX5E_TC_FLOW_BASE + 7,
- MLX5E_TC_FLOW_FLAG_L3_TO_L2_DECAP = MLX5E_TC_FLOW_BASE + 8,
- MLX5E_TC_FLOW_FLAG_TUN_RX = MLX5E_TC_FLOW_BASE + 9,
- MLX5E_TC_FLOW_FLAG_FAILED = MLX5E_TC_FLOW_BASE + 10,
- MLX5E_TC_FLOW_FLAG_SAMPLE = MLX5E_TC_FLOW_BASE + 11,
- MLX5E_TC_FLOW_FLAG_USE_ACT_STATS = MLX5E_TC_FLOW_BASE + 12,
+ MLX5E_TC_FLOW_FLAG_L3_TO_L2_DECAP = MLX5E_TC_FLOW_BASE + 7,
+ MLX5E_TC_FLOW_FLAG_TUN_RX = MLX5E_TC_FLOW_BASE + 8,
+ MLX5E_TC_FLOW_FLAG_FAILED = MLX5E_TC_FLOW_BASE + 9,
+ MLX5E_TC_FLOW_FLAG_SAMPLE = MLX5E_TC_FLOW_BASE + 10,
+ MLX5E_TC_FLOW_FLAG_USE_ACT_STATS = MLX5E_TC_FLOW_BASE + 11,
};
struct mlx5e_tc_flow_parse_attr {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index 651be7aaf7d5..47381e949f1f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -77,6 +77,19 @@ static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config)
}
/* TX */
+struct mlx5e_xmit_data {
+ dma_addr_t dma_addr;
+ void *data;
+ u32 len : 31;
+ u32 has_frags : 1;
+};
+
+struct mlx5e_xmit_data_frags {
+ struct mlx5e_xmit_data xd;
+ struct skb_shared_info *sinfo;
+ dma_addr_t *dma_arr;
+};
+
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq);
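
[Editor's note: the two structs above use the classic base-struct extension idiom: mlx5e_xmit_data is embedded first, has_frags marks the extended layout, and the consumer recovers the wrapper with container_of(). A minimal sketch, assuming the producer always sets has_frags before handing the descriptor over:]

static struct mlx5e_xmit_data_frags *
xmit_data_to_frags(struct mlx5e_xmit_data *xd)
{
	if (!xd->has_frags)
		return NULL;	/* plain single-buffer descriptor */

	/* xd is the first member (.xd) of the frags wrapper */
	return container_of(xd, struct mlx5e_xmit_data_frags, xd);
}
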
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index 15d15d0e5ef9..f0e6095809fa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -34,6 +34,7 @@
#include <net/xdp_sock_drv.h>
#include "en/xdp.h"
#include "en/params.h"
+#include <linux/bitfield.h>
int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk)
{
@@ -60,9 +61,8 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
struct xdp_buff *xdp)
{
struct page *page = virt_to_page(xdp->data);
- struct skb_shared_info *sinfo = NULL;
- struct mlx5e_xmit_data xdptxd;
- struct mlx5e_xdp_info xdpi;
+ struct mlx5e_xmit_data_frags xdptxdf = {};
+ struct mlx5e_xmit_data *xdptxd;
struct xdp_frame *xdpf;
dma_addr_t dma_addr;
int i;
@@ -71,8 +71,10 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
if (unlikely(!xdpf))
return false;
- xdptxd.data = xdpf->data;
- xdptxd.len = xdpf->len;
+ xdptxd = &xdptxdf.xd;
+ xdptxd->data = xdpf->data;
+ xdptxd->len = xdpf->len;
+ xdptxd->has_frags = xdp_frame_has_frags(xdpf);
if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL) {
/* The xdp_buff was in the UMEM and was copied into a newly
@@ -87,24 +89,29 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
*/
__set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); /* non-atomic */
- xdpi.mode = MLX5E_XDP_XMIT_MODE_FRAME;
+ if (unlikely(xdptxd->has_frags))
+ return false;
- dma_addr = dma_map_single(sq->pdev, xdptxd.data, xdptxd.len,
+ dma_addr = dma_map_single(sq->pdev, xdptxd->data, xdptxd->len,
DMA_TO_DEVICE);
if (dma_mapping_error(sq->pdev, dma_addr)) {
xdp_return_frame(xdpf);
return false;
}
- xdptxd.dma_addr = dma_addr;
- xdpi.frame.xdpf = xdpf;
- xdpi.frame.dma_addr = dma_addr;
+ xdptxd->dma_addr = dma_addr;
if (unlikely(!INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
- mlx5e_xmit_xdp_frame, sq, &xdptxd, NULL, 0)))
+ mlx5e_xmit_xdp_frame, sq, xdptxd, 0)))
return false;
- mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, &xdpi);
+ /* xmit_mode == MLX5E_XDP_XMIT_MODE_FRAME */
+ mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
+ (union mlx5e_xdp_info) { .mode = MLX5E_XDP_XMIT_MODE_FRAME });
+ mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
+ (union mlx5e_xdp_info) { .frame.xdpf = xdpf });
+ mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
+ (union mlx5e_xdp_info) { .frame.dma_addr = dma_addr });
return true;
}
@@ -114,17 +121,15 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
* mode.
*/
- xdpi.mode = MLX5E_XDP_XMIT_MODE_PAGE;
- xdpi.page.rq = rq;
-
dma_addr = page_pool_get_dma_addr(page) + (xdpf->data - (void *)xdpf);
- dma_sync_single_for_device(sq->pdev, dma_addr, xdptxd.len, DMA_BIDIRECTIONAL);
+ dma_sync_single_for_device(sq->pdev, dma_addr, xdptxd->len, DMA_BIDIRECTIONAL);
- if (unlikely(xdp_frame_has_frags(xdpf))) {
- sinfo = xdp_get_shared_info_from_frame(xdpf);
+ if (xdptxd->has_frags) {
+ xdptxdf.sinfo = xdp_get_shared_info_from_frame(xdpf);
+ xdptxdf.dma_arr = NULL;
- for (i = 0; i < sinfo->nr_frags; i++) {
- skb_frag_t *frag = &sinfo->frags[i];
+ for (i = 0; i < xdptxdf.sinfo->nr_frags; i++) {
+ skb_frag_t *frag = &xdptxdf.sinfo->frags[i];
dma_addr_t addr;
u32 len;
@@ -136,22 +141,34 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
}
}
- xdptxd.dma_addr = dma_addr;
+ xdptxd->dma_addr = dma_addr;
if (unlikely(!INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
- mlx5e_xmit_xdp_frame, sq, &xdptxd, sinfo, 0)))
+ mlx5e_xmit_xdp_frame, sq, xdptxd, 0)))
return false;
- xdpi.page.page = page;
- mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, &xdpi);
-
- if (unlikely(xdp_frame_has_frags(xdpf))) {
- for (i = 0; i < sinfo->nr_frags; i++) {
- skb_frag_t *frag = &sinfo->frags[i];
-
- xdpi.page.page = skb_frag_page(frag);
- mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, &xdpi);
+ /* xmit_mode == MLX5E_XDP_XMIT_MODE_PAGE */
+ mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
+ (union mlx5e_xdp_info) { .mode = MLX5E_XDP_XMIT_MODE_PAGE });
+
+ if (xdptxd->has_frags) {
+ mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
+ (union mlx5e_xdp_info)
+ { .page.num = 1 + xdptxdf.sinfo->nr_frags });
+ mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
+ (union mlx5e_xdp_info) { .page.page = page });
+ for (i = 0; i < xdptxdf.sinfo->nr_frags; i++) {
+ skb_frag_t *frag = &xdptxdf.sinfo->frags[i];
+
+ mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
+ (union mlx5e_xdp_info)
+ { .page.page = skb_frag_page(frag) });
}
+ } else {
+ mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
+ (union mlx5e_xdp_info) { .page.num = 1 });
+ mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
+ (union mlx5e_xdp_info) { .page.page = page });
}
return true;
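
[Editor's note: the pushes above encode each transmitted frame as a small variable-length record of union mlx5e_xdp_info entries: a mode header, then mode-specific payload entries. The completion path (not shown in this diff) has to pop them back in the same order; a hedged sketch of that contract, with the recycle step left abstract:]

static void xdpi_complete_one(struct mlx5e_xdpsq *sq,
			      struct mlx5e_xdp_info_fifo *fifo)
{
	union mlx5e_xdp_info head = mlx5e_xdpi_fifo_pop(fifo);

	switch (head.mode) {
	case MLX5E_XDP_XMIT_MODE_FRAME: {
		/* two follow-up entries: the frame, then its DMA address */
		struct xdp_frame *xdpf = mlx5e_xdpi_fifo_pop(fifo).frame.xdpf;
		dma_addr_t dma = mlx5e_xdpi_fifo_pop(fifo).frame.dma_addr;

		dma_unmap_single(sq->pdev, dma, xdpf->len, DMA_TO_DEVICE);
		xdp_return_frame(xdpf);
		break;
	}
	case MLX5E_XDP_XMIT_MODE_PAGE: {
		/* one count entry followed by that many page entries */
		u32 num = mlx5e_xdpi_fifo_pop(fifo).page.num;

		while (num--) {
			struct page *page = mlx5e_xdpi_fifo_pop(fifo).page.page;

			/* recycle page back to its page_pool here */
			(void)page;
		}
		break;
	}
	case MLX5E_XDP_XMIT_MODE_XSK:
		/* no payload entries: only bump the XSK completion count */
		break;
	}
}
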
@@ -169,14 +186,72 @@ static int mlx5e_xdp_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
return 0;
}
-static int mlx5e_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash)
+/* Map the HW RSS type bits CQE_RSS_HTYPE_IP + CQE_RSS_HTYPE_L4 into 4 bits */
+#define RSS_TYPE_MAX_TABLE 16 /* 4 bits: at most 16 entries */
+#define RSS_L4 GENMASK(1, 0)
+#define RSS_L3 GENMASK(3, 2) /* Same as CQE_RSS_HTYPE_IP */
+
+/* Valid combinations of CQE_RSS_HTYPE_IP + CQE_RSS_HTYPE_L4, sorted numerically */
+enum mlx5_rss_hash_type {
+ RSS_TYPE_NO_HASH = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IP_NONE) |
+ FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_NONE)),
+ RSS_TYPE_L3_IPV4 = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IPV4) |
+ FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_NONE)),
+ RSS_TYPE_L4_IPV4_TCP = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IPV4) |
+ FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_TCP)),
+ RSS_TYPE_L4_IPV4_UDP = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IPV4) |
+ FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_UDP)),
+ RSS_TYPE_L4_IPV4_IPSEC = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IPV4) |
+ FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_IPSEC)),
+ RSS_TYPE_L3_IPV6 = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IPV6) |
+ FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_NONE)),
+ RSS_TYPE_L4_IPV6_TCP = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IPV6) |
+ FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_TCP)),
+ RSS_TYPE_L4_IPV6_UDP = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IPV6) |
+ FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_UDP)),
+ RSS_TYPE_L4_IPV6_IPSEC = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IPV6) |
+ FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_IPSEC)),
+};
+
+/* Invalid combinations simply map to zero, so lookups need no bounds check */
+static const enum xdp_rss_hash_type mlx5_xdp_rss_type[RSS_TYPE_MAX_TABLE] = {
+ [RSS_TYPE_NO_HASH] = XDP_RSS_TYPE_NONE,
+ [1] = XDP_RSS_TYPE_NONE, /* Implicit zero */
+ [2] = XDP_RSS_TYPE_NONE, /* Implicit zero */
+ [3] = XDP_RSS_TYPE_NONE, /* Implicit zero */
+ [RSS_TYPE_L3_IPV4] = XDP_RSS_TYPE_L3_IPV4,
+ [RSS_TYPE_L4_IPV4_TCP] = XDP_RSS_TYPE_L4_IPV4_TCP,
+ [RSS_TYPE_L4_IPV4_UDP] = XDP_RSS_TYPE_L4_IPV4_UDP,
+ [RSS_TYPE_L4_IPV4_IPSEC] = XDP_RSS_TYPE_L4_IPV4_IPSEC,
+ [RSS_TYPE_L3_IPV6] = XDP_RSS_TYPE_L3_IPV6,
+ [RSS_TYPE_L4_IPV6_TCP] = XDP_RSS_TYPE_L4_IPV6_TCP,
+ [RSS_TYPE_L4_IPV6_UDP] = XDP_RSS_TYPE_L4_IPV6_UDP,
+ [RSS_TYPE_L4_IPV6_IPSEC] = XDP_RSS_TYPE_L4_IPV6_IPSEC,
+ [12] = XDP_RSS_TYPE_NONE, /* Implicit zero */
+ [13] = XDP_RSS_TYPE_NONE, /* Implicit zero */
+ [14] = XDP_RSS_TYPE_NONE, /* Implicit zero */
+ [15] = XDP_RSS_TYPE_NONE, /* Implicit zero */
+};
+
+static int mlx5e_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash,
+ enum xdp_rss_hash_type *rss_type)
{
const struct mlx5e_xdp_buff *_ctx = (void *)ctx;
+ const struct mlx5_cqe64 *cqe = _ctx->cqe;
+ u32 hash_type, l4_type, ip_type, lookup;
if (unlikely(!(_ctx->xdp.rxq->dev->features & NETIF_F_RXHASH)))
return -ENODATA;
- *hash = be32_to_cpu(_ctx->cqe->rss_hash_result);
+ *hash = be32_to_cpu(cqe->rss_hash_result);
+
+ hash_type = cqe->rss_hash_type;
+ BUILD_BUG_ON(CQE_RSS_HTYPE_IP != RSS_L3); /* same mask */
+ ip_type = hash_type & CQE_RSS_HTYPE_IP;
+ l4_type = FIELD_GET(CQE_RSS_HTYPE_L4, hash_type);
+ lookup = ip_type | l4_type;
+ *rss_type = mlx5_xdp_rss_type[lookup];
+
return 0;
}
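To make the table composition concrete, here is a minimal standalone sketch of the 4-bit index: the L3 type sits in bits 3:2 and the L4 type in bits 1:0, mirroring RSS_L3/RSS_L4 above. The enum values (IP_V4 = 1, L4_TCP = 1, and so on) follow the mlx5 CQE definitions the table assumes; this is an illustration, not driver code.

#include <assert.h>

enum { L4_NONE = 0, L4_TCP = 1, L4_UDP = 2, L4_IPSEC = 3 };
enum { IP_NONE = 0, IP_V4 = 1, IP_V6 = 2 };

static unsigned int rss_lookup(unsigned int ip_type, unsigned int l4_type)
{
	/* compose bits 3:2 (L3) with bits 1:0 (L4), as the table expects */
	return (ip_type << 2) | l4_type;
}

int main(void)
{
	assert(rss_lookup(IP_V4, L4_TCP) == 5);   /* RSS_TYPE_L4_IPV4_TCP */
	assert(rss_lookup(IP_NONE, L4_TCP) == 1); /* implicit-zero slot */
	return 0;
}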
@@ -322,26 +397,43 @@ INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq
INDIRECT_CALLABLE_SCOPE bool
mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
- struct skb_shared_info *sinfo, int check_result);
+ int check_result);
INDIRECT_CALLABLE_SCOPE bool
mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
- struct skb_shared_info *sinfo, int check_result)
+ int check_result)
{
struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
struct mlx5e_xdpsq_stats *stats = sq->stats;
+ struct mlx5e_xmit_data *p = xdptxd;
+ struct mlx5e_xmit_data tmp;
- if (unlikely(sinfo)) {
- /* MPWQE is enabled, but a multi-buffer packet is queued for
- * transmission. MPWQE can't send fragmented packets, so close
- * the current session and fall back to a regular WQE.
- */
- if (unlikely(sq->mpwqe.wqe))
- mlx5e_xdp_mpwqe_complete(sq);
- return mlx5e_xmit_xdp_frame(sq, xdptxd, sinfo, 0);
+ if (xdptxd->has_frags) {
+ struct mlx5e_xmit_data_frags *xdptxdf =
+ container_of(xdptxd, struct mlx5e_xmit_data_frags, xd);
+
+ if (!!xdptxd->len + xdptxdf->sinfo->nr_frags > 1) {
+ /* MPWQE is enabled, but a multi-buffer packet is queued for
+ * transmission. MPWQE can't send fragmented packets, so close
+ * the current session and fall back to a regular WQE.
+ */
+ if (unlikely(sq->mpwqe.wqe))
+ mlx5e_xdp_mpwqe_complete(sq);
+ return mlx5e_xmit_xdp_frame(sq, xdptxd, 0);
+ }
+ if (!xdptxd->len) {
+ skb_frag_t *frag = &xdptxdf->sinfo->frags[0];
+
+ tmp.data = skb_frag_address(frag);
+ tmp.len = skb_frag_size(frag);
+ tmp.dma_addr = xdptxdf->dma_arr ? xdptxdf->dma_arr[0] :
+ page_pool_get_dma_addr(skb_frag_page(frag)) +
+ skb_frag_off(frag);
+ p = &tmp;
+ }
}
- if (unlikely(xdptxd->len > sq->hw_mtu)) {
+ if (unlikely(p->len > sq->hw_mtu)) {
stats->err++;
return false;
}
@@ -359,7 +451,7 @@ mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptx
mlx5e_xdp_mpwqe_session_start(sq);
}
- mlx5e_xdp_mpwqe_add_dseg(sq, xdptxd, stats);
+ mlx5e_xdp_mpwqe_add_dseg(sq, p, stats);
if (unlikely(mlx5e_xdp_mpwqe_is_full(session, sq->max_sq_mpw_wqebbs)))
mlx5e_xdp_mpwqe_complete(sq);
@@ -387,8 +479,10 @@ INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_xdp_frame_check(struct mlx5e_xdpsq *sq)
INDIRECT_CALLABLE_SCOPE bool
mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
- struct skb_shared_info *sinfo, int check_result)
+ int check_result)
{
+ struct mlx5e_xmit_data_frags *xdptxdf =
+ container_of(xdptxd, struct mlx5e_xmit_data_frags, xd);
struct mlx5_wq_cyc *wq = &sq->wq;
struct mlx5_wqe_ctrl_seg *cseg;
struct mlx5_wqe_data_seg *dseg;
@@ -400,26 +494,34 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
u16 ds_cnt, inline_hdr_sz;
u8 num_wqebbs = 1;
int num_frags = 0;
+ bool inline_ok;
+ bool linear;
u16 pi;
struct mlx5e_xdpsq_stats *stats = sq->stats;
- if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE || sq->hw_mtu < dma_len)) {
+ inline_ok = sq->min_inline_mode == MLX5_INLINE_MODE_NONE ||
+ dma_len >= MLX5E_XDP_MIN_INLINE;
+
+ if (unlikely(!inline_ok || sq->hw_mtu < dma_len)) {
stats->err++;
return false;
}
- ds_cnt = MLX5E_TX_WQE_EMPTY_DS_COUNT + 1;
+ inline_hdr_sz = 0;
if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE)
- ds_cnt++;
+ inline_hdr_sz = MLX5E_XDP_MIN_INLINE;
+
+ linear = !!(dma_len - inline_hdr_sz);
+ ds_cnt = MLX5E_TX_WQE_EMPTY_DS_COUNT + linear + !!inline_hdr_sz;
/* check_result must be 0 if sinfo is passed. */
if (!check_result) {
int stop_room = 1;
- if (unlikely(sinfo)) {
- ds_cnt += sinfo->nr_frags;
- num_frags = sinfo->nr_frags;
+ if (xdptxd->has_frags) {
+ ds_cnt += xdptxdf->sinfo->nr_frags;
+ num_frags = xdptxdf->sinfo->nr_frags;
num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
/* Assuming MLX5_CAP_GEN(mdev, max_wqe_sz_sq) is big
* enough to hold all fragments.
@@ -440,53 +542,53 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
eseg = &wqe->eth;
dseg = wqe->data;
- inline_hdr_sz = 0;
-
/* copy the inline part if required */
- if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
+ if (inline_hdr_sz) {
memcpy(eseg->inline_hdr.start, xdptxd->data, sizeof(eseg->inline_hdr.start));
memcpy(dseg, xdptxd->data + sizeof(eseg->inline_hdr.start),
- MLX5E_XDP_MIN_INLINE - sizeof(eseg->inline_hdr.start));
- dma_len -= MLX5E_XDP_MIN_INLINE;
- dma_addr += MLX5E_XDP_MIN_INLINE;
- inline_hdr_sz = MLX5E_XDP_MIN_INLINE;
+ inline_hdr_sz - sizeof(eseg->inline_hdr.start));
+ dma_len -= inline_hdr_sz;
+ dma_addr += inline_hdr_sz;
dseg++;
}
/* write the dma part */
- dseg->addr = cpu_to_be64(dma_addr);
- dseg->byte_count = cpu_to_be32(dma_len);
+ if (linear) {
+ dseg->addr = cpu_to_be64(dma_addr);
+ dseg->byte_count = cpu_to_be32(dma_len);
+ dseg->lkey = sq->mkey_be;
+ dseg++;
+ }
cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND);
- if (unlikely(test_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state))) {
- u8 num_pkts = 1 + num_frags;
+ if (test_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state)) {
int i;
memset(&cseg->trailer, 0, sizeof(cseg->trailer));
memset(eseg, 0, sizeof(*eseg) - sizeof(eseg->trailer));
eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);
- dseg->lkey = sq->mkey_be;
for (i = 0; i < num_frags; i++) {
- skb_frag_t *frag = &sinfo->frags[i];
+ skb_frag_t *frag = &xdptxdf->sinfo->frags[i];
dma_addr_t addr;
- addr = page_pool_get_dma_addr(skb_frag_page(frag)) +
+ addr = xdptxdf->dma_arr ? xdptxdf->dma_arr[i] :
+ page_pool_get_dma_addr(skb_frag_page(frag)) +
skb_frag_off(frag);
- dseg++;
dseg->addr = cpu_to_be64(addr);
dseg->byte_count = cpu_to_be32(skb_frag_size(frag));
dseg->lkey = sq->mkey_be;
+ dseg++;
}
cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
sq->db.wqe_info[pi] = (struct mlx5e_xdp_wqe_info) {
.num_wqebbs = num_wqebbs,
- .num_pkts = num_pkts,
+ .num_pkts = 1,
};
sq->pc += num_wqebbs;
@@ -511,20 +613,61 @@ static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq,
u16 i;
for (i = 0; i < wi->num_pkts; i++) {
- struct mlx5e_xdp_info xdpi = mlx5e_xdpi_fifo_pop(xdpi_fifo);
+ union mlx5e_xdp_info xdpi = mlx5e_xdpi_fifo_pop(xdpi_fifo);
switch (xdpi.mode) {
- case MLX5E_XDP_XMIT_MODE_FRAME:
+ case MLX5E_XDP_XMIT_MODE_FRAME: {
/* XDP_TX from the XSK RQ and XDP_REDIRECT */
- dma_unmap_single(sq->pdev, xdpi.frame.dma_addr,
- xdpi.frame.xdpf->len, DMA_TO_DEVICE);
- xdp_return_frame_bulk(xdpi.frame.xdpf, bq);
+ struct xdp_frame *xdpf;
+ dma_addr_t dma_addr;
+
+ xdpi = mlx5e_xdpi_fifo_pop(xdpi_fifo);
+ xdpf = xdpi.frame.xdpf;
+ xdpi = mlx5e_xdpi_fifo_pop(xdpi_fifo);
+ dma_addr = xdpi.frame.dma_addr;
+
+ dma_unmap_single(sq->pdev, dma_addr,
+ xdpf->len, DMA_TO_DEVICE);
+ if (xdp_frame_has_frags(xdpf)) {
+ struct skb_shared_info *sinfo;
+ int j;
+
+ sinfo = xdp_get_shared_info_from_frame(xdpf);
+ for (j = 0; j < sinfo->nr_frags; j++) {
+ skb_frag_t *frag = &sinfo->frags[j];
+
+ xdpi = mlx5e_xdpi_fifo_pop(xdpi_fifo);
+ dma_addr = xdpi.frame.dma_addr;
+
+ dma_unmap_single(sq->pdev, dma_addr,
+ skb_frag_size(frag), DMA_TO_DEVICE);
+ }
+ }
+ xdp_return_frame_bulk(xdpf, bq);
break;
- case MLX5E_XDP_XMIT_MODE_PAGE:
+ }
+ case MLX5E_XDP_XMIT_MODE_PAGE: {
/* XDP_TX from the regular RQ */
- page_pool_put_defragged_page(xdpi.page.rq->page_pool,
- xdpi.page.page, -1, true);
+ u8 num, n = 0;
+
+ xdpi = mlx5e_xdpi_fifo_pop(xdpi_fifo);
+ num = xdpi.page.num;
+
+ do {
+ struct page *page;
+
+ xdpi = mlx5e_xdpi_fifo_pop(xdpi_fifo);
+ page = xdpi.page.page;
+
+ /* No need to check ((page->pp_magic & ~0x3UL) == PP_SIGNATURE)
+ * as we know this is a page_pool page.
+ */
+ page_pool_put_defragged_page(page->pp,
+ page, -1, true);
+ } while (++n < num);
+
break;
+ }
case MLX5E_XDP_XMIT_MODE_XSK:
/* AF_XDP send */
(*xsk_frames)++;
@@ -658,34 +801,79 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
sq = &priv->channels.c[sq_num]->xdpsq;
for (i = 0; i < n; i++) {
+ struct mlx5e_xmit_data_frags xdptxdf = {};
struct xdp_frame *xdpf = frames[i];
- struct mlx5e_xmit_data xdptxd;
- struct mlx5e_xdp_info xdpi;
+ dma_addr_t dma_arr[MAX_SKB_FRAGS];
+ struct mlx5e_xmit_data *xdptxd;
bool ret;
- xdptxd.data = xdpf->data;
- xdptxd.len = xdpf->len;
- xdptxd.dma_addr = dma_map_single(sq->pdev, xdptxd.data,
- xdptxd.len, DMA_TO_DEVICE);
+ xdptxd = &xdptxdf.xd;
+ xdptxd->data = xdpf->data;
+ xdptxd->len = xdpf->len;
+ xdptxd->has_frags = xdp_frame_has_frags(xdpf);
+ xdptxd->dma_addr = dma_map_single(sq->pdev, xdptxd->data,
+ xdptxd->len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(sq->pdev, xdptxd.dma_addr)))
+ if (unlikely(dma_mapping_error(sq->pdev, xdptxd->dma_addr)))
break;
- xdpi.mode = MLX5E_XDP_XMIT_MODE_FRAME;
- xdpi.frame.xdpf = xdpf;
- xdpi.frame.dma_addr = xdptxd.dma_addr;
+ if (xdptxd->has_frags) {
+ int j;
+
+ xdptxdf.sinfo = xdp_get_shared_info_from_frame(xdpf);
+ xdptxdf.dma_arr = dma_arr;
+ for (j = 0; j < xdptxdf.sinfo->nr_frags; j++) {
+ skb_frag_t *frag = &xdptxdf.sinfo->frags[j];
+
+ dma_arr[j] = dma_map_single(sq->pdev, skb_frag_address(frag),
+ skb_frag_size(frag), DMA_TO_DEVICE);
+
+ if (!dma_mapping_error(sq->pdev, dma_arr[j]))
+ continue;
+ /* mapping error */
+ while (--j >= 0)
+ dma_unmap_single(sq->pdev, dma_arr[j],
+ skb_frag_size(&xdptxdf.sinfo->frags[j]),
+ DMA_TO_DEVICE);
+ goto out;
+ }
+ }
ret = INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
- mlx5e_xmit_xdp_frame, sq, &xdptxd, NULL, 0);
+ mlx5e_xmit_xdp_frame, sq, xdptxd, 0);
if (unlikely(!ret)) {
- dma_unmap_single(sq->pdev, xdptxd.dma_addr,
- xdptxd.len, DMA_TO_DEVICE);
+ int j;
+
+ dma_unmap_single(sq->pdev, xdptxd->dma_addr,
+ xdptxd->len, DMA_TO_DEVICE);
+ if (!xdptxd->has_frags)
+ break;
+ for (j = 0; j < xdptxdf.sinfo->nr_frags; j++)
+ dma_unmap_single(sq->pdev, dma_arr[j],
+ skb_frag_size(&xdptxdf.sinfo->frags[j]),
+ DMA_TO_DEVICE);
break;
}
- mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, &xdpi);
+
+ /* xmit_mode == MLX5E_XDP_XMIT_MODE_FRAME */
+ mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
+ (union mlx5e_xdp_info) { .mode = MLX5E_XDP_XMIT_MODE_FRAME });
+ mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
+ (union mlx5e_xdp_info) { .frame.xdpf = xdpf });
+ mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
+ (union mlx5e_xdp_info) { .frame.dma_addr = xdptxd->dma_addr });
+ if (xdptxd->has_frags) {
+ int j;
+
+ for (j = 0; j < xdptxdf.sinfo->nr_frags; j++)
+ mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
+ (union mlx5e_xdp_info)
+ { .frame.dma_addr = dma_arr[j] });
+ }
nxmit++;
}
+out:
if (flags & XDP_XMIT_FLUSH) {
if (sq->mpwqe.wqe)
mlx5e_xdp_mpwqe_complete(sq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
index 10bcfa6f88c1..9e8e6184f9e4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
@@ -50,6 +50,53 @@ struct mlx5e_xdp_buff {
struct mlx5e_rq *rq;
};
+/* XDP packets can be transmitted in different ways. On completion, we need to
+ * distinguish between them to clean things up properly.
+ */
+enum mlx5e_xdp_xmit_mode {
+ /* An xdp_frame was transmitted due to either XDP_REDIRECT from another
+ * device or XDP_TX from an XSK RQ. The frame has to be unmapped and
+ * returned.
+ */
+ MLX5E_XDP_XMIT_MODE_FRAME,
+
+ /* The xdp_frame was created in place as a result of XDP_TX from a
+ * regular RQ. No DMA remapping happened, and the page belongs to us.
+ */
+ MLX5E_XDP_XMIT_MODE_PAGE,
+
+ /* No xdp_frame was created at all, the transmit happened from a UMEM
+ * page. The UMEM Completion Ring producer pointer has to be increased.
+ */
+ MLX5E_XDP_XMIT_MODE_XSK,
+};
+
+/* xmit_mode entry is pushed to the fifo per packet, followed by multiple
+ * entries, as follows:
+ *
+ * MLX5E_XDP_XMIT_MODE_FRAME:
+ * xdpf, dma_addr_1, dma_addr_2, ... , dma_addr_num.
+ * 'num' is derived from xdpf.
+ *
+ * MLX5E_XDP_XMIT_MODE_PAGE:
+ * num, page_1, page_2, ... , page_num.
+ *
+ * MLX5E_XDP_XMIT_MODE_XSK:
+ * none.
+ */
+union mlx5e_xdp_info {
+ enum mlx5e_xdp_xmit_mode mode;
+ union {
+ struct xdp_frame *xdpf;
+ dma_addr_t dma_addr;
+ } frame;
+ union {
+ struct mlx5e_rq *rq;
+ u8 num;
+ struct page *page;
+ } page;
+};
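A self-contained model of the MODE_PAGE layout documented above: one mode entry, one count entry, then 'num' page entries, popped back in the same order on completion. The types and names here are simplified stand-ins for union mlx5e_xdp_info, for illustration only.

#include <assert.h>
#include <stddef.h>

enum xmit_mode { MODE_FRAME, MODE_PAGE, MODE_XSK };

union xdp_info {
	enum xmit_mode mode;
	unsigned int num;
	void *page;
};

int main(void)
{
	union xdp_info fifo[8];
	size_t pc = 0, cc = 0;
	int pages[3];	/* stands in for the linear part plus two fragments */

	/* producer side: XDP_TX of a packet carrying two fragments */
	fifo[pc++].mode = MODE_PAGE;
	fifo[pc++].num = 3;
	for (int i = 0; i < 3; i++)
		fifo[pc++].page = &pages[i];

	/* consumer side: completion pops the same entries in order */
	assert(fifo[cc++].mode == MODE_PAGE);
	unsigned int num = fifo[cc++].num;
	for (unsigned int i = 0; i < num; i++)
		assert(fifo[cc++].page == &pages[i]);
	return 0;
}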
+
struct mlx5e_xsk_param;
int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk);
bool mlx5e_xdp_handle(struct mlx5e_rq *rq,
@@ -66,11 +113,9 @@ extern const struct xdp_metadata_ops mlx5e_xdp_metadata_ops;
INDIRECT_CALLABLE_DECLARE(bool mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq,
struct mlx5e_xmit_data *xdptxd,
- struct skb_shared_info *sinfo,
int check_result));
INDIRECT_CALLABLE_DECLARE(bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq,
struct mlx5e_xmit_data *xdptxd,
- struct skb_shared_info *sinfo,
int check_result));
INDIRECT_CALLABLE_DECLARE(int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq *sq));
INDIRECT_CALLABLE_DECLARE(int mlx5e_xmit_xdp_frame_check(struct mlx5e_xdpsq *sq));
@@ -179,14 +224,14 @@ mlx5e_xdp_mpwqe_add_dseg(struct mlx5e_xdpsq *sq,
static inline void
mlx5e_xdpi_fifo_push(struct mlx5e_xdp_info_fifo *fifo,
- struct mlx5e_xdp_info *xi)
+ union mlx5e_xdp_info xi)
{
u32 i = (*fifo->pc)++ & fifo->mask;
- fifo->xi[i] = *xi;
+ fifo->xi[i] = xi;
}
-static inline struct mlx5e_xdp_info
+static inline union mlx5e_xdp_info
mlx5e_xdpi_fifo_pop(struct mlx5e_xdp_info_fifo *fifo)
{
return fifo->xi[(*fifo->cc)++ & fifo->mask];
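The push/pop helpers rely on the fifo size being a power of two, so counter & mask equals counter % size and the free-running pc/cc counters wrap without an explicit modulo. A tiny standalone check of that identity (the size 8 is an arbitrary example):

#include <assert.h>

int main(void)
{
	const unsigned int size = 8;	/* hypothetical fifo size */
	const unsigned int mask = size - 1;

	for (unsigned int pc = 0; pc < 3 * size; pc++)
		assert((pc & mask) == (pc % size));
	return 0;
}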
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
index 367a9505ca4f..597f319d4770 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
@@ -44,7 +44,7 @@ int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
* same.
*/
static void mlx5e_xsk_tx_post_err(struct mlx5e_xdpsq *sq,
- struct mlx5e_xdp_info *xdpi)
+ union mlx5e_xdp_info *xdpi)
{
u16 pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
struct mlx5e_xdp_wqe_info *wi = &sq->db.wqe_info[pi];
@@ -54,15 +54,14 @@ static void mlx5e_xsk_tx_post_err(struct mlx5e_xdpsq *sq,
wi->num_pkts = 1;
nopwqe = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);
- mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, xdpi);
+ mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, *xdpi);
sq->doorbell_cseg = &nopwqe->ctrl;
}
bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget)
{
struct xsk_buff_pool *pool = sq->xsk_pool;
- struct mlx5e_xmit_data xdptxd;
- struct mlx5e_xdp_info xdpi;
+ union mlx5e_xdp_info xdpi;
bool work_done = true;
bool flush = false;
@@ -73,6 +72,7 @@ bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget)
mlx5e_xmit_xdp_frame_check_mpwqe,
mlx5e_xmit_xdp_frame_check,
sq);
+ struct mlx5e_xmit_data xdptxd = {};
struct xdp_desc desc;
bool ret;
@@ -97,7 +97,7 @@ bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget)
xsk_buff_raw_dma_sync_for_device(pool, xdptxd.dma_addr, xdptxd.len);
ret = INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
- mlx5e_xmit_xdp_frame, sq, &xdptxd, NULL,
+ mlx5e_xmit_xdp_frame, sq, &xdptxd,
check_result);
if (unlikely(!ret)) {
if (sq->mpwqe.wqe)
@@ -105,7 +105,7 @@ bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget)
mlx5e_xsk_tx_post_err(sq, &xdpi);
} else {
- mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, &xdpi);
+ mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, xdpi);
}
flush = true;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index 91fa0a366316..55b38544422f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -35,11 +35,15 @@
#include <crypto/aead.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
+#include <net/netevent.h>
#include "en.h"
#include "ipsec.h"
#include "ipsec_rxtx.h"
+#define MLX5_IPSEC_RESCHED msecs_to_jiffies(1000)
+#define MLX5E_IPSEC_TUNNEL_SA XA_MARK_1
+
static struct mlx5e_ipsec_sa_entry *to_ipsec_sa_entry(struct xfrm_state *x)
{
return (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
@@ -50,27 +54,71 @@ static struct mlx5e_ipsec_pol_entry *to_ipsec_pol_entry(struct xfrm_policy *x)
return (struct mlx5e_ipsec_pol_entry *)x->xdo.offload_handle;
}
+static void mlx5e_ipsec_handle_tx_limit(struct work_struct *_work)
+{
+ struct mlx5e_ipsec_dwork *dwork =
+ container_of(_work, struct mlx5e_ipsec_dwork, dwork.work);
+ struct mlx5e_ipsec_sa_entry *sa_entry = dwork->sa_entry;
+ struct xfrm_state *x = sa_entry->x;
+
+ spin_lock(&x->lock);
+ xfrm_state_check_expire(x);
+ if (x->km.state == XFRM_STATE_EXPIRED) {
+ sa_entry->attrs.drop = true;
+ mlx5e_accel_ipsec_fs_modify(sa_entry);
+ }
+ spin_unlock(&x->lock);
+
+ if (sa_entry->attrs.drop)
+ return;
+
+ queue_delayed_work(sa_entry->ipsec->wq, &dwork->dwork,
+ MLX5_IPSEC_RESCHED);
+}
+
static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
{
- struct xfrm_replay_state_esn *replay_esn;
+ struct xfrm_state *x = sa_entry->x;
u32 seq_bottom = 0;
+ u32 esn, esn_msb;
u8 overlap;
- if (!(sa_entry->x->props.flags & XFRM_STATE_ESN)) {
- sa_entry->esn_state.trigger = 0;
+ switch (x->xso.type) {
+ case XFRM_DEV_OFFLOAD_PACKET:
+ switch (x->xso.dir) {
+ case XFRM_DEV_OFFLOAD_IN:
+ esn = x->replay_esn->seq;
+ esn_msb = x->replay_esn->seq_hi;
+ break;
+ case XFRM_DEV_OFFLOAD_OUT:
+ esn = x->replay_esn->oseq;
+ esn_msb = x->replay_esn->oseq_hi;
+ break;
+ default:
+ WARN_ON(true);
+ return false;
+ }
+ break;
+ case XFRM_DEV_OFFLOAD_CRYPTO:
+ /* Already parsed by XFRM core */
+ esn = x->replay_esn->seq;
+ break;
+ default:
+ WARN_ON(true);
return false;
}
- replay_esn = sa_entry->x->replay_esn;
- if (replay_esn->seq >= replay_esn->replay_window)
- seq_bottom = replay_esn->seq - replay_esn->replay_window + 1;
-
overlap = sa_entry->esn_state.overlap;
- sa_entry->esn_state.esn = xfrm_replay_seqhi(sa_entry->x,
- htonl(seq_bottom));
+ if (esn >= x->replay_esn->replay_window)
+ seq_bottom = esn - x->replay_esn->replay_window + 1;
+
+ if (x->xso.type == XFRM_DEV_OFFLOAD_CRYPTO)
+ esn_msb = xfrm_replay_seqhi(x, htonl(seq_bottom));
+
+ sa_entry->esn_state.esn = esn;
+ sa_entry->esn_state.esn_msb = esn_msb;
- sa_entry->esn_state.trigger = 1;
if (unlikely(overlap && seq_bottom < MLX5E_IPSEC_ESN_SCOPE_MID)) {
sa_entry->esn_state.overlap = 0;
return true;
@@ -87,25 +135,161 @@ static void mlx5e_ipsec_init_limits(struct mlx5e_ipsec_sa_entry *sa_entry,
struct mlx5_accel_esp_xfrm_attrs *attrs)
{
struct xfrm_state *x = sa_entry->x;
+ s64 start_value, n;
- attrs->hard_packet_limit = x->lft.hard_packet_limit;
+ attrs->lft.hard_packet_limit = x->lft.hard_packet_limit;
+ attrs->lft.soft_packet_limit = x->lft.soft_packet_limit;
if (x->lft.soft_packet_limit == XFRM_INF)
return;
- /* Hardware decrements hard_packet_limit counter through
- * the operation. While fires an event when soft_packet_limit
- * is reached. It emans that we need substitute the numbers
- * in order to properly count soft limit.
+ /* Compute hard limit initial value and number of rounds.
+ *
+ * The counting pattern of hardware counter goes:
+ * value -> 2^31-1
+ * 2^31 | (2^31-1) -> 2^31-1
+ * 2^31 | (2^31-1) -> 2^31-1
+ * [..]
+ * 2^31 | (2^31-1) -> 0
+ *
+ * The pattern is created by using an ASO operation to atomically set
+ * bit 31 after the down counter clears bit 31. This is effectively an
+ * atomic addition of 2^31 to the counter.
+ *
+ * We wish to configure the counter, within the above pattern, so that
+ * when it reaches 0, it has hit the hard limit. This is defined by this
+ * system of equations:
*
- * As an example:
- * XFRM user sets soft limit is 2 and hard limit is 9 and
- * expects to see soft event after 2 packets and hard event
- * after 9 packets. In our case, the hard limit will be set
- * to 9 and soft limit is comparator to 7 so user gets the
- * soft event after 2 packeta
+ * hard_limit == start_value + n * 2^31
+ * n >= 0
+ * start_value < 2^32, start_value >= 0
+ *
+ * These equations are not single-solution; there are often two choices:
+ * hard_limit == start_value + n * 2^31
+ * hard_limit == (start_value+2^31) + (n-1) * 2^31
+ *
+ * The algorithm selects the solution that keeps the counter value
+ * above 2^31 until the final iteration.
+ */
+
+ /* Start by estimating n and compute start_value */
+ n = attrs->lft.hard_packet_limit / BIT_ULL(31);
+ start_value = attrs->lft.hard_packet_limit - n * BIT_ULL(31);
+
+ /* Choose the best of the two solutions: */
+ if (n >= 1)
+ n -= 1;
+
+ /* Computed values solve the system of equations: */
+ start_value = attrs->lft.hard_packet_limit - n * BIT_ULL(31);
+
+ /* The best solution means: when there are multiple iterations we must
+ * start above 2^31 and count down to 2^31 to get the interrupt.
+ */
+ attrs->lft.hard_packet_limit = lower_32_bits(start_value);
+ attrs->lft.numb_rounds_hard = (u64)n;
+
+ /* Compute soft limit initial value and number of rounds.
+ *
+ * The soft_limit is achieved by adjusting the counter's
+ * interrupt_value. This is embedded in the counting pattern created by
+ * hard packet calculations above.
+ *
+ * We wish to compute the interrupt_value for the soft_limit. This is
+ * defined by this system of equations:
+ *
+ * soft_limit == start_value - soft_value + n * 2^31
+ * n >= 0
+ * soft_value < 2^32, soft_value >= 0
+ * for n == 0 start_value > soft_value
+ *
+ * As with the hard-limit computation above, the equations are not single-solution.
+ * The algorithm selects the solution that has:
+ * 2^30 <= soft_limit < 2^31 + 2^30
+ * for the interior iterations, which guarantees a large guard band
+ * around the counter hard limit and next interrupt.
+ */
+
+ /* Start by estimating n and compute soft_value */
+ n = (x->lft.soft_packet_limit - attrs->lft.hard_packet_limit) / BIT_ULL(31);
+ start_value = attrs->lft.hard_packet_limit + n * BIT_ULL(31) -
+ x->lft.soft_packet_limit;
+
+ /* Compare against constraints and adjust n */
+ if (n < 0)
+ n = 0;
+ else if (start_value >= BIT_ULL(32))
+ n -= 1;
+ else if (start_value < 0)
+ n += 1;
+
+ /* Choose the best of the two solutions: */
+ start_value = attrs->lft.hard_packet_limit + n * BIT_ULL(31) - start_value;
+ if (n != attrs->lft.numb_rounds_hard && start_value < BIT_ULL(30))
+ n += 1;
+
+ /* Note that the upper limit of soft_value happens naturally because we
+ * always select the lowest soft_value.
*/
- attrs->soft_packet_limit =
- x->lft.hard_packet_limit - x->lft.soft_packet_limit;
+
+ /* Computed values solve the system of equations: */
+ start_value = attrs->lft.hard_packet_limit + n * BIT_ULL(31) - start_value;
+
+ /* The best solution means: when there are multiple iterations we must
+ * not fall below 2^30, as that would get too close to the false
+ * hard_limit, and when we reach an interior iteration for soft_limit it
+ * has to stay far away from 2^32-1, which is the counter reset point
+ * after the +2^31, to accommodate latency.
+ */
+ attrs->lft.soft_packet_limit = lower_32_bits(start_value);
+ attrs->lft.numb_rounds_soft = (u64)n;
+}
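A worked check of the hard-limit split, as a standalone sketch rather than the driver function; the limit 5000000000 is an arbitrary example chosen to need one extra round.

#include <assert.h>
#include <stdint.h>

int main(void)
{
	const uint64_t hard_limit = 5000000000ULL;
	int64_t n = hard_limit / (1ULL << 31);	/* first estimate: n = 2 */

	if (n >= 1)
		n -= 1;		/* prefer the solution starting above 2^31 */

	uint64_t start_value = hard_limit - n * (1ULL << 31);

	/* start_value = 2852516352 >= 2^31, fits in 32 bits, and the
	 * reconstruction start_value + n * 2^31 gives back the limit */
	assert(start_value >= (1ULL << 31) && start_value < (1ULL << 32));
	assert(start_value + n * (1ULL << 31) == hard_limit);
	return 0;
}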
+
+static void mlx5e_ipsec_init_macs(struct mlx5e_ipsec_sa_entry *sa_entry,
+ struct mlx5_accel_esp_xfrm_attrs *attrs)
+{
+ struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
+ struct xfrm_state *x = sa_entry->x;
+ struct net_device *netdev;
+ struct neighbour *n;
+ u8 addr[ETH_ALEN];
+ const void *pkey;
+ u8 *dst, *src;
+
+ if (attrs->mode != XFRM_MODE_TUNNEL ||
+ attrs->type != XFRM_DEV_OFFLOAD_PACKET)
+ return;
+
+ netdev = x->xso.real_dev;
+
+ mlx5_query_mac_address(mdev, addr);
+ switch (attrs->dir) {
+ case XFRM_DEV_OFFLOAD_IN:
+ src = attrs->dmac;
+ dst = attrs->smac;
+ pkey = &attrs->saddr.a4;
+ break;
+ case XFRM_DEV_OFFLOAD_OUT:
+ src = attrs->smac;
+ dst = attrs->dmac;
+ pkey = &attrs->daddr.a4;
+ break;
+ default:
+ return;
+ }
+
+ ether_addr_copy(src, addr);
+ n = neigh_lookup(&arp_tbl, pkey, netdev);
+ if (!n) {
+ n = neigh_create(&arp_tbl, pkey, netdev);
+ if (IS_ERR(n))
+ return;
+ neigh_event_send(n, NULL);
+ attrs->drop = true;
+ } else {
+ neigh_ha_snapshot(addr, n, netdev);
+ ether_addr_copy(dst, addr);
+ }
+ neigh_release(n);
}
void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
@@ -141,11 +325,11 @@ void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
aes_gcm->icv_len = x->aead->alg_icv_len;
/* esn */
- if (sa_entry->esn_state.trigger) {
- attrs->esn_trigger = true;
- attrs->esn = sa_entry->esn_state.esn;
- attrs->esn_overlap = sa_entry->esn_state.overlap;
- attrs->replay_window = x->replay_esn->replay_window;
+ if (x->props.flags & XFRM_STATE_ESN) {
+ attrs->replay_esn.trigger = true;
+ attrs->replay_esn.esn = sa_entry->esn_state.esn;
+ attrs->replay_esn.esn_msb = sa_entry->esn_state.esn_msb;
+ attrs->replay_esn.overlap = sa_entry->esn_state.overlap;
}
attrs->dir = x->xso.dir;
@@ -163,8 +347,10 @@ void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
attrs->upspec.sport = ntohs(x->sel.sport);
attrs->upspec.sport_mask = ntohs(x->sel.sport_mask);
attrs->upspec.proto = x->sel.proto;
+ attrs->mode = x->props.mode;
mlx5e_ipsec_init_limits(sa_entry, attrs);
+ mlx5e_ipsec_init_macs(sa_entry, attrs);
}
static int mlx5e_xfrm_validate_state(struct mlx5_core_dev *mdev,
@@ -233,6 +419,11 @@ static int mlx5e_xfrm_validate_state(struct mlx5_core_dev *mdev,
return -EINVAL;
}
+ if (x->props.mode != XFRM_MODE_TRANSPORT && x->props.mode != XFRM_MODE_TUNNEL) {
+ NL_SET_ERR_MSG_MOD(extack, "Only transport and tunnel xfrm states may be offloaded");
+ return -EINVAL;
+ }
+
switch (x->xso.type) {
case XFRM_DEV_OFFLOAD_CRYPTO:
if (!(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_CRYPTO)) {
@@ -240,11 +431,6 @@ static int mlx5e_xfrm_validate_state(struct mlx5_core_dev *mdev,
return -EINVAL;
}
- if (x->props.mode != XFRM_MODE_TRANSPORT &&
- x->props.mode != XFRM_MODE_TUNNEL) {
- NL_SET_ERR_MSG_MOD(extack, "Only transport and tunnel xfrm states may be offloaded");
- return -EINVAL;
- }
break;
case XFRM_DEV_OFFLOAD_PACKET:
if (!(mlx5_ipsec_device_caps(mdev) &
@@ -253,8 +439,9 @@ static int mlx5e_xfrm_validate_state(struct mlx5_core_dev *mdev,
return -EINVAL;
}
- if (x->props.mode != XFRM_MODE_TRANSPORT) {
- NL_SET_ERR_MSG_MOD(extack, "Only transport xfrm states may be offloaded in packet mode");
+ if (x->props.mode == XFRM_MODE_TUNNEL &&
+ !(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_TUNNEL)) {
+ NL_SET_ERR_MSG_MOD(extack, "Packet offload is not supported for tunnel mode");
return -EINVAL;
}
@@ -283,6 +470,11 @@ static int mlx5e_xfrm_validate_state(struct mlx5_core_dev *mdev,
NL_SET_ERR_MSG_MOD(extack, "Hard packet limit must be greater than soft one");
return -EINVAL;
}
+
+ if (!x->lft.soft_packet_limit || !x->lft.hard_packet_limit) {
+ NL_SET_ERR_MSG_MOD(extack, "Soft/hard packet limits can't be 0");
+ return -EINVAL;
+ }
break;
default:
NL_SET_ERR_MSG_MOD(extack, "Unsupported xfrm offload type");
@@ -291,14 +483,134 @@ static int mlx5e_xfrm_validate_state(struct mlx5_core_dev *mdev,
return 0;
}
-static void _update_xfrm_state(struct work_struct *work)
+static void mlx5e_ipsec_modify_state(struct work_struct *_work)
{
- struct mlx5e_ipsec_modify_state_work *modify_work =
- container_of(work, struct mlx5e_ipsec_modify_state_work, work);
- struct mlx5e_ipsec_sa_entry *sa_entry = container_of(
- modify_work, struct mlx5e_ipsec_sa_entry, modify_work);
+ struct mlx5e_ipsec_work *work =
+ container_of(_work, struct mlx5e_ipsec_work, work);
+ struct mlx5e_ipsec_sa_entry *sa_entry = work->sa_entry;
+ struct mlx5_accel_esp_xfrm_attrs *attrs;
+
+ attrs = &((struct mlx5e_ipsec_sa_entry *)work->data)->attrs;
- mlx5_accel_esp_modify_xfrm(sa_entry, &modify_work->attrs);
+ mlx5_accel_esp_modify_xfrm(sa_entry, attrs);
+}
+
+static void mlx5e_ipsec_set_esn_ops(struct mlx5e_ipsec_sa_entry *sa_entry)
+{
+ struct xfrm_state *x = sa_entry->x;
+
+ if (x->xso.type != XFRM_DEV_OFFLOAD_CRYPTO ||
+ x->xso.dir != XFRM_DEV_OFFLOAD_OUT)
+ return;
+
+ if (x->props.flags & XFRM_STATE_ESN) {
+ sa_entry->set_iv_op = mlx5e_ipsec_set_iv_esn;
+ return;
+ }
+
+ sa_entry->set_iv_op = mlx5e_ipsec_set_iv;
+}
+
+static void mlx5e_ipsec_handle_netdev_event(struct work_struct *_work)
+{
+ struct mlx5e_ipsec_work *work =
+ container_of(_work, struct mlx5e_ipsec_work, work);
+ struct mlx5e_ipsec_sa_entry *sa_entry = work->sa_entry;
+ struct mlx5e_ipsec_netevent_data *data = work->data;
+ struct mlx5_accel_esp_xfrm_attrs *attrs;
+
+ attrs = &sa_entry->attrs;
+
+ switch (attrs->dir) {
+ case XFRM_DEV_OFFLOAD_IN:
+ ether_addr_copy(attrs->smac, data->addr);
+ break;
+ case XFRM_DEV_OFFLOAD_OUT:
+ ether_addr_copy(attrs->dmac, data->addr);
+ break;
+ default:
+ WARN_ON_ONCE(true);
+ }
+ attrs->drop = false;
+ mlx5e_accel_ipsec_fs_modify(sa_entry);
+}
+
+static int mlx5_ipsec_create_work(struct mlx5e_ipsec_sa_entry *sa_entry)
+{
+ struct xfrm_state *x = sa_entry->x;
+ struct mlx5e_ipsec_work *work;
+ void *data = NULL;
+
+ switch (x->xso.type) {
+ case XFRM_DEV_OFFLOAD_CRYPTO:
+ if (!(x->props.flags & XFRM_STATE_ESN))
+ return 0;
+ break;
+ case XFRM_DEV_OFFLOAD_PACKET:
+ if (x->props.mode != XFRM_MODE_TUNNEL)
+ return 0;
+ break;
+ default:
+ break;
+ }
+
+ work = kzalloc(sizeof(*work), GFP_KERNEL);
+ if (!work)
+ return -ENOMEM;
+
+ switch (x->xso.type) {
+ case XFRM_DEV_OFFLOAD_CRYPTO:
+ data = kzalloc(sizeof(*sa_entry), GFP_KERNEL);
+ if (!data)
+ goto free_work;
+
+ INIT_WORK(&work->work, mlx5e_ipsec_modify_state);
+ break;
+ case XFRM_DEV_OFFLOAD_PACKET:
+ data = kzalloc(sizeof(struct mlx5e_ipsec_netevent_data),
+ GFP_KERNEL);
+ if (!data)
+ goto free_work;
+
+ INIT_WORK(&work->work, mlx5e_ipsec_handle_netdev_event);
+ break;
+ default:
+ break;
+ }
+
+ work->data = data;
+ work->sa_entry = sa_entry;
+ sa_entry->work = work;
+ return 0;
+
+free_work:
+ kfree(work);
+ return -ENOMEM;
+}
+
+static int mlx5e_ipsec_create_dwork(struct mlx5e_ipsec_sa_entry *sa_entry)
+{
+ struct xfrm_state *x = sa_entry->x;
+ struct mlx5e_ipsec_dwork *dwork;
+
+ if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
+ return 0;
+
+ if (x->xso.dir != XFRM_DEV_OFFLOAD_OUT)
+ return 0;
+
+ if (x->lft.soft_packet_limit == XFRM_INF &&
+ x->lft.hard_packet_limit == XFRM_INF)
+ return 0;
+
+ dwork = kzalloc(sizeof(*dwork), GFP_KERNEL);
+ if (!dwork)
+ return -ENOMEM;
+
+ dwork->sa_entry = sa_entry;
+ INIT_DELAYED_WORK(&dwork->dwork, mlx5e_ipsec_handle_tx_limit);
+ sa_entry->dwork = dwork;
+ return 0;
}
static int mlx5e_xfrm_add_state(struct xfrm_state *x,
@@ -332,18 +644,36 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x,
goto err_xfrm;
/* check esn */
- mlx5e_ipsec_update_esn_state(sa_entry);
+ if (x->props.flags & XFRM_STATE_ESN)
+ mlx5e_ipsec_update_esn_state(sa_entry);
mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &sa_entry->attrs);
+
+ err = mlx5_ipsec_create_work(sa_entry);
+ if (err)
+ goto err_xfrm;
+
+ err = mlx5e_ipsec_create_dwork(sa_entry);
+ if (err)
+ goto release_work;
+
/* create hw context */
err = mlx5_ipsec_create_sa_ctx(sa_entry);
if (err)
- goto err_xfrm;
+ goto release_dwork;
err = mlx5e_accel_ipsec_fs_add_rule(sa_entry);
if (err)
goto err_hw_ctx;
+ if (x->props.mode == XFRM_MODE_TUNNEL &&
+ x->xso.type == XFRM_DEV_OFFLOAD_PACKET &&
+ !mlx5e_ipsec_fs_tunnel_enabled(sa_entry)) {
+ NL_SET_ERR_MSG_MOD(extack, "Packet offload tunnel mode is disabled due to encap settings");
+ err = -EINVAL;
+ goto err_add_rule;
+ }
+
/* We use *_bh() variant because xfrm_timer_handler(), which runs
* in softirq context, can reach our state delete logic and we need
* xa_erase_bh() there.
@@ -353,11 +683,17 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x,
if (err)
goto err_add_rule;
- if (x->xso.dir == XFRM_DEV_OFFLOAD_OUT)
- sa_entry->set_iv_op = (x->props.flags & XFRM_STATE_ESN) ?
- mlx5e_ipsec_set_iv_esn : mlx5e_ipsec_set_iv;
+ mlx5e_ipsec_set_esn_ops(sa_entry);
+
+ if (sa_entry->dwork)
+ queue_delayed_work(ipsec->wq, &sa_entry->dwork->dwork,
+ MLX5_IPSEC_RESCHED);
+
+ if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET &&
+ x->props.mode == XFRM_MODE_TUNNEL)
+ xa_set_mark(&ipsec->sadb, sa_entry->ipsec_obj_id,
+ MLX5E_IPSEC_TUNNEL_SA);
- INIT_WORK(&sa_entry->modify_work.work, _update_xfrm_state);
out:
x->xso.offload_handle = (unsigned long)sa_entry;
return 0;
@@ -366,15 +702,22 @@ err_add_rule:
mlx5e_accel_ipsec_fs_del_rule(sa_entry);
err_hw_ctx:
mlx5_ipsec_free_sa_ctx(sa_entry);
+release_dwork:
+ kfree(sa_entry->dwork);
+release_work:
+ if (sa_entry->work)
+ kfree(sa_entry->work->data);
+ kfree(sa_entry->work);
err_xfrm:
kfree(sa_entry);
- NL_SET_ERR_MSG_MOD(extack, "Device failed to offload this policy");
+ NL_SET_ERR_MSG_WEAK_MOD(extack, "Device failed to offload this state");
return err;
}
static void mlx5e_xfrm_del_state(struct xfrm_state *x)
{
struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
+ struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
struct mlx5e_ipsec_sa_entry *old;
@@ -383,6 +726,12 @@ static void mlx5e_xfrm_del_state(struct xfrm_state *x)
old = xa_erase_bh(&ipsec->sadb, sa_entry->ipsec_obj_id);
WARN_ON(old != sa_entry);
+
+ if (attrs->mode == XFRM_MODE_TUNNEL &&
+ attrs->type == XFRM_DEV_OFFLOAD_PACKET)
+ /* Make sure that no ARP requests are running in parallel */
+ flush_workqueue(ipsec->wq);
+
}
static void mlx5e_xfrm_free_state(struct xfrm_state *x)
@@ -392,13 +741,62 @@ static void mlx5e_xfrm_free_state(struct xfrm_state *x)
if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ)
goto sa_entry_free;
- cancel_work_sync(&sa_entry->modify_work.work);
+ if (sa_entry->work)
+ cancel_work_sync(&sa_entry->work->work);
+
+ if (sa_entry->dwork)
+ cancel_delayed_work_sync(&sa_entry->dwork->dwork);
+
mlx5e_accel_ipsec_fs_del_rule(sa_entry);
mlx5_ipsec_free_sa_ctx(sa_entry);
+ kfree(sa_entry->dwork);
+ if (sa_entry->work)
+ kfree(sa_entry->work->data);
+ kfree(sa_entry->work);
sa_entry_free:
kfree(sa_entry);
}
+static int mlx5e_ipsec_netevent_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct mlx5_accel_esp_xfrm_attrs *attrs;
+ struct mlx5e_ipsec_netevent_data *data;
+ struct mlx5e_ipsec_sa_entry *sa_entry;
+ struct mlx5e_ipsec *ipsec;
+ struct neighbour *n = ptr;
+ struct net_device *netdev;
+ struct xfrm_state *x;
+ unsigned long idx;
+
+ if (event != NETEVENT_NEIGH_UPDATE || !(n->nud_state & NUD_VALID))
+ return NOTIFY_DONE;
+
+ ipsec = container_of(nb, struct mlx5e_ipsec, netevent_nb);
+ xa_for_each_marked(&ipsec->sadb, idx, sa_entry, MLX5E_IPSEC_TUNNEL_SA) {
+ attrs = &sa_entry->attrs;
+
+ if (attrs->family == AF_INET) {
+ if (!neigh_key_eq32(n, &attrs->saddr.a4) &&
+ !neigh_key_eq32(n, &attrs->daddr.a4))
+ continue;
+ } else {
+ if (!neigh_key_eq128(n, &attrs->saddr.a4) &&
+ !neigh_key_eq128(n, &attrs->daddr.a4))
+ continue;
+ }
+
+ x = sa_entry->x;
+ netdev = x->xso.real_dev;
+ data = sa_entry->work->data;
+
+ neigh_ha_snapshot(data->addr, n, netdev);
+ queue_work(ipsec->wq, &sa_entry->work->work);
+ }
+
+ return NOTIFY_DONE;
+}
+
void mlx5e_ipsec_init(struct mlx5e_priv *priv)
{
struct mlx5e_ipsec *ipsec;
@@ -415,8 +813,8 @@ void mlx5e_ipsec_init(struct mlx5e_priv *priv)
xa_init_flags(&ipsec->sadb, XA_FLAGS_ALLOC);
ipsec->mdev = priv->mdev;
- ipsec->wq = alloc_ordered_workqueue("mlx5e_ipsec: %s", 0,
- priv->netdev->name);
+ ipsec->wq = alloc_workqueue("mlx5e_ipsec: %s", WQ_UNBOUND, 0,
+ priv->netdev->name);
if (!ipsec->wq)
goto err_wq;
@@ -427,6 +825,13 @@ void mlx5e_ipsec_init(struct mlx5e_priv *priv)
goto err_aso;
}
+ if (mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_TUNNEL) {
+ ipsec->netevent_nb.notifier_call = mlx5e_ipsec_netevent_event;
+ ret = register_netevent_notifier(&ipsec->netevent_nb);
+ if (ret)
+ goto clear_aso;
+ }
+
ret = mlx5e_accel_ipsec_fs_init(ipsec);
if (ret)
goto err_fs_init;
@@ -437,6 +842,9 @@ void mlx5e_ipsec_init(struct mlx5e_priv *priv)
return;
err_fs_init:
+ if (mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_TUNNEL)
+ unregister_netevent_notifier(&ipsec->netevent_nb);
+clear_aso:
if (mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
mlx5e_ipsec_aso_cleanup(ipsec);
err_aso:
@@ -455,6 +863,8 @@ void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
return;
mlx5e_accel_ipsec_fs_cleanup(ipsec);
+ if (mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_TUNNEL)
+ unregister_netevent_notifier(&ipsec->netevent_nb);
if (mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
mlx5e_ipsec_aso_cleanup(ipsec);
destroy_workqueue(ipsec->wq);
@@ -480,16 +890,18 @@ static bool mlx5e_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
static void mlx5e_xfrm_advance_esn_state(struct xfrm_state *x)
{
struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
- struct mlx5e_ipsec_modify_state_work *modify_work =
- &sa_entry->modify_work;
+ struct mlx5e_ipsec_work *work = sa_entry->work;
+ struct mlx5e_ipsec_sa_entry *sa_entry_shadow;
bool need_update;
need_update = mlx5e_ipsec_update_esn_state(sa_entry);
if (!need_update)
return;
- mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &modify_work->attrs);
- queue_work(sa_entry->ipsec->wq, &modify_work->work);
+ sa_entry_shadow = work->data;
+ memset(sa_entry_shadow, 0x00, sizeof(*sa_entry_shadow));
+ mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &sa_entry_shadow->attrs);
+ queue_work(sa_entry->ipsec->wq, &work->work);
}
static void mlx5e_xfrm_update_curlft(struct xfrm_state *x)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
index 68ae5230eb75..4e9887171508 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -60,10 +60,24 @@ struct upspec {
u8 proto;
};
+struct mlx5_ipsec_lft {
+ u64 hard_packet_limit;
+ u64 soft_packet_limit;
+ u64 numb_rounds_hard;
+ u64 numb_rounds_soft;
+};
+
+struct mlx5_replay_esn {
+ u32 replay_window;
+ u32 esn;
+ u32 esn_msb;
+ u8 overlap : 1;
+ u8 trigger : 1;
+};
+
struct mlx5_accel_esp_xfrm_attrs {
- u32 esn;
u32 spi;
- u32 flags;
+ u32 mode;
struct aes_gcm_keymat aes_gcm;
union {
@@ -78,15 +92,15 @@ struct mlx5_accel_esp_xfrm_attrs {
struct upspec upspec;
u8 dir : 2;
- u8 esn_overlap : 1;
- u8 esn_trigger : 1;
u8 type : 2;
+ u8 drop : 1;
u8 family;
- u32 replay_window;
+ struct mlx5_replay_esn replay_esn;
u32 authsize;
u32 reqid;
- u64 hard_packet_limit;
- u64 soft_packet_limit;
+ struct mlx5_ipsec_lft lft;
+ u8 smac[ETH_ALEN];
+ u8 dmac[ETH_ALEN];
};
enum mlx5_ipsec_cap {
@@ -95,6 +109,7 @@ enum mlx5_ipsec_cap {
MLX5_IPSEC_CAP_PACKET_OFFLOAD = 1 << 2,
MLX5_IPSEC_CAP_ROCE = 1 << 3,
MLX5_IPSEC_CAP_PRIO = 1 << 4,
+ MLX5_IPSEC_CAP_TUNNEL = 1 << 5,
};
struct mlx5e_priv;
@@ -125,8 +140,17 @@ struct mlx5e_ipsec_tx;
struct mlx5e_ipsec_work {
struct work_struct work;
- struct mlx5e_ipsec *ipsec;
- u32 id;
+ struct mlx5e_ipsec_sa_entry *sa_entry;
+ void *data;
+};
+
+struct mlx5e_ipsec_netevent_data {
+ u8 addr[ETH_ALEN];
+};
+
+struct mlx5e_ipsec_dwork {
+ struct delayed_work dwork;
+ struct mlx5e_ipsec_sa_entry *sa_entry;
};
struct mlx5e_ipsec_aso {
@@ -149,12 +173,13 @@ struct mlx5e_ipsec {
struct mlx5e_ipsec_tx *tx;
struct mlx5e_ipsec_aso *aso;
struct notifier_block nb;
+ struct notifier_block netevent_nb;
struct mlx5_ipsec_fs *roce;
};
struct mlx5e_ipsec_esn_state {
u32 esn;
- u8 trigger: 1;
+ u32 esn_msb;
u8 overlap: 1;
};
@@ -165,9 +190,10 @@ struct mlx5e_ipsec_rule {
struct mlx5_fc *fc;
};
-struct mlx5e_ipsec_modify_state_work {
- struct work_struct work;
- struct mlx5_accel_esp_xfrm_attrs attrs;
+struct mlx5e_ipsec_limits {
+ u64 round;
+ u8 soft_limit_hit : 1;
+ u8 fix_limit : 1;
};
struct mlx5e_ipsec_sa_entry {
@@ -180,7 +206,9 @@ struct mlx5e_ipsec_sa_entry {
u32 ipsec_obj_id;
u32 enc_key_id;
struct mlx5e_ipsec_rule ipsec_rule;
- struct mlx5e_ipsec_modify_state_work modify_work;
+ struct mlx5e_ipsec_work *work;
+ struct mlx5e_ipsec_dwork *dwork;
+ struct mlx5e_ipsec_limits limits;
};
struct mlx5_accel_pol_xfrm_attrs {
@@ -222,6 +250,8 @@ int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry);
void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry);
int mlx5e_accel_ipsec_fs_add_pol(struct mlx5e_ipsec_pol_entry *pol_entry);
void mlx5e_accel_ipsec_fs_del_pol(struct mlx5e_ipsec_pol_entry *pol_entry);
+void mlx5e_accel_ipsec_fs_modify(struct mlx5e_ipsec_sa_entry *sa_entry);
+bool mlx5e_ipsec_fs_tunnel_enabled(struct mlx5e_ipsec_sa_entry *sa_entry);
int mlx5_ipsec_create_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry);
void mlx5_ipsec_free_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry);
@@ -257,7 +287,7 @@ static inline bool addr6_all_zero(__be32 *addr6)
{
static const __be32 zaddr6[4] = {};
- return !memcmp(addr6, zaddr6, sizeof(*zaddr6));
+ return !memcmp(addr6, zaddr6, sizeof(zaddr6));
}
#else
static inline void mlx5e_ipsec_init(struct mlx5e_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index 0539640a4d88..dbe87bf89c0d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -4,12 +4,15 @@
#include <linux/netdevice.h>
#include "en.h"
#include "en/fs.h"
+#include "eswitch.h"
#include "ipsec.h"
#include "fs_core.h"
#include "lib/ipsec_fs_roce.h"
#include "lib/fs_chains.h"
#define NUM_IPSEC_FTE BIT(15)
+#define MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_SIZE 16
+#define IPSEC_TUNNEL_DEFAULT_TTL 0x40
struct mlx5e_ipsec_fc {
struct mlx5_fc *cnt;
@@ -36,6 +39,7 @@ struct mlx5e_ipsec_rx {
struct mlx5e_ipsec_rule status;
struct mlx5e_ipsec_fc *fc;
struct mlx5_fs_chains *chains;
+ u8 allow_tunnel_mode : 1;
};
struct mlx5e_ipsec_tx {
@@ -45,6 +49,7 @@ struct mlx5e_ipsec_tx {
struct mlx5_flow_namespace *ns;
struct mlx5e_ipsec_fc *fc;
struct mlx5_fs_chains *chains;
+ u8 allow_tunnel_mode : 1;
};
/* IPsec RX flow steering */
@@ -118,7 +123,7 @@ static void ipsec_chains_put_table(struct mlx5_fs_chains *chains, u32 prio)
static struct mlx5_flow_table *ipsec_ft_create(struct mlx5_flow_namespace *ns,
int level, int prio,
- int max_num_groups)
+ int max_num_groups, u32 flags)
{
struct mlx5_flow_table_attr ft_attr = {};
@@ -127,6 +132,7 @@ static struct mlx5_flow_table *ipsec_ft_create(struct mlx5_flow_namespace *ns,
ft_attr.max_fte = NUM_IPSEC_FTE;
ft_attr.level = level;
ft_attr.prio = prio;
+ ft_attr.flags = flags;
return mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
}
@@ -251,7 +257,8 @@ static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
mlx5_del_flow_rules(rx->sa.rule);
mlx5_destroy_flow_group(rx->sa.group);
mlx5_destroy_flow_table(rx->ft.sa);
-
+ if (rx->allow_tunnel_mode)
+ mlx5_eswitch_unblock_encap(mdev);
mlx5_del_flow_rules(rx->status.rule);
mlx5_modify_header_dealloc(mdev, rx->status.modify_hdr);
mlx5_destroy_flow_table(rx->ft.status);
@@ -267,6 +274,7 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
struct mlx5_flow_destination default_dest;
struct mlx5_flow_destination dest[2];
struct mlx5_flow_table *ft;
+ u32 flags = 0;
int err;
default_dest = mlx5_ttc_get_default_dest(ttc, family2tt(family));
@@ -277,7 +285,7 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
return err;
ft = ipsec_ft_create(ns, MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL,
- MLX5E_NIC_PRIO, 1);
+ MLX5E_NIC_PRIO, 1, 0);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
goto err_fs_ft_status;
@@ -300,8 +308,12 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
goto err_add;
/* Create FT */
- ft = ipsec_ft_create(ns, MLX5E_ACCEL_FS_ESP_FT_LEVEL, MLX5E_NIC_PRIO,
- 2);
+ if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_TUNNEL)
+ rx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev);
+ if (rx->allow_tunnel_mode)
+ flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
+ ft = ipsec_ft_create(ns, MLX5E_ACCEL_FS_ESP_FT_LEVEL, MLX5E_NIC_PRIO, 2,
+ flags);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
goto err_fs_ft;
@@ -327,7 +339,7 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
}
ft = ipsec_ft_create(ns, MLX5E_ACCEL_FS_POL_FT_LEVEL, MLX5E_NIC_PRIO,
- 2);
+ 2, 0);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
goto err_pol_ft;
@@ -356,6 +368,8 @@ err_pol_ft:
err_fs:
mlx5_destroy_flow_table(rx->ft.sa);
err_fs_ft:
+ if (rx->allow_tunnel_mode)
+ mlx5_eswitch_unblock_encap(mdev);
mlx5_del_flow_rules(rx->status.rule);
mlx5_modify_header_dealloc(mdev, rx->status.modify_hdr);
err_add:
@@ -490,7 +504,8 @@ err_rule:
}
/* IPsec TX flow steering */
-static void tx_destroy(struct mlx5e_ipsec_tx *tx, struct mlx5_ipsec_fs *roce)
+static void tx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx,
+ struct mlx5_ipsec_fs *roce)
{
mlx5_ipsec_fs_roce_tx_destroy(roce);
if (tx->chains) {
@@ -502,6 +517,8 @@ static void tx_destroy(struct mlx5e_ipsec_tx *tx, struct mlx5_ipsec_fs *roce)
}
mlx5_destroy_flow_table(tx->ft.sa);
+ if (tx->allow_tunnel_mode)
+ mlx5_eswitch_unblock_encap(mdev);
mlx5_del_flow_rules(tx->status.rule);
mlx5_destroy_flow_table(tx->ft.status);
}
@@ -511,9 +528,10 @@ static int tx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx,
{
struct mlx5_flow_destination dest = {};
struct mlx5_flow_table *ft;
+ u32 flags = 0;
int err;
- ft = ipsec_ft_create(tx->ns, 2, 0, 1);
+ ft = ipsec_ft_create(tx->ns, 2, 0, 1, 0);
if (IS_ERR(ft))
return PTR_ERR(ft);
tx->ft.status = ft;
@@ -522,7 +540,11 @@ static int tx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx,
if (err)
goto err_status_rule;
- ft = ipsec_ft_create(tx->ns, 1, 0, 4);
+ if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_TUNNEL)
+ tx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev);
+ if (tx->allow_tunnel_mode)
+ flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
+ ft = ipsec_ft_create(tx->ns, 1, 0, 4, flags);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
goto err_sa_ft;
@@ -541,7 +563,7 @@ static int tx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx,
goto connect_roce;
}
- ft = ipsec_ft_create(tx->ns, 0, 0, 2);
+ ft = ipsec_ft_create(tx->ns, 0, 0, 2, 0);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
goto err_pol_ft;
@@ -572,6 +594,8 @@ err_roce:
err_pol_ft:
mlx5_destroy_flow_table(tx->ft.sa);
err_sa_ft:
+ if (tx->allow_tunnel_mode)
+ mlx5_eswitch_unblock_encap(mdev);
mlx5_del_flow_rules(tx->status.rule);
err_status_rule:
mlx5_destroy_flow_table(tx->ft.status);
@@ -600,7 +624,7 @@ static void tx_put(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx)
if (--tx->ft.refcnt)
return;
- tx_destroy(tx, ipsec->roce);
+ tx_destroy(ipsec->mdev, tx, ipsec->roce);
}
static struct mlx5_flow_table *tx_ft_get_policy(struct mlx5_core_dev *mdev,
@@ -829,40 +853,181 @@ static int setup_modify_header(struct mlx5_core_dev *mdev, u32 val, u8 dir,
return 0;
}
+static int
+setup_pkt_tunnel_reformat(struct mlx5_core_dev *mdev,
+ struct mlx5_accel_esp_xfrm_attrs *attrs,
+ struct mlx5_pkt_reformat_params *reformat_params)
+{
+ struct ip_esp_hdr *esp_hdr;
+ struct ipv6hdr *ipv6hdr;
+ struct ethhdr *eth_hdr;
+ struct iphdr *iphdr;
+ char *reformatbf;
+ size_t bfflen;
+ void *hdr;
+
+ bfflen = sizeof(*eth_hdr);
+
+ if (attrs->dir == XFRM_DEV_OFFLOAD_OUT) {
+ bfflen += sizeof(*esp_hdr) + 8;
+
+ switch (attrs->family) {
+ case AF_INET:
+ bfflen += sizeof(*iphdr);
+ break;
+ case AF_INET6:
+ bfflen += sizeof(*ipv6hdr);
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ reformatbf = kzalloc(bfflen, GFP_KERNEL);
+ if (!reformatbf)
+ return -ENOMEM;
+
+ eth_hdr = (struct ethhdr *)reformatbf;
+ switch (attrs->family) {
+ case AF_INET:
+ eth_hdr->h_proto = htons(ETH_P_IP);
+ break;
+ case AF_INET6:
+ eth_hdr->h_proto = htons(ETH_P_IPV6);
+ break;
+ default:
+ goto free_reformatbf;
+ }
+
+ ether_addr_copy(eth_hdr->h_dest, attrs->dmac);
+ ether_addr_copy(eth_hdr->h_source, attrs->smac);
+
+ switch (attrs->dir) {
+ case XFRM_DEV_OFFLOAD_IN:
+ reformat_params->type = MLX5_REFORMAT_TYPE_L3_ESP_TUNNEL_TO_L2;
+ break;
+ case XFRM_DEV_OFFLOAD_OUT:
+ reformat_params->type = MLX5_REFORMAT_TYPE_L2_TO_L3_ESP_TUNNEL;
+ reformat_params->param_0 = attrs->authsize;
+
+ hdr = reformatbf + sizeof(*eth_hdr);
+ switch (attrs->family) {
+ case AF_INET:
+ iphdr = (struct iphdr *)hdr;
+ memcpy(&iphdr->saddr, &attrs->saddr.a4, 4);
+ memcpy(&iphdr->daddr, &attrs->daddr.a4, 4);
+ iphdr->version = 4;
+ iphdr->ihl = 5;
+ iphdr->ttl = IPSEC_TUNNEL_DEFAULT_TTL;
+ iphdr->protocol = IPPROTO_ESP;
+ hdr += sizeof(*iphdr);
+ break;
+ case AF_INET6:
+ ipv6hdr = (struct ipv6hdr *)hdr;
+ memcpy(&ipv6hdr->saddr, &attrs->saddr.a6, 16);
+ memcpy(&ipv6hdr->daddr, &attrs->daddr.a6, 16);
+ ipv6hdr->nexthdr = IPPROTO_ESP;
+ ipv6hdr->version = 6;
+ ipv6hdr->hop_limit = IPSEC_TUNNEL_DEFAULT_TTL;
+ hdr += sizeof(*ipv6hdr);
+ break;
+ default:
+ goto free_reformatbf;
+ }
+
+ esp_hdr = (struct ip_esp_hdr *)hdr;
+ esp_hdr->spi = htonl(attrs->spi);
+ break;
+ default:
+ goto free_reformatbf;
+ }
+
+ reformat_params->size = bfflen;
+ reformat_params->data = reformatbf;
+ return 0;
+
+free_reformatbf:
+ kfree(reformatbf);
+ return -EINVAL;
+}
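For reference, the resulting buffer sizes under standard header lengths; the trailing 8 bytes added after the ESP header are assumed here to reserve room for the IV (the driver only sizes them, it does not fill them). A standalone size check:

#include <assert.h>

int main(void)
{
	const unsigned int eth = 14, ip4 = 20, ip6 = 40, esp = 8;

	assert(eth + ip4 + esp + 8 == 50);	/* IPv4 tunnel, egress */
	assert(eth + ip6 + esp + 8 == 70);	/* IPv6 tunnel, egress */
	/* ingress (L3_ESP_TUNNEL_TO_L2) builds only the Ethernet header */
	assert(eth == 14);
	return 0;
}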
+
+static int
+setup_pkt_transport_reformat(struct mlx5_accel_esp_xfrm_attrs *attrs,
+ struct mlx5_pkt_reformat_params *reformat_params)
+{
+ u8 *reformatbf;
+ __be32 spi;
+
+ switch (attrs->dir) {
+ case XFRM_DEV_OFFLOAD_IN:
+ reformat_params->type = MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT;
+ break;
+ case XFRM_DEV_OFFLOAD_OUT:
+ if (attrs->family == AF_INET)
+ reformat_params->type =
+ MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV4;
+ else
+ reformat_params->type =
+ MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV6;
+
+ reformatbf = kzalloc(MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_SIZE,
+ GFP_KERNEL);
+ if (!reformatbf)
+ return -ENOMEM;
+
+ /* convert to network format */
+ spi = htonl(attrs->spi);
+ memcpy(reformatbf, &spi, sizeof(spi));
+
+ reformat_params->param_0 = attrs->authsize;
+ reformat_params->size =
+ MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_SIZE;
+ reformat_params->data = reformatbf;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int setup_pkt_reformat(struct mlx5_core_dev *mdev,
struct mlx5_accel_esp_xfrm_attrs *attrs,
struct mlx5_flow_act *flow_act)
{
- enum mlx5_flow_namespace_type ns_type = MLX5_FLOW_NAMESPACE_EGRESS;
struct mlx5_pkt_reformat_params reformat_params = {};
struct mlx5_pkt_reformat *pkt_reformat;
- u8 reformatbf[16] = {};
- __be32 spi;
+ enum mlx5_flow_namespace_type ns_type;
+ int ret;
- if (attrs->dir == XFRM_DEV_OFFLOAD_IN) {
- reformat_params.type = MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT;
+ switch (attrs->dir) {
+ case XFRM_DEV_OFFLOAD_IN:
ns_type = MLX5_FLOW_NAMESPACE_KERNEL;
- goto cmd;
+ break;
+ case XFRM_DEV_OFFLOAD_OUT:
+ ns_type = MLX5_FLOW_NAMESPACE_EGRESS;
+ break;
+ default:
+ return -EINVAL;
}
- if (attrs->family == AF_INET)
- reformat_params.type =
- MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV4;
- else
- reformat_params.type =
- MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV6;
-
- /* convert to network format */
- spi = htonl(attrs->spi);
- memcpy(reformatbf, &spi, 4);
+ switch (attrs->mode) {
+ case XFRM_MODE_TRANSPORT:
+ ret = setup_pkt_transport_reformat(attrs, &reformat_params);
+ break;
+ case XFRM_MODE_TUNNEL:
+ ret = setup_pkt_tunnel_reformat(mdev, attrs, &reformat_params);
+ break;
+ default:
+ ret = -EINVAL;
+ }
- reformat_params.param_0 = attrs->authsize;
- reformat_params.size = sizeof(reformatbf);
- reformat_params.data = &reformatbf;
+ if (ret)
+ return ret;
-cmd:
pkt_reformat =
mlx5_packet_reformat_alloc(mdev, &reformat_params, ns_type);
+ kfree(reformat_params.data);
if (IS_ERR(pkt_reformat))
return PTR_ERR(pkt_reformat);
@@ -926,9 +1091,12 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
flow_act.crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC;
flow_act.crypto.obj_id = sa_entry->ipsec_obj_id;
flow_act.flags |= FLOW_ACT_NO_APPEND;
- flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT |
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ if (attrs->drop)
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
+ else
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest[0].ft = rx->ft.status;
dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
@@ -1018,9 +1186,13 @@ static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
flow_act.crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC;
flow_act.crypto.obj_id = sa_entry->ipsec_obj_id;
flow_act.flags |= FLOW_ACT_NO_APPEND;
- flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT |
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ if (attrs->drop)
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
+ else
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+
dest[0].ft = tx->ft.status;
dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
@@ -1080,16 +1252,16 @@ static int tx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
setup_fte_no_frags(spec);
setup_fte_upper_proto_match(spec, &attrs->upspec);
- if (attrs->reqid) {
+ switch (attrs->action) {
+ case XFRM_POLICY_ALLOW:
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ if (!attrs->reqid)
+ break;
+
err = setup_modify_header(mdev, attrs->reqid,
XFRM_DEV_OFFLOAD_OUT, &flow_act);
if (err)
goto err_mod_header;
- }
-
- switch (attrs->action) {
- case XFRM_POLICY_ALLOW:
- flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
break;
case XFRM_POLICY_BLOCK:
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
@@ -1101,7 +1273,7 @@ static int tx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
default:
WARN_ON(true);
err = -EINVAL;
- goto err_action;
+ goto err_mod_header;
}
flow_act.flags |= FLOW_ACT_NO_APPEND;
@@ -1121,7 +1293,7 @@ static int tx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
return 0;
err_action:
- if (attrs->reqid)
+ if (flow_act.modify_hdr)
mlx5_modify_header_dealloc(mdev, flow_act.modify_hdr);
err_mod_header:
kvfree(spec);
@@ -1430,3 +1602,31 @@ err_rx_ipv4:
kfree(ipsec->tx);
return err;
}
+
+void mlx5e_accel_ipsec_fs_modify(struct mlx5e_ipsec_sa_entry *sa_entry)
+{
+ struct mlx5e_ipsec_sa_entry sa_entry_shadow = {};
+ int err;
+
+ memcpy(&sa_entry_shadow, sa_entry, sizeof(*sa_entry));
+ memset(&sa_entry_shadow.ipsec_rule, 0x00, sizeof(sa_entry->ipsec_rule));
+
+ err = mlx5e_accel_ipsec_fs_add_rule(&sa_entry_shadow);
+ if (err)
+ return;
+
+ mlx5e_accel_ipsec_fs_del_rule(sa_entry);
+ memcpy(sa_entry, &sa_entry_shadow, sizeof(*sa_entry));
+}
+
+bool mlx5e_ipsec_fs_tunnel_enabled(struct mlx5e_ipsec_sa_entry *sa_entry)
+{
+ struct mlx5e_ipsec_rx *rx =
+ ipsec_rx(sa_entry->ipsec, sa_entry->attrs.family);
+ struct mlx5e_ipsec_tx *tx = sa_entry->ipsec->tx;
+
+ if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT)
+ return tx->allow_tunnel_mode;
+
+ return rx->allow_tunnel_mode;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
index 5342b0b07681..df90e19066bc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
@@ -8,6 +8,7 @@
enum {
MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET,
+ MLX5_IPSEC_ASO_REMOVE_FLOW_SOFT_LFT_OFFSET,
};
u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
@@ -47,6 +48,12 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ignore_flow_level) &&
MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ignore_flow_level))
caps |= MLX5_IPSEC_CAP_PRIO;
+
+ if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev,
+ reformat_l2_to_l3_esp_tunnel) &&
+ MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
+ reformat_l3_esp_tunnel_to_l2))
+ caps |= MLX5_IPSEC_CAP_TUNNEL;
}
if (mlx5_get_roce_state(mdev) &&
@@ -75,15 +82,17 @@ static void mlx5e_ipsec_packet_setup(void *obj, u32 pdn,
void *aso_ctx;
aso_ctx = MLX5_ADDR_OF(ipsec_obj, obj, ipsec_aso);
- if (attrs->esn_trigger) {
+ if (attrs->replay_esn.trigger) {
MLX5_SET(ipsec_aso, aso_ctx, esn_event_arm, 1);
if (attrs->dir == XFRM_DEV_OFFLOAD_IN) {
MLX5_SET(ipsec_aso, aso_ctx, window_sz,
- attrs->replay_window / 64);
+ attrs->replay_esn.replay_window / 64);
MLX5_SET(ipsec_aso, aso_ctx, mode,
MLX5_IPSEC_ASO_REPLAY_PROTECTION);
- }
+ }
+ MLX5_SET(ipsec_aso, aso_ctx, mode_parameter,
+ attrs->replay_esn.esn);
}
/* ASO context */
@@ -100,15 +109,15 @@ static void mlx5e_ipsec_packet_setup(void *obj, u32 pdn,
if (attrs->dir == XFRM_DEV_OFFLOAD_OUT)
MLX5_SET(ipsec_aso, aso_ctx, mode, MLX5_IPSEC_ASO_INC_SN);
- if (attrs->hard_packet_limit != XFRM_INF) {
+ if (attrs->lft.hard_packet_limit != XFRM_INF) {
MLX5_SET(ipsec_aso, aso_ctx, remove_flow_pkt_cnt,
- lower_32_bits(attrs->hard_packet_limit));
+ attrs->lft.hard_packet_limit);
MLX5_SET(ipsec_aso, aso_ctx, hard_lft_arm, 1);
}
- if (attrs->soft_packet_limit != XFRM_INF) {
+ if (attrs->lft.soft_packet_limit != XFRM_INF) {
MLX5_SET(ipsec_aso, aso_ctx, remove_flow_soft_lft,
- lower_32_bits(attrs->soft_packet_limit));
+ attrs->lft.soft_packet_limit);
MLX5_SET(ipsec_aso, aso_ctx, soft_lft_arm, 1);
}
@@ -135,10 +144,10 @@ static int mlx5_create_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry)
salt_iv_p = MLX5_ADDR_OF(ipsec_obj, obj, implicit_iv);
memcpy(salt_iv_p, &aes_gcm->seq_iv, sizeof(aes_gcm->seq_iv));
/* esn */
- if (attrs->esn_trigger) {
+ if (attrs->replay_esn.trigger) {
MLX5_SET(ipsec_obj, obj, esn_en, 1);
- MLX5_SET(ipsec_obj, obj, esn_msb, attrs->esn);
- MLX5_SET(ipsec_obj, obj, esn_overlap, attrs->esn_overlap);
+ MLX5_SET(ipsec_obj, obj, esn_msb, attrs->replay_esn.esn_msb);
+ MLX5_SET(ipsec_obj, obj, esn_overlap, attrs->replay_esn.overlap);
}
MLX5_SET(ipsec_obj, obj, dekn, sa_entry->enc_key_id);
@@ -224,9 +233,6 @@ static int mlx5_modify_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry,
void *obj;
int err;
- if (!attrs->esn_trigger)
- return 0;
-
general_obj_types = MLX5_CAP_GEN_64(mdev, general_obj_types);
if (!(general_obj_types & MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC))
return -EINVAL;
@@ -254,8 +260,8 @@ static int mlx5_modify_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry,
MLX5_SET64(ipsec_obj, obj, modify_field_select,
MLX5_MODIFY_IPSEC_BITMASK_ESN_OVERLAP |
MLX5_MODIFY_IPSEC_BITMASK_ESN_MSB);
- MLX5_SET(ipsec_obj, obj, esn_msb, attrs->esn);
- MLX5_SET(ipsec_obj, obj, esn_overlap, attrs->esn_overlap);
+ MLX5_SET(ipsec_obj, obj, esn_msb, attrs->replay_esn.esn_msb);
+ MLX5_SET(ipsec_obj, obj, esn_overlap, attrs->replay_esn.overlap);
/* general object fields set */
MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);
@@ -275,29 +281,24 @@ void mlx5_accel_esp_modify_xfrm(struct mlx5e_ipsec_sa_entry *sa_entry,
memcpy(&sa_entry->attrs, attrs, sizeof(sa_entry->attrs));
}
-static void
-mlx5e_ipsec_aso_update_esn(struct mlx5e_ipsec_sa_entry *sa_entry,
- const struct mlx5_accel_esp_xfrm_attrs *attrs)
+static void mlx5e_ipsec_aso_update(struct mlx5e_ipsec_sa_entry *sa_entry,
+ struct mlx5_wqe_aso_ctrl_seg *data)
{
- struct mlx5_wqe_aso_ctrl_seg data = {};
-
- data.data_mask_mode = MLX5_ASO_DATA_MASK_MODE_BITWISE_64BIT << 6;
- data.condition_1_0_operand = MLX5_ASO_ALWAYS_TRUE | MLX5_ASO_ALWAYS_TRUE
- << 4;
- data.data_offset_condition_operand = MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET;
- data.bitwise_data = cpu_to_be64(BIT_ULL(54));
- data.data_mask = data.bitwise_data;
+ data->data_mask_mode = MLX5_ASO_DATA_MASK_MODE_BITWISE_64BIT << 6;
+ data->condition_1_0_operand = MLX5_ASO_ALWAYS_TRUE |
+ MLX5_ASO_ALWAYS_TRUE << 4;
- mlx5e_ipsec_aso_query(sa_entry, &data);
+ mlx5e_ipsec_aso_query(sa_entry, data);
}
static void mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry,
u32 mode_param)
{
struct mlx5_accel_esp_xfrm_attrs attrs = {};
+ struct mlx5_wqe_aso_ctrl_seg data = {};
if (mode_param < MLX5E_IPSEC_ESN_SCOPE_MID) {
- sa_entry->esn_state.esn++;
+ sa_entry->esn_state.esn_msb++;
sa_entry->esn_state.overlap = 0;
} else {
sa_entry->esn_state.overlap = 1;
@@ -305,25 +306,129 @@ static void mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry,
mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &attrs);
mlx5_accel_esp_modify_xfrm(sa_entry, &attrs);
- mlx5e_ipsec_aso_update_esn(sa_entry, &attrs);
+
+ data.data_offset_condition_operand =
+ MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET;
+ data.bitwise_data = cpu_to_be64(BIT_ULL(54));
+ data.data_mask = data.bitwise_data;
+
+ mlx5e_ipsec_aso_update(sa_entry, &data);
+}
+
+static void mlx5e_ipsec_aso_update_hard(struct mlx5e_ipsec_sa_entry *sa_entry)
+{
+ struct mlx5_wqe_aso_ctrl_seg data = {};
+
+ data.data_offset_condition_operand =
+ MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET;
+ data.bitwise_data = cpu_to_be64(BIT_ULL(57) + BIT_ULL(31));
+ data.data_mask = data.bitwise_data;
+ mlx5e_ipsec_aso_update(sa_entry, &data);
+}
+
+static void mlx5e_ipsec_aso_update_soft(struct mlx5e_ipsec_sa_entry *sa_entry,
+ u32 val)
+{
+ struct mlx5_wqe_aso_ctrl_seg data = {};
+
+ data.data_offset_condition_operand =
+ MLX5_IPSEC_ASO_REMOVE_FLOW_SOFT_LFT_OFFSET;
+ data.bitwise_data = cpu_to_be64(val);
+ data.data_mask = cpu_to_be64(U32_MAX);
+ mlx5e_ipsec_aso_update(sa_entry, &data);
+}
+
+static void mlx5e_ipsec_handle_limits(struct mlx5e_ipsec_sa_entry *sa_entry)
+{
+ struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
+ struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
+ struct mlx5e_ipsec_aso *aso = ipsec->aso;
+ bool soft_arm, hard_arm;
+ u64 hard_cnt;
+
+ lockdep_assert_held(&sa_entry->x->lock);
+
+ soft_arm = !MLX5_GET(ipsec_aso, aso->ctx, soft_lft_arm);
+ hard_arm = !MLX5_GET(ipsec_aso, aso->ctx, hard_lft_arm);
+ if (!soft_arm && !hard_arm)
+ /* It is not a lifetime event */
+ return;
+
+ hard_cnt = MLX5_GET(ipsec_aso, aso->ctx, remove_flow_pkt_cnt);
+ if (!hard_cnt || hard_arm) {
+ /* It is possible to see the packet counter equal to zero without
+ * the hard limit event armed. Such a situation can occur if the
+ * counter decreased while we were handling the soft limit event.
+ *
+ * However, it would be a HW/FW bug if the hard limit event is
+ * raised while the packet counter is not zero.
+ */
+ WARN_ON_ONCE(hard_arm && hard_cnt);
+
+ /* Notify about hard limit */
+ xfrm_state_check_expire(sa_entry->x);
+ return;
+ }
+
+ /* We are handling a soft limit event. */
+ if (!sa_entry->limits.soft_limit_hit &&
+ sa_entry->limits.round == attrs->lft.numb_rounds_soft) {
+ sa_entry->limits.soft_limit_hit = true;
+ /* Notify about soft limit */
+ xfrm_state_check_expire(sa_entry->x);
+
+ if (sa_entry->limits.round == attrs->lft.numb_rounds_hard)
+ goto hard;
+
+ if (attrs->lft.soft_packet_limit > BIT_ULL(31)) {
+ /* We cannot avoid a soft_value that has the high
+ * bit set. For instance, soft_value=2^31+1 cannot be
+ * adjusted to its high-bit-clear equivalent of
+ * soft_value=1, because that is too close to 0.
+ *
+ * Thus we have a corner case where we can hit the
+ * soft_limit with the high bit set but cannot adjust
+ * the counter. In that case we set a temporary
+ * interrupt_value at least 2^30 away from here and
+ * do the adjustment then.
+ */
+ mlx5e_ipsec_aso_update_soft(sa_entry,
+ BIT_ULL(31) - BIT_ULL(30));
+ sa_entry->limits.fix_limit = true;
+ return;
+ }
+
+ sa_entry->limits.fix_limit = true;
+ }
+
+hard:
+ if (sa_entry->limits.round == attrs->lft.numb_rounds_hard) {
+ mlx5e_ipsec_aso_update_soft(sa_entry, 0);
+ attrs->lft.soft_packet_limit = XFRM_INF;
+ return;
+ }
+
+ mlx5e_ipsec_aso_update_hard(sa_entry);
+ sa_entry->limits.round++;
+ if (sa_entry->limits.round == attrs->lft.numb_rounds_soft)
+ mlx5e_ipsec_aso_update_soft(sa_entry,
+ attrs->lft.soft_packet_limit);
+ if (sa_entry->limits.fix_limit) {
+ sa_entry->limits.fix_limit = false;
+ mlx5e_ipsec_aso_update_soft(sa_entry, BIT_ULL(31) - 1);
+ }
}
static void mlx5e_ipsec_handle_event(struct work_struct *_work)
{
struct mlx5e_ipsec_work *work =
container_of(_work, struct mlx5e_ipsec_work, work);
+ struct mlx5e_ipsec_sa_entry *sa_entry = work->data;
struct mlx5_accel_esp_xfrm_attrs *attrs;
- struct mlx5e_ipsec_sa_entry *sa_entry;
struct mlx5e_ipsec_aso *aso;
- struct mlx5e_ipsec *ipsec;
int ret;
- sa_entry = xa_load(&work->ipsec->sadb, work->id);
- if (!sa_entry)
- goto out;
-
- ipsec = sa_entry->ipsec;
- aso = ipsec->aso;
+ aso = sa_entry->ipsec->aso;
attrs = &sa_entry->attrs;
spin_lock(&sa_entry->x->lock);
@@ -331,21 +436,18 @@ static void mlx5e_ipsec_handle_event(struct work_struct *_work)
if (ret)
goto unlock;
- if (attrs->esn_trigger &&
+ if (attrs->replay_esn.trigger &&
!MLX5_GET(ipsec_aso, aso->ctx, esn_event_arm)) {
u32 mode_param = MLX5_GET(ipsec_aso, aso->ctx, mode_parameter);
mlx5e_ipsec_update_esn_state(sa_entry, mode_param);
}
- if (attrs->soft_packet_limit != XFRM_INF)
- if (!MLX5_GET(ipsec_aso, aso->ctx, soft_lft_arm) ||
- !MLX5_GET(ipsec_aso, aso->ctx, hard_lft_arm))
- xfrm_state_check_expire(sa_entry->x);
+ if (attrs->lft.soft_packet_limit != XFRM_INF)
+ mlx5e_ipsec_handle_limits(sa_entry);
unlock:
spin_unlock(&sa_entry->x->lock);
-out:
kfree(work);
}
@@ -353,6 +455,7 @@ static int mlx5e_ipsec_event(struct notifier_block *nb, unsigned long event,
void *data)
{
struct mlx5e_ipsec *ipsec = container_of(nb, struct mlx5e_ipsec, nb);
+ struct mlx5e_ipsec_sa_entry *sa_entry;
struct mlx5_eqe_obj_change *object;
struct mlx5e_ipsec_work *work;
struct mlx5_eqe *eqe = data;
@@ -367,13 +470,16 @@ static int mlx5e_ipsec_event(struct notifier_block *nb, unsigned long event,
if (type != MLX5_GENERAL_OBJECT_TYPES_IPSEC)
return NOTIFY_DONE;
+ sa_entry = xa_load(&ipsec->sadb, be32_to_cpu(object->obj_id));
+ if (!sa_entry)
+ return NOTIFY_DONE;
+
work = kmalloc(sizeof(*work), GFP_ATOMIC);
if (!work)
return NOTIFY_DONE;
INIT_WORK(&work->work, mlx5e_ipsec_handle_event);
- work->ipsec = ipsec;
- work->id = be32_to_cpu(object->obj_id);
+ work->data = sa_entry;
queue_work(ipsec->wq, &work->work);
return NOTIFY_OK;
@@ -464,6 +570,7 @@ int mlx5e_ipsec_aso_query(struct mlx5e_ipsec_sa_entry *sa_entry,
struct mlx5_wqe_aso_ctrl_seg *ctrl;
struct mlx5e_hw_objs *res;
struct mlx5_aso_wqe *wqe;
+ unsigned long expires;
u8 ds_cnt;
int ret;
@@ -485,7 +592,12 @@ int mlx5e_ipsec_aso_query(struct mlx5e_ipsec_sa_entry *sa_entry,
mlx5e_ipsec_aso_copy(ctrl, data);
mlx5_aso_post_wqe(aso->aso, false, &wqe->ctrl);
- ret = mlx5_aso_poll_cq(aso->aso, false);
+ expires = jiffies + msecs_to_jiffies(10);
+ do {
+ ret = mlx5_aso_poll_cq(aso->aso, false);
+ if (ret)
+ usleep_range(2, 10);
+ } while (ret && time_is_after_jiffies(expires));
spin_unlock_bh(&aso->lock);
return ret;
}
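
The ASO query above now retries mlx5_aso_poll_cq() under a 10 ms deadline instead of giving up after a single poll. A hedged user-space analogue of that bounded-poll loop; poll_once() is a hypothetical stand-in for the CQ poll, and nanosleep() plays the role of usleep_range(2, 10):

#include <errno.h>
#include <stdio.h>
#include <time.h>

static int poll_once(void)
{
	static int countdown = 3;

	return --countdown > 0 ? -EAGAIN : 0;	/* succeeds on the third try */
}

static long long now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

static int poll_with_deadline(void)
{
	long long expires = now_ns() + 10 * 1000000LL;	/* 10 ms budget */
	struct timespec backoff = { .tv_nsec = 5000 };	/* short pause between polls */
	int ret;

	do {
		ret = poll_once();
		if (ret)
			nanosleep(&backoff, NULL);
	} while (ret && now_ns() < expires);

	return ret;	/* 0 on success, last error if the deadline passed */
}

int main(void)
{
	printf("ret=%d\n", poll_with_deadline());	/* ret=0 */
	return 0;
}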
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
index 33b3620ea45c..f3428dbeb298 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
@@ -4,6 +4,7 @@
#include <linux/mlx5/device.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/xarray.h>
+#include <linux/if_vlan.h>
#include "en.h"
#include "lib/aso.h"
@@ -348,12 +349,21 @@ static void mlx5e_macsec_cleanup_sa(struct mlx5e_macsec *macsec,
sa->macsec_rule = NULL;
}
+static struct mlx5e_priv *macsec_netdev_priv(const struct net_device *dev)
+{
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
+ if (is_vlan_dev(dev))
+ return netdev_priv(vlan_dev_priv(dev)->real_dev);
+#endif
+ return netdev_priv(dev);
+}
+
static int mlx5e_macsec_init_sa(struct macsec_context *ctx,
struct mlx5e_macsec_sa *sa,
bool encrypt,
bool is_tx)
{
- struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+ struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
struct mlx5e_macsec *macsec = priv->macsec;
struct mlx5_macsec_rule_attrs rule_attrs;
struct mlx5_core_dev *mdev = priv->mdev;
@@ -427,7 +437,7 @@ static int macsec_rx_sa_active_update(struct macsec_context *ctx,
struct mlx5e_macsec_sa *rx_sa,
bool active)
{
- struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+ struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
struct mlx5e_macsec *macsec = priv->macsec;
int err = 0;
@@ -508,9 +518,9 @@ static void update_macsec_epn(struct mlx5e_macsec_sa *sa, const struct macsec_ke
static int mlx5e_macsec_add_txsa(struct macsec_context *ctx)
{
+ struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
const struct macsec_tx_sc *tx_sc = &ctx->secy->tx_sc;
const struct macsec_tx_sa *ctx_tx_sa = ctx->sa.tx_sa;
- struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
const struct macsec_secy *secy = ctx->secy;
struct mlx5e_macsec_device *macsec_device;
struct mlx5_core_dev *mdev = priv->mdev;
@@ -583,9 +593,9 @@ out:
static int mlx5e_macsec_upd_txsa(struct macsec_context *ctx)
{
+ struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
const struct macsec_tx_sc *tx_sc = &ctx->secy->tx_sc;
const struct macsec_tx_sa *ctx_tx_sa = ctx->sa.tx_sa;
- struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
struct mlx5e_macsec_device *macsec_device;
u8 assoc_num = ctx->sa.assoc_num;
struct mlx5e_macsec_sa *tx_sa;
@@ -645,7 +655,7 @@ out:
static int mlx5e_macsec_del_txsa(struct macsec_context *ctx)
{
- struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+ struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
struct mlx5e_macsec_device *macsec_device;
u8 assoc_num = ctx->sa.assoc_num;
struct mlx5e_macsec_sa *tx_sa;
@@ -696,7 +706,7 @@ static u32 mlx5e_macsec_get_sa_from_hashtable(struct rhashtable *sci_hash, sci_t
static int mlx5e_macsec_add_rxsc(struct macsec_context *ctx)
{
struct mlx5e_macsec_rx_sc_xarray_element *sc_xarray_element;
- struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+ struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
const struct macsec_rx_sc *ctx_rx_sc = ctx->rx_sc;
struct mlx5e_macsec_device *macsec_device;
struct mlx5e_macsec_rx_sc *rx_sc;
@@ -776,7 +786,7 @@ out:
static int mlx5e_macsec_upd_rxsc(struct macsec_context *ctx)
{
- struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+ struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
const struct macsec_rx_sc *ctx_rx_sc = ctx->rx_sc;
struct mlx5e_macsec_device *macsec_device;
struct mlx5e_macsec_rx_sc *rx_sc;
@@ -854,7 +864,7 @@ static void macsec_del_rxsc_ctx(struct mlx5e_macsec *macsec, struct mlx5e_macsec
static int mlx5e_macsec_del_rxsc(struct macsec_context *ctx)
{
- struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+ struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
struct mlx5e_macsec_device *macsec_device;
struct mlx5e_macsec_rx_sc *rx_sc;
struct mlx5e_macsec *macsec;
@@ -890,8 +900,8 @@ out:
static int mlx5e_macsec_add_rxsa(struct macsec_context *ctx)
{
+ struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
const struct macsec_rx_sa *ctx_rx_sa = ctx->sa.rx_sa;
- struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
struct mlx5e_macsec_device *macsec_device;
struct mlx5_core_dev *mdev = priv->mdev;
u8 assoc_num = ctx->sa.assoc_num;
@@ -976,8 +986,8 @@ out:
static int mlx5e_macsec_upd_rxsa(struct macsec_context *ctx)
{
+ struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
const struct macsec_rx_sa *ctx_rx_sa = ctx->sa.rx_sa;
- struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
struct mlx5e_macsec_device *macsec_device;
u8 assoc_num = ctx->sa.assoc_num;
struct mlx5e_macsec_rx_sc *rx_sc;
@@ -1033,7 +1043,7 @@ out:
static int mlx5e_macsec_del_rxsa(struct macsec_context *ctx)
{
- struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+ struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
struct mlx5e_macsec_device *macsec_device;
sci_t sci = ctx->sa.rx_sa->sc->sci;
struct mlx5e_macsec_rx_sc *rx_sc;
@@ -1085,7 +1095,7 @@ out:
static int mlx5e_macsec_add_secy(struct macsec_context *ctx)
{
- struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+ struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
const struct net_device *dev = ctx->secy->netdev;
const struct net_device *netdev = ctx->netdev;
struct mlx5e_macsec_device *macsec_device;
@@ -1137,7 +1147,7 @@ out:
static int macsec_upd_secy_hw_address(struct macsec_context *ctx,
struct mlx5e_macsec_device *macsec_device)
{
- struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+ struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
const struct net_device *dev = ctx->secy->netdev;
struct mlx5e_macsec *macsec = priv->macsec;
struct mlx5e_macsec_rx_sc *rx_sc, *tmp;
@@ -1184,8 +1194,8 @@ out:
*/
static int mlx5e_macsec_upd_secy(struct macsec_context *ctx)
{
+ struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
const struct macsec_tx_sc *tx_sc = &ctx->secy->tx_sc;
- struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
const struct net_device *dev = ctx->secy->netdev;
struct mlx5e_macsec_device *macsec_device;
struct mlx5e_macsec_sa *tx_sa;
@@ -1240,7 +1250,7 @@ out:
static int mlx5e_macsec_del_secy(struct macsec_context *ctx)
{
- struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+ struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
struct mlx5e_macsec_device *macsec_device;
struct mlx5e_macsec_rx_sc *rx_sc, *tmp;
struct mlx5e_macsec_sa *tx_sa;
@@ -1741,7 +1751,7 @@ void mlx5e_macsec_offload_handle_rx_skb(struct net_device *netdev,
{
struct mlx5e_macsec_rx_sc_xarray_element *sc_xarray_element;
u32 macsec_meta_data = be32_to_cpu(cqe->ft_metadata);
- struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5e_priv *priv = macsec_netdev_priv(netdev);
struct mlx5e_macsec_rx_sc *rx_sc;
struct mlx5e_macsec *macsec;
u32 fs_id;
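
The recurring change across macsec.c above is that every offload callback now fetches private data via macsec_netdev_priv(), which unwraps a VLAN device to its real_dev before calling netdev_priv(), since a stacked VLAN device carries no mlx5 state of its own. A minimal sketch of the idiom; struct device here is a simplified stand-in, not the kernel's net_device:

#include <stdbool.h>
#include <stdio.h>

struct device {
	bool is_vlan;
	struct device *real_dev;	/* lower device when is_vlan is set */
	void *priv;			/* driver private data */
};

static void *device_priv(const struct device *dev)
{
	if (dev->is_vlan)
		dev = dev->real_dev;	/* state lives on the real device */
	return dev->priv;
}

int main(void)
{
	int state = 42;
	struct device lower = { .priv = &state };
	struct device vlan = { .is_vlan = true, .real_dev = &lower };

	printf("%d\n", *(int *)device_priv(&vlan));	/* prints 42 */
	return 0;
}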
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.c
index 5b658a5588c6..7fc901a6ec5f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.c
@@ -4,6 +4,7 @@
#include <net/macsec.h>
#include <linux/netdevice.h>
#include <linux/mlx5/qp.h>
+#include <linux/if_vlan.h>
#include "fs_core.h"
#include "en/fs.h"
#include "en_accel/macsec_fs.h"
@@ -292,8 +293,6 @@ static int macsec_fs_tx_create(struct mlx5e_macsec_fs *macsec_fs)
}
/* Tx crypto table MKE rule - MKE packets shouldn't be offloaded */
- memset(&flow_act, 0, sizeof(flow_act));
- memset(spec, 0, sizeof(*spec));
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype);
@@ -510,6 +509,8 @@ static void macsec_fs_tx_del_rule(struct mlx5e_macsec_fs *macsec_fs,
macsec_fs_tx_ft_put(macsec_fs);
}
+#define MLX5_REFORMAT_PARAM_ADD_MACSEC_OFFSET_4_BYTES 1
+
static union mlx5e_macsec_rule *
macsec_fs_tx_add_rule(struct mlx5e_macsec_fs *macsec_fs,
const struct macsec_context *macsec_ctx,
@@ -555,6 +556,10 @@ macsec_fs_tx_add_rule(struct mlx5e_macsec_fs *macsec_fs,
reformat_params.type = MLX5_REFORMAT_TYPE_ADD_MACSEC;
reformat_params.size = reformat_size;
reformat_params.data = reformatbf;
+
+ if (is_vlan_dev(macsec_ctx->netdev))
+ reformat_params.param_0 = MLX5_REFORMAT_PARAM_ADD_MACSEC_OFFSET_4_BYTES;
+
flow_act.pkt_reformat = mlx5_packet_reformat_alloc(macsec_fs->mdev,
&reformat_params,
MLX5_FLOW_NAMESPACE_EGRESS_MACSEC);
@@ -1109,7 +1114,6 @@ static void macsec_fs_rx_setup_fte(struct mlx5_flow_spec *spec,
static union mlx5e_macsec_rule *
macsec_fs_rx_add_rule(struct mlx5e_macsec_fs *macsec_fs,
- const struct macsec_context *macsec_ctx,
struct mlx5_macsec_rule_attrs *attrs,
u32 fs_id)
{
@@ -1334,7 +1338,7 @@ mlx5e_macsec_fs_add_rule(struct mlx5e_macsec_fs *macsec_fs,
{
return (attrs->action == MLX5_ACCEL_MACSEC_ACTION_ENCRYPT) ?
macsec_fs_tx_add_rule(macsec_fs, macsec_ctx, attrs, sa_fs_id) :
- macsec_fs_rx_add_rule(macsec_fs, macsec_ctx, attrs, *sa_fs_id);
+ macsec_fs_rx_add_rule(macsec_fs, attrs, *sa_fs_id);
}
void mlx5e_macsec_fs_del_rule(struct mlx5e_macsec_fs *macsec_fs,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index ec72743b64e2..2fda7385de71 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -803,6 +803,9 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
pool_size = rq->mpwqe.pages_per_wqe <<
mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk);
+ if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk) && params->xdp_prog)
+ pool_size *= 2; /* additional page per packet for the linear part */
+
rq->mpwqe.log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
rq->mpwqe.num_strides =
BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk));
@@ -1300,17 +1303,19 @@ static int mlx5e_alloc_xdpsq_fifo(struct mlx5e_xdpsq *sq, int numa)
{
struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo;
int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
- int dsegs_per_wq = wq_sz * MLX5_SEND_WQEBB_NUM_DS;
+ int entries = wq_sz * MLX5_SEND_WQEBB_NUM_DS * 2; /* upper bound on the number
+ * of entries across all xmit modes.
+ */
size_t size;
- size = array_size(sizeof(*xdpi_fifo->xi), dsegs_per_wq);
+ size = array_size(sizeof(*xdpi_fifo->xi), entries);
xdpi_fifo->xi = kvzalloc_node(size, GFP_KERNEL, numa);
if (!xdpi_fifo->xi)
return -ENOMEM;
xdpi_fifo->pc = &sq->xdpi_fifo_pc;
xdpi_fifo->cc = &sq->xdpi_fifo_cc;
- xdpi_fifo->mask = dsegs_per_wq - 1;
+ xdpi_fifo->mask = entries - 1;
return 0;
}
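
Doubling the XDPI FIFO size above works only because the ring is indexed as counter & mask with mask = entries - 1, which requires entries to remain a power of two (wq_sz and MLX5_SEND_WQEBB_NUM_DS both are, so the factor of 2 preserves the property). A small generic sketch of such a free-running-counter ring:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define ENTRIES 8u	/* must be a power of two for the mask trick */

struct fifo {
	int buf[ENTRIES];
	uint32_t pc;	/* producer counter, free-running */
	uint32_t cc;	/* consumer counter, free-running */
	uint32_t mask;
};

int main(void)
{
	struct fifo f = { .mask = ENTRIES - 1 };

	assert((ENTRIES & (ENTRIES - 1)) == 0);

	for (int i = 0; i < 12; i++) {
		f.buf[f.pc++ & f.mask] = i;		/* wraps with no modulo */
		printf("%d\n", f.buf[f.cc++ & f.mask]);	/* prints 0..11 */
	}
	return 0;
}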
@@ -1860,11 +1865,7 @@ int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
csp.min_inline_mode = sq->min_inline_mode;
set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
- /* Don't enable multi buffer on XDP_REDIRECT SQ, as it's not yet
- * supported by upstream, and there is no defined trigger to allow
- * transmitting redirected multi-buffer frames.
- */
- if (param->is_xdp_mb && !is_redirect)
+ if (param->is_xdp_mb)
set_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state);
err = mlx5e_create_sq_rdy(c->mdev, param, &csp, 0, &sq->sqn);
@@ -1888,7 +1889,6 @@ int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&sq->wq, i);
struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
- struct mlx5_wqe_data_seg *dseg;
sq->db.wqe_info[i] = (struct mlx5e_xdp_wqe_info) {
.num_wqebbs = 1,
@@ -1897,9 +1897,6 @@ int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);
-
- dseg = (struct mlx5_wqe_data_seg *)cseg + (ds_cnt - 1);
- dseg->lkey = sq->mkey_be;
}
}
@@ -4066,9 +4063,9 @@ void mlx5e_set_xdp_feature(struct net_device *netdev)
val = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
NETDEV_XDP_ACT_XSK_ZEROCOPY |
- NETDEV_XDP_ACT_NDO_XMIT;
- if (params->rq_wq_type == MLX5_WQ_TYPE_CYCLIC)
- val |= NETDEV_XDP_ACT_RX_SG;
+ NETDEV_XDP_ACT_RX_SG |
+ NETDEV_XDP_ACT_NDO_XMIT |
+ NETDEV_XDP_ACT_NDO_XMIT_SG;
xdp_set_features_flag(netdev, val);
}
@@ -4262,19 +4259,24 @@ static bool mlx5e_params_validate_xdp(struct net_device *netdev,
/* No XSK params: AF_XDP can't be enabled yet at the point of setting
* the XDP program.
*/
- is_linear = mlx5e_rx_is_linear_skb(mdev, params, NULL);
-
- if (!is_linear && params->rq_wq_type != MLX5_WQ_TYPE_CYCLIC) {
- netdev_warn(netdev, "XDP is not allowed with striding RQ and MTU(%d) > %d\n",
- params->sw_mtu,
- mlx5e_xdp_max_mtu(params, NULL));
- return false;
- }
- if (!is_linear && !params->xdp_prog->aux->xdp_has_frags) {
- netdev_warn(netdev, "MTU(%d) > %d, too big for an XDP program not aware of multi buffer\n",
- params->sw_mtu,
- mlx5e_xdp_max_mtu(params, NULL));
- return false;
+ is_linear = params->rq_wq_type == MLX5_WQ_TYPE_CYCLIC ?
+ mlx5e_rx_is_linear_skb(mdev, params, NULL) :
+ mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL);
+
+ if (!is_linear) {
+ if (!params->xdp_prog->aux->xdp_has_frags) {
+ netdev_warn(netdev, "MTU(%d) > %d, too big for an XDP program not aware of multi buffer\n",
+ params->sw_mtu,
+ mlx5e_xdp_max_mtu(params, NULL));
+ return false;
+ }
+ if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
+ !mlx5e_verify_params_rx_mpwqe_strides(mdev, params, NULL)) {
+ netdev_warn(netdev, "XDP is not allowed with striding RQ and MTU(%d) > %d\n",
+ params->sw_mtu,
+ mlx5e_xdp_max_mtu(params, NULL));
+ return false;
+ }
}
return true;
@@ -4766,20 +4768,15 @@ static void mlx5e_tx_timeout(struct net_device *dev, unsigned int txqueue)
queue_work(priv->wq, &priv->tx_timeout_work);
}
-static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog)
+static int mlx5e_xdp_allowed(struct net_device *netdev, struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params)
{
- struct net_device *netdev = priv->netdev;
- struct mlx5e_params new_params;
-
- if (priv->channels.params.packet_merge.type != MLX5E_PACKET_MERGE_NONE) {
+ if (params->packet_merge.type != MLX5E_PACKET_MERGE_NONE) {
netdev_warn(netdev, "can't set XDP while HW-GRO/LRO is on, disable them first\n");
return -EINVAL;
}
- new_params = priv->channels.params;
- new_params.xdp_prog = prog;
-
- if (!mlx5e_params_validate_xdp(netdev, priv->mdev, &new_params))
+ if (!mlx5e_params_validate_xdp(netdev, mdev, params))
return -EINVAL;
return 0;
@@ -4806,8 +4803,11 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
mutex_lock(&priv->state_lock);
+ new_params = priv->channels.params;
+ new_params.xdp_prog = prog;
+
if (prog) {
- err = mlx5e_xdp_allowed(priv, prog);
+ err = mlx5e_xdp_allowed(netdev, priv->mdev, &new_params);
if (err)
goto unlock;
}
@@ -4815,22 +4815,6 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
/* no need for full reset when exchanging programs */
reset = (!priv->channels.params.xdp_prog || !prog);
- new_params = priv->channels.params;
- new_params.xdp_prog = prog;
-
- /* XDP affects striding RQ parameters. Block XDP if striding RQ won't be
- * supported with the new parameters: if PAGE_SIZE is bigger than
- * MLX5_MPWQE_LOG_STRIDE_SZ_MAX, striding RQ can't be used, even though
- * the MTU is small enough for the linear mode, because XDP uses strides
- * of PAGE_SIZE on regular RQs.
- */
- if (reset && MLX5E_GET_PFLAG(&new_params, MLX5E_PFLAG_RX_STRIDING_RQ)) {
- /* Checking for regular RQs here; XSK RQs were checked on XSK bind. */
- err = mlx5e_mpwrq_validate_regular(priv->mdev, &new_params);
- if (err)
- goto unlock;
- }
-
old_prog = priv->channels.params.xdp_prog;
err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, reset);
@@ -5125,6 +5109,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->vlan_features |= NETIF_F_SG;
netdev->vlan_features |= NETIF_F_HW_CSUM;
+ netdev->vlan_features |= NETIF_F_HW_MACSEC;
netdev->vlan_features |= NETIF_F_GRO;
netdev->vlan_features |= NETIF_F_TSO;
netdev->vlan_features |= NETIF_F_TSO6;
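
The reworked mlx5e_xdp_set()/mlx5e_xdp_allowed() path above builds new_params first and validates that copy before committing, so a rejected program leaves the live parameters untouched. A minimal sketch of the validate-then-commit shape, assuming a toy params struct and a single mutual-exclusion check standing in for the real validation:

#include <stdbool.h>
#include <stdio.h>

struct params {
	bool xdp_prog;
	bool lro;
};

static bool params_valid(const struct params *p)
{
	/* XDP and LRO are mutually exclusive, as in mlx5e_xdp_allowed() */
	return !(p->xdp_prog && p->lro);
}

static int set_xdp(struct params *live, bool prog)
{
	struct params next = *live;	/* work on a copy */

	next.xdp_prog = prog;
	if (prog && !params_valid(&next))
		return -1;		/* live params stay untouched */

	*live = next;			/* commit only after validation */
	return 0;
}

int main(void)
{
	struct params p = { .lro = true };

	printf("%d\n", set_xdp(&p, true));	/* -1: rejected */
	p.lro = false;
	printf("%d\n", set_xdp(&p, true));	/* 0: committed */
	return 0;
}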
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 1049805571c6..a8c2ae389d6c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -471,6 +471,35 @@ static int mlx5e_refill_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
return i;
}
+static void
+mlx5e_add_skb_shared_info_frag(struct mlx5e_rq *rq, struct skb_shared_info *sinfo,
+ struct xdp_buff *xdp, struct mlx5e_frag_page *frag_page,
+ u32 frag_offset, u32 len)
+{
+ skb_frag_t *frag;
+
+ dma_addr_t addr = page_pool_get_dma_addr(frag_page->page);
+
+ dma_sync_single_for_cpu(rq->pdev, addr + frag_offset, len, rq->buff.map_dir);
+ if (!xdp_buff_has_frags(xdp)) {
+ /* Init on the first fragment to avoid cold cache access
+ * when possible.
+ */
+ sinfo->nr_frags = 0;
+ sinfo->xdp_frags_size = 0;
+ xdp_buff_set_frags_flag(xdp);
+ }
+
+ frag = &sinfo->frags[sinfo->nr_frags++];
+ __skb_frag_set_page(frag, frag_page->page);
+ skb_frag_off_set(frag, frag_offset);
+ skb_frag_size_set(frag, len);
+
+ if (page_is_pfmemalloc(frag_page->page))
+ xdp_buff_set_frag_pfmemalloc(xdp);
+ sinfo->xdp_frags_size += len;
+}
+
static inline void
mlx5e_add_skb_frag(struct mlx5e_rq *rq, struct sk_buff *skb,
struct page *page, u32 frag_offset, u32 len,
@@ -1601,10 +1630,10 @@ struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va,
}
static void mlx5e_fill_mxbuf(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
- void *va, u16 headroom, u32 len,
+ void *va, u16 headroom, u32 frame_sz, u32 len,
struct mlx5e_xdp_buff *mxbuf)
{
- xdp_init_buff(&mxbuf->xdp, rq->buff.frame0_sz, &rq->xdp_rxq);
+ xdp_init_buff(&mxbuf->xdp, frame_sz, &rq->xdp_rxq);
xdp_prepare_buff(&mxbuf->xdp, va, headroom, len, true);
mxbuf->cqe = cqe;
mxbuf->rq = rq;
@@ -1637,7 +1666,8 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
struct mlx5e_xdp_buff mxbuf;
net_prefetchw(va); /* xdp_frame data area */
- mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, cqe_bcnt, &mxbuf);
+ mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, rq->buff.frame0_sz,
+ cqe_bcnt, &mxbuf);
if (mlx5e_xdp_handle(rq, prog, &mxbuf))
return NULL; /* page/packet was consumed by XDP */
@@ -1685,7 +1715,8 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
net_prefetchw(va); /* xdp_frame data area */
net_prefetch(va + rx_headroom);
- mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, frag_consumed_bytes, &mxbuf);
+ mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, rq->buff.frame0_sz,
+ frag_consumed_bytes, &mxbuf);
sinfo = xdp_get_shared_info_from_buff(&mxbuf.xdp);
truesize = 0;
@@ -1694,35 +1725,12 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
wi++;
while (cqe_bcnt) {
- skb_frag_t *frag;
-
frag_page = wi->frag_page;
frag_consumed_bytes = min_t(u32, frag_info->frag_size, cqe_bcnt);
- addr = page_pool_get_dma_addr(frag_page->page);
- dma_sync_single_for_cpu(rq->pdev, addr + wi->offset,
- frag_consumed_bytes, rq->buff.map_dir);
-
- if (!xdp_buff_has_frags(&mxbuf.xdp)) {
- /* Init on the first fragment to avoid cold cache access
- * when possible.
- */
- sinfo->nr_frags = 0;
- sinfo->xdp_frags_size = 0;
- xdp_buff_set_frags_flag(&mxbuf.xdp);
- }
-
- frag = &sinfo->frags[sinfo->nr_frags++];
-
- __skb_frag_set_page(frag, frag_page->page);
- skb_frag_off_set(frag, wi->offset);
- skb_frag_size_set(frag, frag_consumed_bytes);
-
- if (page_is_pfmemalloc(frag_page->page))
- xdp_buff_set_frag_pfmemalloc(&mxbuf.xdp);
-
- sinfo->xdp_frags_size += frag_consumed_bytes;
+ mlx5e_add_skb_shared_info_frag(rq, sinfo, &mxbuf.xdp, frag_page,
+ wi->offset, frag_consumed_bytes);
truesize += frag_info->frag_stride;
cqe_bcnt -= frag_consumed_bytes;
@@ -1969,35 +1977,139 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
struct mlx5e_frag_page *frag_page = &wi->alloc_units.frag_pages[page_idx];
u16 headlen = min_t(u16, MLX5E_RX_MAX_HEAD, cqe_bcnt);
struct mlx5e_frag_page *head_page = frag_page;
- u32 frag_offset = head_offset + headlen;
- u32 byte_cnt = cqe_bcnt - headlen;
+ u32 frag_offset = head_offset;
+ u32 byte_cnt = cqe_bcnt;
+ struct skb_shared_info *sinfo;
+ struct mlx5e_xdp_buff mxbuf;
+ unsigned int truesize = 0;
+ struct bpf_prog *prog;
struct sk_buff *skb;
- dma_addr_t addr;
+ u32 linear_frame_sz;
+ u16 linear_data_len;
+ u16 linear_hr;
+ void *va;
- skb = napi_alloc_skb(rq->cq.napi,
- ALIGN(MLX5E_RX_MAX_HEAD, sizeof(long)));
- if (unlikely(!skb)) {
- rq->stats->buff_alloc_err++;
- return NULL;
+ prog = rcu_dereference(rq->xdp_prog);
+
+ if (prog) {
+ /* area for bpf_xdp_[store|load]_bytes */
+ net_prefetchw(page_address(frag_page->page) + frag_offset);
+ if (unlikely(mlx5e_page_alloc_fragmented(rq, &wi->linear_page))) {
+ rq->stats->buff_alloc_err++;
+ return NULL;
+ }
+ va = page_address(wi->linear_page.page);
+ net_prefetchw(va); /* xdp_frame data area */
+ linear_hr = XDP_PACKET_HEADROOM;
+ linear_data_len = 0;
+ linear_frame_sz = MLX5_SKB_FRAG_SZ(linear_hr + MLX5E_RX_MAX_HEAD);
+ } else {
+ skb = napi_alloc_skb(rq->cq.napi,
+ ALIGN(MLX5E_RX_MAX_HEAD, sizeof(long)));
+ if (unlikely(!skb)) {
+ rq->stats->buff_alloc_err++;
+ return NULL;
+ }
+ skb_mark_for_recycle(skb);
+ va = skb->head;
+ net_prefetchw(va); /* xdp_frame data area */
+ net_prefetchw(skb->data);
+
+ frag_offset += headlen;
+ byte_cnt -= headlen;
+ linear_hr = skb_headroom(skb);
+ linear_data_len = headlen;
+ linear_frame_sz = MLX5_SKB_FRAG_SZ(skb_end_offset(skb));
+ if (unlikely(frag_offset >= PAGE_SIZE)) {
+ frag_page++;
+ frag_offset -= PAGE_SIZE;
+ }
}
- net_prefetchw(skb->data);
+ mlx5e_fill_mxbuf(rq, cqe, va, linear_hr, linear_frame_sz, linear_data_len, &mxbuf);
+
+ sinfo = xdp_get_shared_info_from_buff(&mxbuf.xdp);
+
+ while (byte_cnt) {
+ /* Non-linear mode, hence non-XSK, which always uses PAGE_SIZE. */
+ u32 pg_consumed_bytes = min_t(u32, PAGE_SIZE - frag_offset, byte_cnt);
- /* Non-linear mode, hence non-XSK, which always uses PAGE_SIZE. */
- if (unlikely(frag_offset >= PAGE_SIZE)) {
+ if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
+ truesize += pg_consumed_bytes;
+ else
+ truesize += ALIGN(pg_consumed_bytes, BIT(rq->mpwqe.log_stride_sz));
+
+ mlx5e_add_skb_shared_info_frag(rq, sinfo, &mxbuf.xdp, frag_page, frag_offset,
+ pg_consumed_bytes);
+ byte_cnt -= pg_consumed_bytes;
+ frag_offset = 0;
frag_page++;
- frag_offset -= PAGE_SIZE;
}
- skb_mark_for_recycle(skb);
- mlx5e_fill_skb_data(skb, rq, frag_page, byte_cnt, frag_offset);
- /* copy header */
- addr = page_pool_get_dma_addr(head_page->page);
- mlx5e_copy_skb_header(rq, skb, head_page->page, addr,
- head_offset, head_offset, headlen);
- /* skb linear part was allocated with headlen and aligned to long */
- skb->tail += headlen;
- skb->len += headlen;
+ if (prog) {
+ if (mlx5e_xdp_handle(rq, prog, &mxbuf)) {
+ if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
+ int i;
+
+ for (i = 0; i < sinfo->nr_frags; i++)
+ /* non-atomic */
+ __set_bit(page_idx + i, wi->skip_release_bitmap);
+ return NULL;
+ }
+ mlx5e_page_release_fragmented(rq, &wi->linear_page);
+ return NULL; /* page/packet was consumed by XDP */
+ }
+
+ skb = mlx5e_build_linear_skb(rq, mxbuf.xdp.data_hard_start,
+ linear_frame_sz,
+ mxbuf.xdp.data - mxbuf.xdp.data_hard_start, 0,
+ mxbuf.xdp.data - mxbuf.xdp.data_meta);
+ if (unlikely(!skb)) {
+ mlx5e_page_release_fragmented(rq, &wi->linear_page);
+ return NULL;
+ }
+
+ skb_mark_for_recycle(skb);
+ wi->linear_page.frags++;
+ mlx5e_page_release_fragmented(rq, &wi->linear_page);
+
+ if (xdp_buff_has_frags(&mxbuf.xdp)) {
+ struct mlx5e_frag_page *pagep;
+
+ /* sinfo->nr_frags is reset by build_skb, so recalculate it here. */
+ xdp_update_skb_shared_info(skb, frag_page - head_page,
+ sinfo->xdp_frags_size, truesize,
+ xdp_buff_is_frag_pfmemalloc(&mxbuf.xdp));
+
+ pagep = head_page;
+ do
+ pagep->frags++;
+ while (++pagep < frag_page);
+ }
+ __pskb_pull_tail(skb, headlen);
+ } else {
+ dma_addr_t addr;
+
+ if (xdp_buff_has_frags(&mxbuf.xdp)) {
+ struct mlx5e_frag_page *pagep;
+
+ xdp_update_skb_shared_info(skb, sinfo->nr_frags,
+ sinfo->xdp_frags_size, truesize,
+ xdp_buff_is_frag_pfmemalloc(&mxbuf.xdp));
+
+ pagep = frag_page - sinfo->nr_frags;
+ do
+ pagep->frags++;
+ while (++pagep < frag_page);
+ }
+ /* copy header */
+ addr = page_pool_get_dma_addr(head_page->page);
+ mlx5e_copy_skb_header(rq, skb, head_page->page, addr,
+ head_offset, head_offset, headlen);
+ /* skb linear part was allocated with headlen and aligned to long */
+ skb->tail += headlen;
+ skb->len += headlen;
+ }
return skb;
}
@@ -2036,7 +2148,8 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
struct mlx5e_xdp_buff mxbuf;
net_prefetchw(va); /* xdp_frame data area */
- mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, cqe_bcnt, &mxbuf);
+ mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, rq->buff.frame0_sz,
+ cqe_bcnt, &mxbuf);
if (mlx5e_xdp_handle(rq, prog, &mxbuf)) {
if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
__set_bit(page_idx, wi->skip_release_bitmap); /* non-atomic */
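
The non-linear MPWQE path above walks the payload page by page: each iteration consumes at most PAGE_SIZE - frag_offset bytes and resets the offset to zero for every page after the first. A small sketch of that walk with made-up sizes:

#include <stdio.h>

#define PAGE_SIZE 4096u

static unsigned int min_u32(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int frag_offset = 1000, byte_cnt = 10000, page = 0;

	while (byte_cnt) {
		unsigned int chunk = min_u32(PAGE_SIZE - frag_offset, byte_cnt);

		printf("page %u: offset %u len %u\n", page, frag_offset, chunk);
		byte_cnt -= chunk;
		frag_offset = 0;	/* later pages start at offset 0 */
		page++;
	}
	return 0;
}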
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 083ce31f95b6..728b82ce4031 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -177,7 +177,8 @@ static struct lock_class_key tc_ht_wq_key;
static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow);
static void free_flow_post_acts(struct mlx5e_tc_flow *flow);
-static void mlx5_free_flow_attr(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *attr);
+static void mlx5_free_flow_attr_actions(struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr);
void
mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec,
@@ -487,15 +488,6 @@ mlx5e_tc_rule_offload(struct mlx5e_priv *priv,
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
int err;
- if (attr->flags & MLX5_ATTR_FLAG_CT) {
- struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts =
- &attr->parse_attr->mod_hdr_acts;
-
- return mlx5_tc_ct_flow_offload(get_ct_priv(priv),
- spec, attr,
- mod_hdr_acts);
- }
-
if (!is_mdev_switchdev_mode(priv->mdev))
return mlx5e_add_offloaded_nic_rule(priv, spec, attr);
@@ -518,11 +510,6 @@ mlx5e_tc_rule_unoffload(struct mlx5e_priv *priv,
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- if (attr->flags & MLX5_ATTR_FLAG_CT) {
- mlx5_tc_ct_delete_flow(get_ct_priv(priv), attr);
- return;
- }
-
if (!is_mdev_switchdev_mode(priv->mdev)) {
mlx5e_del_offloaded_nic_rule(priv, rule, attr);
return;
@@ -1395,13 +1382,7 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
return err;
}
- if (attr->flags & MLX5_ATTR_FLAG_CT)
- flow->rule[0] = mlx5_tc_ct_flow_offload(get_ct_priv(priv), &parse_attr->spec,
- attr, &parse_attr->mod_hdr_acts);
- else
- flow->rule[0] = mlx5e_add_offloaded_nic_rule(priv, &parse_attr->spec,
- attr);
-
+ flow->rule[0] = mlx5e_add_offloaded_nic_rule(priv, &parse_attr->spec, attr);
return PTR_ERR_OR_ZERO(flow->rule[0]);
}
@@ -1432,9 +1413,7 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
flow_flag_clear(flow, OFFLOADED);
- if (attr->flags & MLX5_ATTR_FLAG_CT)
- mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), attr);
- else if (!IS_ERR_OR_NULL(flow->rule[0]))
+ if (!IS_ERR_OR_NULL(flow->rule[0]))
mlx5e_del_offloaded_nic_rule(priv, flow->rule[0], attr);
/* Remove root table if no rules are left to avoid
@@ -1785,8 +1764,7 @@ out:
static void
clean_encap_dests(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
- struct mlx5_flow_attr *attr,
- bool *vf_tun)
+ struct mlx5_flow_attr *attr)
{
struct mlx5_esw_flow_attr *esw_attr;
int out_index;
@@ -1795,17 +1773,11 @@ clean_encap_dests(struct mlx5e_priv *priv,
return;
esw_attr = attr->esw_attr;
- *vf_tun = false;
for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
if (!(esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
continue;
- if (esw_attr->dests[out_index].flags &
- MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE &&
- !esw_attr->dest_int_port)
- *vf_tun = true;
-
mlx5e_detach_encap(priv, flow, attr, out_index);
kfree(attr->parse_attr->tun_info[out_index]);
}
@@ -2028,7 +2000,7 @@ static void free_branch_attr(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *
if (!attr)
return;
- mlx5_free_flow_attr(flow, attr);
+ mlx5_free_flow_attr_actions(flow, attr);
kvfree(attr->parse_attr);
kfree(attr);
}
@@ -2039,7 +2011,6 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_flow_attr *attr = flow->attr;
struct mlx5_esw_flow_attr *esw_attr;
- bool vf_tun;
esw_attr = attr->esw_attr;
mlx5e_put_flow_tunnel_id(flow);
@@ -2061,18 +2032,8 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
if (flow->decap_route)
mlx5e_detach_decap_route(priv, flow);
- clean_encap_dests(priv, flow, attr, &vf_tun);
-
mlx5_tc_ct_match_del(get_ct_priv(priv), &flow->attr->ct_attr);
- if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
- mlx5e_mod_hdr_dealloc(&attr->parse_attr->mod_hdr_acts);
- mlx5e_tc_detach_mod_hdr(priv, flow, attr);
- }
-
- if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
- mlx5_fc_destroy(esw_attr->counter_dev, attr->counter);
-
if (esw_attr->int_port)
mlx5e_tc_int_port_put(mlx5e_get_int_port_priv(priv), esw_attr->int_port);
@@ -2085,8 +2046,7 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
mlx5e_tc_act_stats_del_flow(get_act_stats_handle(priv), flow);
free_flow_post_acts(flow);
- free_branch_attr(flow, attr->branch_true);
- free_branch_attr(flow, attr->branch_false);
+ mlx5_free_flow_attr_actions(flow, attr);
kvfree(attr->esw_attr->rx_tun_attr);
kvfree(attr->parse_attr);
@@ -3463,114 +3423,59 @@ struct ipv6_hoplimit_word {
};
static bool
-is_action_keys_supported(const struct flow_action_entry *act, bool ct_flow,
- bool *modify_ip_header, bool *modify_tuple,
- struct netlink_ext_ack *extack)
+is_flow_action_modify_ip_header(struct flow_action *flow_action)
{
+ const struct flow_action_entry *act;
u32 mask, offset;
u8 htype;
+ int i;
- htype = act->mangle.htype;
- offset = act->mangle.offset;
- mask = ~act->mangle.mask;
/* For the IPv4 & IPv6 headers, check the 4-byte word to
 * determine whether the modified fields are anything other
 * than ttl & hop_limit.
 */
- if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4) {
- struct ip_ttl_word *ttl_word =
- (struct ip_ttl_word *)&mask;
-
- if (offset != offsetof(struct iphdr, ttl) ||
- ttl_word->protocol ||
- ttl_word->check) {
- *modify_ip_header = true;
- }
-
- if (offset >= offsetof(struct iphdr, saddr))
- *modify_tuple = true;
-
- if (ct_flow && *modify_tuple) {
- NL_SET_ERR_MSG_MOD(extack,
- "can't offload re-write of ipv4 address with action ct");
- return false;
- }
- } else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
- struct ipv6_hoplimit_word *hoplimit_word =
- (struct ipv6_hoplimit_word *)&mask;
-
- if (offset != offsetof(struct ipv6hdr, payload_len) ||
- hoplimit_word->payload_len ||
- hoplimit_word->nexthdr) {
- *modify_ip_header = true;
- }
-
- if (ct_flow && offset >= offsetof(struct ipv6hdr, saddr))
- *modify_tuple = true;
+ flow_action_for_each(i, act, flow_action) {
+ if (act->id != FLOW_ACTION_MANGLE &&
+ act->id != FLOW_ACTION_ADD)
+ continue;
- if (ct_flow && *modify_tuple) {
- NL_SET_ERR_MSG_MOD(extack,
- "can't offload re-write of ipv6 address with action ct");
- return false;
+ htype = act->mangle.htype;
+ offset = act->mangle.offset;
+ mask = ~act->mangle.mask;
+
+ if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4) {
+ struct ip_ttl_word *ttl_word =
+ (struct ip_ttl_word *)&mask;
+
+ if (offset != offsetof(struct iphdr, ttl) ||
+ ttl_word->protocol ||
+ ttl_word->check)
+ return true;
+ } else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
+ struct ipv6_hoplimit_word *hoplimit_word =
+ (struct ipv6_hoplimit_word *)&mask;
+
+ if (offset != offsetof(struct ipv6hdr, payload_len) ||
+ hoplimit_word->payload_len ||
+ hoplimit_word->nexthdr)
+ return true;
}
- } else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_TCP ||
- htype == FLOW_ACT_MANGLE_HDR_TYPE_UDP) {
- *modify_tuple = true;
- if (ct_flow) {
- NL_SET_ERR_MSG_MOD(extack,
- "can't offload re-write of transport header ports with action ct");
- return false;
- }
- }
-
- return true;
-}
-
-static bool modify_tuple_supported(bool modify_tuple, bool ct_clear,
- bool ct_flow, struct netlink_ext_ack *extack,
- struct mlx5e_priv *priv,
- struct mlx5_flow_spec *spec)
-{
- if (!modify_tuple || ct_clear)
- return true;
-
- if (ct_flow) {
- NL_SET_ERR_MSG_MOD(extack,
- "can't offload tuple modification with non-clear ct()");
- netdev_info(priv->netdev,
- "can't offload tuple modification with non-clear ct()");
- return false;
- }
-
- /* Add ct_state=-trk match so it will be offloaded for non ct flows
- * (or after clear action), as otherwise, since the tuple is changed,
- * we can't restore ct state
- */
- if (mlx5_tc_ct_add_no_trk_match(spec)) {
- NL_SET_ERR_MSG_MOD(extack,
- "can't offload tuple modification with ct matches and no ct(clear) action");
- netdev_info(priv->netdev,
- "can't offload tuple modification with ct matches and no ct(clear) action");
- return false;
}
- return true;
+ return false;
}
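
is_flow_action_modify_ip_header() above keeps the old trick of overlaying the inverted 4-byte mangle mask on a ttl-word layout to decide whether the rewrite touches anything beyond ttl/hop_limit. A hedged user-space sketch of that check, using memcpy instead of the driver's pointer cast and assuming little-endian byte order for the test values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct ip_ttl_word {
	uint8_t ttl;
	uint8_t protocol;
	uint16_t check;
};

static bool modifies_more_than_ttl(uint32_t mangle_mask)
{
	uint32_t mask = ~mangle_mask;	/* set bits = bytes being rewritten */
	struct ip_ttl_word w;

	memcpy(&w, &mask, sizeof(w));	/* same overlay the driver does */
	return w.protocol || w.check;
}

int main(void)
{
	printf("%d\n", modifies_more_than_ttl(~0xffu));		/* 0: ttl only */
	printf("%d\n", modifies_more_than_ttl(~0xffffu));	/* 1: touches protocol */
	return 0;
}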
static bool modify_header_match_supported(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
struct flow_action *flow_action,
- u32 actions, bool ct_flow,
- bool ct_clear,
+ u32 actions,
struct netlink_ext_ack *extack)
{
- const struct flow_action_entry *act;
- bool modify_ip_header, modify_tuple;
+ bool modify_ip_header;
void *headers_c;
void *headers_v;
u16 ethertype;
u8 ip_proto;
- int i;
headers_c = mlx5e_get_match_headers_criteria(actions, spec);
headers_v = mlx5e_get_match_headers_value(actions, spec);
@@ -3581,23 +3486,7 @@ static bool modify_header_match_supported(struct mlx5e_priv *priv,
ethertype != ETH_P_IP && ethertype != ETH_P_IPV6)
goto out_ok;
- modify_ip_header = false;
- modify_tuple = false;
- flow_action_for_each(i, act, flow_action) {
- if (act->id != FLOW_ACTION_MANGLE &&
- act->id != FLOW_ACTION_ADD)
- continue;
-
- if (!is_action_keys_supported(act, ct_flow,
- &modify_ip_header,
- &modify_tuple, extack))
- return false;
- }
-
- if (!modify_tuple_supported(modify_tuple, ct_clear, ct_flow, extack,
- priv, spec))
- return false;
-
+ modify_ip_header = is_flow_action_modify_ip_header(flow_action);
ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
if (modify_ip_header && ip_proto != IPPROTO_TCP &&
ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) {
@@ -3618,19 +3507,6 @@ actions_match_supported_fdb(struct mlx5e_priv *priv,
struct netlink_ext_ack *extack)
{
struct mlx5_esw_flow_attr *esw_attr = flow->attr->esw_attr;
- bool ct_flow, ct_clear;
-
- ct_clear = flow->attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR;
- ct_flow = flow_flag_test(flow, CT) && !ct_clear;
-
- if (esw_attr->split_count && ct_flow &&
- !MLX5_CAP_GEN(esw_attr->in_mdev, reg_c_preserve)) {
- /* All registers used by ct are cleared when using
- * split rules.
- */
- NL_SET_ERR_MSG_MOD(extack, "Can't offload mirroring with action ct");
- return false;
- }
if (esw_attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
NL_SET_ERR_MSG_MOD(extack,
@@ -3651,14 +3527,9 @@ actions_match_supported(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct netlink_ext_ack *extack)
{
- bool ct_flow, ct_clear;
-
- ct_clear = flow->attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR;
- ct_flow = flow_flag_test(flow, CT) && !ct_clear;
-
if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
- !modify_header_match_supported(priv, &parse_attr->spec, flow_action,
- actions, ct_flow, ct_clear, extack))
+ !modify_header_match_supported(priv, &parse_attr->spec, flow_action, actions,
+ extack))
return false;
if (mlx5e_is_eswitch_flow(flow) &&
@@ -3752,6 +3623,7 @@ mlx5e_clone_flow_attr_for_post_act(struct mlx5_flow_attr *attr,
attr2->dest_chain = 0;
attr2->dest_ft = NULL;
attr2->act_id_restore_rule = NULL;
+ memset(&attr2->ct_attr, 0, sizeof(attr2->ct_attr));
if (ns_type == MLX5_FLOW_NAMESPACE_FDB) {
attr2->esw_attr->out_count = 0;
@@ -3805,9 +3677,7 @@ free_flow_post_acts(struct mlx5e_tc_flow *flow)
if (list_is_last(&attr->list, &flow->attrs))
break;
- mlx5_free_flow_attr(flow, attr);
- free_branch_attr(flow, attr->branch_true);
- free_branch_attr(flow, attr->branch_false);
+ mlx5_free_flow_attr_actions(flow, attr);
list_del(&attr->list);
kvfree(attr->parse_attr);
@@ -4065,76 +3935,79 @@ parse_tc_actions(struct mlx5e_tc_act_parse_state *parse_state,
struct flow_action *flow_action)
{
struct netlink_ext_ack *extack = parse_state->extack;
- struct mlx5e_tc_flow_action flow_action_reorder;
struct mlx5e_tc_flow *flow = parse_state->flow;
struct mlx5e_tc_jump_state jump_state = {};
struct mlx5_flow_attr *attr = flow->attr;
enum mlx5_flow_namespace_type ns_type;
struct mlx5e_priv *priv = flow->priv;
- struct flow_action_entry *act, **_act;
+ struct mlx5_flow_attr *prev_attr;
+ struct flow_action_entry *act;
struct mlx5e_tc_act *tc_act;
+ bool is_missable;
int err, i;
- flow_action_reorder.num_entries = flow_action->num_entries;
- flow_action_reorder.entries = kcalloc(flow_action->num_entries,
- sizeof(flow_action), GFP_KERNEL);
- if (!flow_action_reorder.entries)
- return -ENOMEM;
-
- mlx5e_tc_act_reorder_flow_actions(flow_action, &flow_action_reorder);
-
ns_type = mlx5e_get_flow_namespace(flow);
list_add(&attr->list, &flow->attrs);
- flow_action_for_each(i, _act, &flow_action_reorder) {
+ flow_action_for_each(i, act, flow_action) {
jump_state.jump_target = false;
- act = *_act;
+ is_missable = false;
+ prev_attr = attr;
+
tc_act = mlx5e_tc_act_get(act->id, ns_type);
if (!tc_act) {
NL_SET_ERR_MSG_MOD(extack, "Not implemented offload action");
err = -EOPNOTSUPP;
- goto out_free;
+ goto out_free_post_acts;
}
- if (!tc_act->can_offload(parse_state, act, i, attr)) {
+ if (tc_act->can_offload && !tc_act->can_offload(parse_state, act, i, attr)) {
err = -EOPNOTSUPP;
- goto out_free;
+ goto out_free_post_acts;
}
err = tc_act->parse_action(parse_state, act, priv, attr);
if (err)
- goto out_free;
+ goto out_free_post_acts;
dec_jump_count(act, tc_act, attr, priv, &jump_state);
err = parse_branch_ctrl(act, tc_act, flow, attr, &jump_state, extack);
if (err)
- goto out_free;
+ goto out_free_post_acts;
parse_state->actions |= attr->action;
- if (!tc_act->stats_action)
- attr->tc_act_cookies[attr->tc_act_cookies_count++] = act->cookie;
/* Split attr for multi table act if not the last act. */
if (jump_state.jump_target ||
(tc_act->is_multi_table_act &&
tc_act->is_multi_table_act(priv, act, attr) &&
- i < flow_action_reorder.num_entries - 1)) {
+ i < flow_action->num_entries - 1)) {
+ is_missable = tc_act->is_missable ? tc_act->is_missable(act) : false;
+
err = mlx5e_tc_act_post_parse(parse_state, flow_action, attr, ns_type);
if (err)
- goto out_free;
+ goto out_free_post_acts;
attr = mlx5e_clone_flow_attr_for_post_act(flow->attr, ns_type);
if (!attr) {
err = -ENOMEM;
- goto out_free;
+ goto out_free_post_acts;
}
list_add(&attr->list, &flow->attrs);
}
- }
- kfree(flow_action_reorder.entries);
+ if (is_missable) {
+ /* Add counter to prev, and assign act to new (next) attr */
+ prev_attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ flow_flag_set(flow, USE_ACT_STATS);
+
+ attr->tc_act_cookies[attr->tc_act_cookies_count++] = act->cookie;
+ } else if (!tc_act->stats_action) {
+ prev_attr->tc_act_cookies[prev_attr->tc_act_cookies_count++] = act->cookie;
+ }
+ }
err = mlx5e_tc_act_post_parse(parse_state, flow_action, attr, ns_type);
if (err)
@@ -4146,8 +4019,6 @@ parse_tc_actions(struct mlx5e_tc_act_parse_state *parse_state,
return 0;
-out_free:
- kfree(flow_action_reorder.entries);
out_free_post_acts:
free_flow_post_acts(flow);
@@ -4442,10 +4313,9 @@ mlx5_alloc_flow_attr(enum mlx5_flow_namespace_type type)
}
static void
-mlx5_free_flow_attr(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *attr)
+mlx5_free_flow_attr_actions(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *attr)
{
struct mlx5_core_dev *counter_dev = get_flow_counter_dev(flow);
- bool vf_tun;
if (!attr)
return;
@@ -4453,7 +4323,7 @@ mlx5_free_flow_attr(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *attr)
if (attr->post_act_handle)
mlx5e_tc_post_act_del(get_post_action(flow->priv), attr->post_act_handle);
- clean_encap_dests(flow->priv, flow, attr, &vf_tun);
+ clean_encap_dests(flow->priv, flow, attr);
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
mlx5_fc_destroy(counter_dev, attr->counter);
@@ -4462,6 +4332,11 @@ mlx5_free_flow_attr(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *attr)
mlx5e_mod_hdr_dealloc(&attr->parse_attr->mod_hdr_acts);
mlx5e_tc_detach_mod_hdr(flow->priv, flow, attr);
}
+
+ mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), attr);
+
+ free_branch_attr(flow, attr->branch_true);
+ free_branch_attr(flow, attr->branch_false);
}
static int
@@ -4923,7 +4798,7 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
goto errout;
}
- if (mlx5e_is_offloaded_flow(flow) || flow_flag_test(flow, CT)) {
+ if (mlx5e_is_offloaded_flow(flow)) {
if (flow_flag_test(flow, USE_ACT_STATS)) {
f->use_act_stats = true;
} else {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index eb41f0abf798..1c35d721a31d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -1070,10 +1070,11 @@ mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector)
list_for_each_entry(eq, &table->comp_eqs_list, list) {
if (i++ == vector)
- break;
+ return mlx5_irq_get_affinity_mask(eq->core.irq);
}
- return mlx5_irq_get_affinity_mask(eq->core.irq);
+ WARN_ON_ONCE(1);
+ return NULL;
}
EXPORT_SYMBOL(mlx5_comp_irq_get_affinity_mask);
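
The eq.c change above returns from inside the list walk on a match, and returns NULL with a warning if the walk completes, instead of dereferencing whatever the iterator points at after the loop (the list-head sentinel when vector is out of range). A minimal sketch of the safe search shape on a plain singly linked list:

#include <stdio.h>

struct eq {
	int irq;
	struct eq *next;
};

static int *irq_of_vector(struct eq *head, int vector)
{
	int i = 0;

	for (struct eq *eq = head; eq; eq = eq->next)
		if (i++ == vector)
			return &eq->irq;	/* match: return from inside the loop */

	return NULL;	/* vector >= list length: caller must check */
}

int main(void)
{
	struct eq b = { .irq = 11 };
	struct eq a = { .irq = 10, .next = &b };
	int *p = irq_of_vector(&a, 1);

	printf("%d\n", p ? *p : -1);			/* 11 */
	printf("%p\n", (void *)irq_of_vector(&a, 5));	/* NULL: out of range */
	return 0;
}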
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
index 3cdcb0e0b20f..1ba03e219111 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
@@ -13,66 +13,6 @@
#define CREATE_TRACE_POINTS
#include "diag/bridge_tracepoint.h"
-#define MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_SIZE 12000
-#define MLX5_ESW_BRIDGE_INGRESS_TABLE_UNTAGGED_GRP_SIZE 16000
-#define MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_FROM 0
-#define MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_TO \
- (MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_SIZE - 1)
-#define MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_FILTER_GRP_IDX_FROM \
- (MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_TO + 1)
-#define MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_FILTER_GRP_IDX_TO \
- (MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_FILTER_GRP_IDX_FROM + \
- MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_SIZE - 1)
-#define MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_GRP_IDX_FROM \
- (MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_FILTER_GRP_IDX_TO + 1)
-#define MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_GRP_IDX_TO \
- (MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_GRP_IDX_FROM + \
- MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_SIZE - 1)
-#define MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_FILTER_GRP_IDX_FROM \
- (MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_GRP_IDX_TO + 1)
-#define MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_FILTER_GRP_IDX_TO \
- (MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_FILTER_GRP_IDX_FROM + \
- MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_SIZE - 1)
-#define MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_FROM \
- (MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_FILTER_GRP_IDX_TO + 1)
-#define MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_TO \
- (MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_FROM + \
- MLX5_ESW_BRIDGE_INGRESS_TABLE_UNTAGGED_GRP_SIZE - 1)
-#define MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE \
- (MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_TO + 1)
-static_assert(MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE == 64000);
-
-#define MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_SIZE 16000
-#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_SIZE (32000 - 1)
-#define MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_FROM 0
-#define MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO \
- (MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_SIZE - 1)
-#define MLX5_ESW_BRIDGE_EGRESS_TABLE_QINQ_GRP_IDX_FROM \
- (MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO + 1)
-#define MLX5_ESW_BRIDGE_EGRESS_TABLE_QINQ_GRP_IDX_TO \
- (MLX5_ESW_BRIDGE_EGRESS_TABLE_QINQ_GRP_IDX_FROM + \
- MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_SIZE - 1)
-#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_FROM \
- (MLX5_ESW_BRIDGE_EGRESS_TABLE_QINQ_GRP_IDX_TO + 1)
-#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_TO \
- (MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_FROM + \
- MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_SIZE - 1)
-#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_FROM \
- (MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_TO + 1)
-#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_TO \
- MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_FROM
-#define MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE \
- (MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_TO + 1)
-static_assert(MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE == 64000);
-
-#define MLX5_ESW_BRIDGE_SKIP_TABLE_SIZE 0
-
-enum {
- MLX5_ESW_BRIDGE_LEVEL_INGRESS_TABLE,
- MLX5_ESW_BRIDGE_LEVEL_EGRESS_TABLE,
- MLX5_ESW_BRIDGE_LEVEL_SKIP_TABLE,
-};
-
static const struct rhashtable_params fdb_ht_params = {
.key_offset = offsetof(struct mlx5_esw_bridge_fdb_entry, key),
.key_len = sizeof(struct mlx5_esw_bridge_fdb_key),
@@ -80,31 +20,6 @@ static const struct rhashtable_params fdb_ht_params = {
.automatic_shrinking = true,
};
-enum {
- MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG = BIT(0),
-};
-
-struct mlx5_esw_bridge {
- int ifindex;
- int refcnt;
- struct list_head list;
- struct mlx5_esw_bridge_offloads *br_offloads;
-
- struct list_head fdb_list;
- struct rhashtable fdb_ht;
-
- struct mlx5_flow_table *egress_ft;
- struct mlx5_flow_group *egress_vlan_fg;
- struct mlx5_flow_group *egress_qinq_fg;
- struct mlx5_flow_group *egress_mac_fg;
- struct mlx5_flow_group *egress_miss_fg;
- struct mlx5_pkt_reformat *egress_miss_pkt_reformat;
- struct mlx5_flow_handle *egress_miss_handle;
- unsigned long ageing_time;
- u32 flags;
- u16 vlan_proto;
-};
-
static void
mlx5_esw_bridge_fdb_offload_notify(struct net_device *dev, const unsigned char *addr, u16 vid,
unsigned long val)
@@ -146,7 +61,7 @@ mlx5_esw_bridge_pkt_reformat_vlan_pop_create(struct mlx5_eswitch *esw)
return mlx5_packet_reformat_alloc(esw->dev, &reformat_params, MLX5_FLOW_NAMESPACE_FDB);
}
-static struct mlx5_flow_table *
+struct mlx5_flow_table *
mlx5_esw_bridge_table_create(int max_fte, u32 level, struct mlx5_eswitch *esw)
{
struct mlx5_flow_table_attr ft_attr = {};
@@ -925,6 +840,10 @@ static struct mlx5_esw_bridge *mlx5_esw_bridge_create(int ifindex,
if (err)
goto err_fdb_ht;
+ err = mlx5_esw_bridge_mdb_init(bridge);
+ if (err)
+ goto err_mdb_ht;
+
INIT_LIST_HEAD(&bridge->fdb_list);
bridge->ifindex = ifindex;
bridge->refcnt = 1;
@@ -934,6 +853,8 @@ static struct mlx5_esw_bridge *mlx5_esw_bridge_create(int ifindex,
return bridge;
+err_mdb_ht:
+ rhashtable_destroy(&bridge->fdb_ht);
err_fdb_ht:
mlx5_esw_bridge_egress_table_cleanup(bridge);
err_egress_tbl:
@@ -953,7 +874,9 @@ static void mlx5_esw_bridge_put(struct mlx5_esw_bridge_offloads *br_offloads,
return;
mlx5_esw_bridge_egress_table_cleanup(bridge);
+ mlx5_esw_bridge_mcast_disable(bridge);
list_del(&bridge->list);
+ mlx5_esw_bridge_mdb_cleanup(bridge);
rhashtable_destroy(&bridge->fdb_ht);
kvfree(bridge);
@@ -993,7 +916,7 @@ static unsigned long mlx5_esw_bridge_port_key_from_data(u16 vport_num, u16 esw_o
return vport_num | (unsigned long)esw_owner_vhca_id << sizeof(vport_num) * BITS_PER_BYTE;
}
-static unsigned long mlx5_esw_bridge_port_key(struct mlx5_esw_bridge_port *port)
+unsigned long mlx5_esw_bridge_port_key(struct mlx5_esw_bridge_port *port)
{
return mlx5_esw_bridge_port_key_from_data(port->vport_num, port->esw_owner_vhca_id);
}
@@ -1018,6 +941,19 @@ static void mlx5_esw_bridge_port_erase(struct mlx5_esw_bridge_port *port,
xa_erase(&br_offloads->ports, mlx5_esw_bridge_port_key(port));
}
+static struct mlx5_esw_bridge *
+mlx5_esw_bridge_from_port_lookup(u16 vport_num, u16 esw_owner_vhca_id,
+ struct mlx5_esw_bridge_offloads *br_offloads)
+{
+ struct mlx5_esw_bridge_port *port;
+
+ port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
+ if (!port)
+ return NULL;
+
+ return port->bridge;
+}
+
static void mlx5_esw_bridge_fdb_entry_refresh(struct mlx5_esw_bridge_fdb_entry *entry)
{
trace_mlx5_esw_bridge_fdb_entry_refresh(entry);
@@ -1166,8 +1102,21 @@ mlx5_esw_bridge_vlan_push_mark_cleanup(struct mlx5_esw_bridge_vlan *vlan, struct
}
static int
-mlx5_esw_bridge_vlan_push_pop_create(u16 vlan_proto, u16 flags, struct mlx5_esw_bridge_vlan *vlan,
- struct mlx5_eswitch *esw)
+mlx5_esw_bridge_vlan_push_pop_fhs_create(u16 vlan_proto, struct mlx5_esw_bridge_port *port,
+ struct mlx5_esw_bridge_vlan *vlan)
+{
+ return mlx5_esw_bridge_vlan_mcast_init(vlan_proto, port, vlan);
+}
+
+static void
+mlx5_esw_bridge_vlan_push_pop_fhs_cleanup(struct mlx5_esw_bridge_vlan *vlan)
+{
+ mlx5_esw_bridge_vlan_mcast_cleanup(vlan);
+}
+
+static int
+mlx5_esw_bridge_vlan_push_pop_create(u16 vlan_proto, u16 flags, struct mlx5_esw_bridge_port *port,
+ struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
{
int err;
@@ -1185,10 +1134,16 @@ mlx5_esw_bridge_vlan_push_pop_create(u16 vlan_proto, u16 flags, struct mlx5_esw_
err = mlx5_esw_bridge_vlan_pop_create(vlan, esw);
if (err)
goto err_vlan_pop;
+
+ err = mlx5_esw_bridge_vlan_push_pop_fhs_create(vlan_proto, port, vlan);
+ if (err)
+ goto err_vlan_pop_fhs;
}
return 0;
+err_vlan_pop_fhs:
+ mlx5_esw_bridge_vlan_pop_cleanup(vlan, esw);
err_vlan_pop:
if (vlan->pkt_mod_hdr_push_mark)
mlx5_esw_bridge_vlan_push_mark_cleanup(vlan, esw);
@@ -1213,7 +1168,7 @@ mlx5_esw_bridge_vlan_create(u16 vlan_proto, u16 vid, u16 flags, struct mlx5_esw_
vlan->flags = flags;
INIT_LIST_HEAD(&vlan->fdb_list);
- err = mlx5_esw_bridge_vlan_push_pop_create(vlan_proto, flags, vlan, esw);
+ err = mlx5_esw_bridge_vlan_push_pop_create(vlan_proto, flags, port, vlan, esw);
if (err)
goto err_vlan_push_pop;
@@ -1225,6 +1180,8 @@ mlx5_esw_bridge_vlan_create(u16 vlan_proto, u16 vid, u16 flags, struct mlx5_esw_
return vlan;
err_xa_insert:
+ if (vlan->mcast_handle)
+ mlx5_esw_bridge_vlan_push_pop_fhs_cleanup(vlan);
if (vlan->pkt_reformat_pop)
mlx5_esw_bridge_vlan_pop_cleanup(vlan, esw);
if (vlan->pkt_mod_hdr_push_mark)
@@ -1242,7 +1199,8 @@ static void mlx5_esw_bridge_vlan_erase(struct mlx5_esw_bridge_port *port,
xa_erase(&port->vlans, vlan->vid);
}
-static void mlx5_esw_bridge_vlan_flush(struct mlx5_esw_bridge_vlan *vlan,
+static void mlx5_esw_bridge_vlan_flush(struct mlx5_esw_bridge_port *port,
+ struct mlx5_esw_bridge_vlan *vlan,
struct mlx5_esw_bridge *bridge)
{
struct mlx5_eswitch *esw = bridge->br_offloads->esw;
@@ -1250,7 +1208,10 @@ static void mlx5_esw_bridge_vlan_flush(struct mlx5_esw_bridge_vlan *vlan,
list_for_each_entry_safe(entry, tmp, &vlan->fdb_list, vlan_list)
mlx5_esw_bridge_fdb_entry_notify_and_cleanup(entry, bridge);
+ mlx5_esw_bridge_port_mdb_vlan_flush(port, vlan);
+ if (vlan->mcast_handle)
+ mlx5_esw_bridge_vlan_push_pop_fhs_cleanup(vlan);
if (vlan->pkt_reformat_pop)
mlx5_esw_bridge_vlan_pop_cleanup(vlan, esw);
if (vlan->pkt_mod_hdr_push_mark)
@@ -1264,7 +1225,7 @@ static void mlx5_esw_bridge_vlan_cleanup(struct mlx5_esw_bridge_port *port,
struct mlx5_esw_bridge *bridge)
{
trace_mlx5_esw_bridge_vlan_cleanup(vlan);
- mlx5_esw_bridge_vlan_flush(vlan, bridge);
+ mlx5_esw_bridge_vlan_flush(port, vlan, bridge);
mlx5_esw_bridge_vlan_erase(port, vlan);
kvfree(vlan);
}
@@ -1288,9 +1249,9 @@ static int mlx5_esw_bridge_port_vlans_recreate(struct mlx5_esw_bridge_port *port
int err;
xa_for_each(&port->vlans, i, vlan) {
- mlx5_esw_bridge_vlan_flush(vlan, bridge);
- err = mlx5_esw_bridge_vlan_push_pop_create(bridge->vlan_proto, vlan->flags, vlan,
- br_offloads->esw);
+ mlx5_esw_bridge_vlan_flush(port, vlan, bridge);
+ err = mlx5_esw_bridge_vlan_push_pop_create(bridge->vlan_proto, vlan->flags, port,
+ vlan, br_offloads->esw);
if (err) {
esw_warn(br_offloads->esw->dev,
"Failed to create VLAN=%u(proto=%x) push/pop actions (vport=%u,err=%d)\n",
@@ -1473,33 +1434,32 @@ err_ingress_fc_create:
int mlx5_esw_bridge_ageing_time_set(u16 vport_num, u16 esw_owner_vhca_id, unsigned long ageing_time,
struct mlx5_esw_bridge_offloads *br_offloads)
{
- struct mlx5_esw_bridge_port *port;
+ struct mlx5_esw_bridge *bridge;
- port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
- if (!port)
+ bridge = mlx5_esw_bridge_from_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
+ if (!bridge)
return -EINVAL;
- port->bridge->ageing_time = clock_t_to_jiffies(ageing_time);
+ bridge->ageing_time = clock_t_to_jiffies(ageing_time);
return 0;
}
int mlx5_esw_bridge_vlan_filtering_set(u16 vport_num, u16 esw_owner_vhca_id, bool enable,
struct mlx5_esw_bridge_offloads *br_offloads)
{
- struct mlx5_esw_bridge_port *port;
struct mlx5_esw_bridge *bridge;
bool filtering;
- port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
- if (!port)
+ bridge = mlx5_esw_bridge_from_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
+ if (!bridge)
return -EINVAL;
- bridge = port->bridge;
filtering = bridge->flags & MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG;
if (filtering == enable)
return 0;
mlx5_esw_bridge_fdb_flush(bridge);
+ mlx5_esw_bridge_mdb_flush(bridge);
if (enable)
bridge->flags |= MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG;
else
@@ -1511,15 +1471,13 @@ int mlx5_esw_bridge_vlan_filtering_set(u16 vport_num, u16 esw_owner_vhca_id, boo
int mlx5_esw_bridge_vlan_proto_set(u16 vport_num, u16 esw_owner_vhca_id, u16 proto,
struct mlx5_esw_bridge_offloads *br_offloads)
{
- struct mlx5_esw_bridge_port *port;
struct mlx5_esw_bridge *bridge;
- port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id,
- br_offloads);
- if (!port)
+ bridge = mlx5_esw_bridge_from_port_lookup(vport_num, esw_owner_vhca_id,
+ br_offloads);
+ if (!bridge)
return -EINVAL;
- bridge = port->bridge;
if (bridge->vlan_proto == proto)
return 0;
if (proto != ETH_P_8021Q && proto != ETH_P_8021AD) {
@@ -1528,12 +1486,43 @@ int mlx5_esw_bridge_vlan_proto_set(u16 vport_num, u16 esw_owner_vhca_id, u16 pro
}
mlx5_esw_bridge_fdb_flush(bridge);
+ mlx5_esw_bridge_mdb_flush(bridge);
bridge->vlan_proto = proto;
mlx5_esw_bridge_vlans_recreate(bridge);
return 0;
}
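+
+/* Enable or disable multicast snooping offload for the bridge. Offload
+ * requires the FDB multi-path any-table, uplink hairpin and
+ * ignore_flow_level device capabilities.
+ */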
+int mlx5_esw_bridge_mcast_set(u16 vport_num, u16 esw_owner_vhca_id, bool enable,
+ struct mlx5_esw_bridge_offloads *br_offloads)
+{
+ struct mlx5_eswitch *esw = br_offloads->esw;
+ struct mlx5_esw_bridge *bridge;
+ int err = 0;
+ bool mcast;
+
+ if (!(MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_multi_path_any_table) ||
+ MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_multi_path_any_table_limit_regc)) ||
+ !MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_uplink_hairpin) ||
+ !MLX5_CAP_ESW_FLOWTABLE_FDB((esw)->dev, ignore_flow_level))
+ return -EOPNOTSUPP;
+
+ bridge = mlx5_esw_bridge_from_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
+ if (!bridge)
+ return -EINVAL;
+
+ mcast = bridge->flags & MLX5_ESW_BRIDGE_MCAST_FLAG;
+ if (mcast == enable)
+ return 0;
+
+ if (enable)
+ err = mlx5_esw_bridge_mcast_enable(bridge);
+ else
+ mlx5_esw_bridge_mcast_disable(bridge);
+
+ return err;
+}
+
static int mlx5_esw_bridge_vport_init(u16 vport_num, u16 esw_owner_vhca_id, u16 flags,
struct mlx5_esw_bridge_offloads *br_offloads,
struct mlx5_esw_bridge *bridge)
@@ -1551,6 +1540,15 @@ static int mlx5_esw_bridge_vport_init(u16 vport_num, u16 esw_owner_vhca_id, u16
port->bridge = bridge;
port->flags |= flags;
xa_init(&port->vlans);
+
+ err = mlx5_esw_bridge_port_mcast_init(port);
+ if (err) {
+ esw_warn(esw->dev,
+ "Failed to initialize port multicast (vport=%u,esw_owner_vhca_id=%u,err=%d)\n",
+ port->vport_num, port->esw_owner_vhca_id, err);
+ goto err_port_mcast;
+ }
+
err = mlx5_esw_bridge_port_insert(port, br_offloads);
if (err) {
esw_warn(esw->dev,
@@ -1563,6 +1561,8 @@ static int mlx5_esw_bridge_vport_init(u16 vport_num, u16 esw_owner_vhca_id, u16
return 0;
err_port_insert:
+ mlx5_esw_bridge_port_mcast_cleanup(port);
+err_port_mcast:
kvfree(port);
return err;
}
@@ -1580,6 +1580,7 @@ static int mlx5_esw_bridge_vport_cleanup(struct mlx5_esw_bridge_offloads *br_off
trace_mlx5_esw_bridge_vport_cleanup(port);
mlx5_esw_bridge_port_vlans_flush(port, bridge);
+ mlx5_esw_bridge_port_mcast_cleanup(port);
mlx5_esw_bridge_port_erase(port, br_offloads);
kvfree(port);
mlx5_esw_bridge_put(br_offloads, bridge);
@@ -1711,14 +1712,12 @@ void mlx5_esw_bridge_fdb_update_used(struct net_device *dev, u16 vport_num, u16
struct switchdev_notifier_fdb_info *fdb_info)
{
struct mlx5_esw_bridge_fdb_entry *entry;
- struct mlx5_esw_bridge_port *port;
struct mlx5_esw_bridge *bridge;
- port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
- if (!port)
+ bridge = mlx5_esw_bridge_from_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
+ if (!bridge)
return;
- bridge = port->bridge;
entry = mlx5_esw_bridge_fdb_lookup(bridge, fdb_info->addr, fdb_info->vid);
if (!entry) {
esw_debug(br_offloads->esw->dev,
@@ -1765,14 +1764,12 @@ void mlx5_esw_bridge_fdb_remove(struct net_device *dev, u16 vport_num, u16 esw_o
{
struct mlx5_eswitch *esw = br_offloads->esw;
struct mlx5_esw_bridge_fdb_entry *entry;
- struct mlx5_esw_bridge_port *port;
struct mlx5_esw_bridge *bridge;
- port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
- if (!port)
+ bridge = mlx5_esw_bridge_from_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
+ if (!bridge)
return;
- bridge = port->bridge;
entry = mlx5_esw_bridge_fdb_lookup(bridge, fdb_info->addr, fdb_info->vid);
if (!entry) {
esw_debug(esw->dev,
@@ -1806,6 +1803,64 @@ void mlx5_esw_bridge_update(struct mlx5_esw_bridge_offloads *br_offloads)
}
}
+int mlx5_esw_bridge_port_mdb_add(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
+ const unsigned char *addr, u16 vid,
+ struct mlx5_esw_bridge_offloads *br_offloads,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_bridge_vlan *vlan;
+ struct mlx5_esw_bridge_port *port;
+ struct mlx5_esw_bridge *bridge;
+ int err;
+
+ port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
+ if (!port) {
+ esw_warn(br_offloads->esw->dev,
+ "Failed to lookup bridge port to add MDB (MAC=%pM,vport=%u)\n",
+ addr, vport_num);
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+					       "Failed to lookup bridge port to add MDB (MAC=%pM,vport=%u)",
+ addr, vport_num);
+ return -EINVAL;
+ }
+
+ bridge = port->bridge;
+ if (bridge->flags & MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG && vid) {
+ vlan = mlx5_esw_bridge_vlan_lookup(vid, port);
+ if (!vlan) {
+ esw_warn(br_offloads->esw->dev,
+ "Failed to lookup bridge port vlan metadata to create MDB (MAC=%pM,vid=%u,vport=%u)\n",
+ addr, vid, vport_num);
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+					       "Failed to lookup bridge port vlan metadata to create MDB (MAC=%pM,vid=%u,vport=%u)",
+ addr, vid, vport_num);
+ return -EINVAL;
+ }
+ }
+
+ err = mlx5_esw_bridge_port_mdb_attach(dev, port, addr, vid);
+ if (err) {
+		NL_SET_ERR_MSG_FMT_MOD(extack, "Failed to add MDB (MAC=%pM,vid=%u,vport=%u)",
+ addr, vid, vport_num);
+ return err;
+ }
+
+ return 0;
+}
+
+void mlx5_esw_bridge_port_mdb_del(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
+ const unsigned char *addr, u16 vid,
+ struct mlx5_esw_bridge_offloads *br_offloads)
+{
+ struct mlx5_esw_bridge_port *port;
+
+ port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
+ if (!port)
+ return;
+
+ mlx5_esw_bridge_port_mdb_detach(dev, port, addr, vid);
+}
+
static void mlx5_esw_bridge_flush(struct mlx5_esw_bridge_offloads *br_offloads)
{
struct mlx5_esw_bridge_port *port;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h
index 10851a515bca..a9dd18c73d6a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h
@@ -25,12 +25,19 @@ struct mlx5_esw_bridge_offloads {
struct delayed_work update_work;
struct mlx5_flow_table *ingress_ft;
+ struct mlx5_flow_group *ingress_igmp_fg;
+ struct mlx5_flow_group *ingress_mld_fg;
struct mlx5_flow_group *ingress_vlan_fg;
struct mlx5_flow_group *ingress_vlan_filter_fg;
struct mlx5_flow_group *ingress_qinq_fg;
struct mlx5_flow_group *ingress_qinq_filter_fg;
struct mlx5_flow_group *ingress_mac_fg;
+ struct mlx5_flow_handle *igmp_handle;
+ struct mlx5_flow_handle *mld_query_handle;
+ struct mlx5_flow_handle *mld_report_handle;
+ struct mlx5_flow_handle *mld_done_handle;
+
struct mlx5_flow_table *skip_ft;
};
@@ -64,10 +71,20 @@ int mlx5_esw_bridge_vlan_filtering_set(u16 vport_num, u16 esw_owner_vhca_id, boo
struct mlx5_esw_bridge_offloads *br_offloads);
int mlx5_esw_bridge_vlan_proto_set(u16 vport_num, u16 esw_owner_vhca_id, u16 proto,
struct mlx5_esw_bridge_offloads *br_offloads);
+int mlx5_esw_bridge_mcast_set(u16 vport_num, u16 esw_owner_vhca_id, bool enable,
+ struct mlx5_esw_bridge_offloads *br_offloads);
int mlx5_esw_bridge_port_vlan_add(u16 vport_num, u16 esw_owner_vhca_id, u16 vid, u16 flags,
struct mlx5_esw_bridge_offloads *br_offloads,
struct netlink_ext_ack *extack);
void mlx5_esw_bridge_port_vlan_del(u16 vport_num, u16 esw_owner_vhca_id, u16 vid,
struct mlx5_esw_bridge_offloads *br_offloads);
+int mlx5_esw_bridge_port_mdb_add(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
+ const unsigned char *addr, u16 vid,
+ struct mlx5_esw_bridge_offloads *br_offloads,
+ struct netlink_ext_ack *extack);
+void mlx5_esw_bridge_port_mdb_del(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
+ const unsigned char *addr, u16 vid,
+ struct mlx5_esw_bridge_offloads *br_offloads);
+
#endif /* __MLX5_ESW_BRIDGE_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_mcast.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_mcast.c
new file mode 100644
index 000000000000..2eae594a5e80
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_mcast.c
@@ -0,0 +1,1126 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include "lib/devcom.h"
+#include "bridge.h"
+#include "eswitch.h"
+#include "bridge_priv.h"
+#include "diag/bridge_tracepoint.h"
+
+static const struct rhashtable_params mdb_ht_params = {
+ .key_offset = offsetof(struct mlx5_esw_bridge_mdb_entry, key),
+ .key_len = sizeof(struct mlx5_esw_bridge_mdb_key),
+ .head_offset = offsetof(struct mlx5_esw_bridge_mdb_entry, ht_node),
+ .automatic_shrinking = true,
+};
+
+int mlx5_esw_bridge_mdb_init(struct mlx5_esw_bridge *bridge)
+{
+ INIT_LIST_HEAD(&bridge->mdb_list);
+ return rhashtable_init(&bridge->mdb_ht, &mdb_ht_params);
+}
+
+void mlx5_esw_bridge_mdb_cleanup(struct mlx5_esw_bridge *bridge)
+{
+ rhashtable_destroy(&bridge->mdb_ht);
+}
+
+static struct mlx5_esw_bridge_port *
+mlx5_esw_bridge_mdb_port_lookup(struct mlx5_esw_bridge_port *port,
+ struct mlx5_esw_bridge_mdb_entry *entry)
+{
+ return xa_load(&entry->ports, mlx5_esw_bridge_port_key(port));
+}
+
+static int mlx5_esw_bridge_mdb_port_insert(struct mlx5_esw_bridge_port *port,
+ struct mlx5_esw_bridge_mdb_entry *entry)
+{
+ int err = xa_insert(&entry->ports, mlx5_esw_bridge_port_key(port), port, GFP_KERNEL);
+
+ if (!err)
+ entry->num_ports++;
+ return err;
+}
+
+static void mlx5_esw_bridge_mdb_port_remove(struct mlx5_esw_bridge_port *port,
+ struct mlx5_esw_bridge_mdb_entry *entry)
+{
+ xa_erase(&entry->ports, mlx5_esw_bridge_port_key(port));
+ entry->num_ports--;
+}
+
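+/* Create the egress rule for an MDB entry: match the multicast group DMAC
+ * (and VID, when the entry is VLAN-scoped) and forward to the per-port
+ * multicast table of every port attached to the entry.
+ */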
+static struct mlx5_flow_handle *
+mlx5_esw_bridge_mdb_flow_create(u16 esw_owner_vhca_id, struct mlx5_esw_bridge_mdb_entry *entry,
+ struct mlx5_esw_bridge *bridge)
+{
+ struct mlx5_flow_act flow_act = {
+ .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+ .flags = FLOW_ACT_NO_APPEND | FLOW_ACT_IGNORE_FLOW_LEVEL,
+ };
+ int num_dests = entry->num_ports, i = 0;
+ struct mlx5_flow_destination *dests;
+ struct mlx5_esw_bridge_port *port;
+ struct mlx5_flow_spec *rule_spec;
+ struct mlx5_flow_handle *handle;
+ u8 *dmac_v, *dmac_c;
+ unsigned long idx;
+
+ rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
+ if (!rule_spec)
+ return ERR_PTR(-ENOMEM);
+
+ dests = kvcalloc(num_dests, sizeof(*dests), GFP_KERNEL);
+ if (!dests) {
+ kvfree(rule_spec);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ xa_for_each(&entry->ports, idx, port) {
+ dests[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dests[i].ft = port->mcast.ft;
+ i++;
+ }
+
+ rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ dmac_v = MLX5_ADDR_OF(fte_match_param, rule_spec->match_value, outer_headers.dmac_47_16);
+ ether_addr_copy(dmac_v, entry->key.addr);
+ dmac_c = MLX5_ADDR_OF(fte_match_param, rule_spec->match_criteria, outer_headers.dmac_47_16);
+ eth_broadcast_addr(dmac_c);
+
+ if (entry->key.vid) {
+ if (bridge->vlan_proto == ETH_P_8021Q) {
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
+ outer_headers.cvlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
+ outer_headers.cvlan_tag);
+ } else if (bridge->vlan_proto == ETH_P_8021AD) {
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
+ outer_headers.svlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
+ outer_headers.svlan_tag);
+ }
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
+ outer_headers.first_vid);
+ MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.first_vid,
+ entry->key.vid);
+ }
+
+ handle = mlx5_add_flow_rules(bridge->egress_ft, rule_spec, &flow_act, dests, num_dests);
+
+ kvfree(dests);
+ kvfree(rule_spec);
+ return handle;
+}
+
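+/* Replace the MDB entry's egress rule with one that reflects the current
+ * set of attached ports. The stale rule is deleted even if creating the
+ * new one fails, in which case the entry is left not offloaded.
+ */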
+static int
+mlx5_esw_bridge_port_mdb_offload(struct mlx5_esw_bridge_port *port,
+ struct mlx5_esw_bridge_mdb_entry *entry)
+{
+ struct mlx5_flow_handle *handle;
+
+ handle = mlx5_esw_bridge_mdb_flow_create(port->esw_owner_vhca_id, entry, port->bridge);
+ if (entry->egress_handle) {
+ mlx5_del_flow_rules(entry->egress_handle);
+ entry->egress_handle = NULL;
+ }
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+
+ entry->egress_handle = handle;
+ return 0;
+}
+
+static struct mlx5_esw_bridge_mdb_entry *
+mlx5_esw_bridge_mdb_lookup(struct mlx5_esw_bridge *bridge,
+ const unsigned char *addr, u16 vid)
+{
+ struct mlx5_esw_bridge_mdb_key key = {};
+
+ ether_addr_copy(key.addr, addr);
+ key.vid = vid;
+ return rhashtable_lookup_fast(&bridge->mdb_ht, &key, mdb_ht_params);
+}
+
+static struct mlx5_esw_bridge_mdb_entry *
+mlx5_esw_bridge_port_mdb_entry_init(struct mlx5_esw_bridge_port *port,
+ const unsigned char *addr, u16 vid)
+{
+ struct mlx5_esw_bridge *bridge = port->bridge;
+ struct mlx5_esw_bridge_mdb_entry *entry;
+ int err;
+
+ entry = kvzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return ERR_PTR(-ENOMEM);
+
+ ether_addr_copy(entry->key.addr, addr);
+ entry->key.vid = vid;
+ xa_init(&entry->ports);
+ err = rhashtable_insert_fast(&bridge->mdb_ht, &entry->ht_node, mdb_ht_params);
+ if (err)
+ goto err_ht_insert;
+
+ list_add(&entry->list, &bridge->mdb_list);
+
+ return entry;
+
+err_ht_insert:
+ xa_destroy(&entry->ports);
+ kvfree(entry);
+ return ERR_PTR(err);
+}
+
+static void mlx5_esw_bridge_port_mdb_entry_cleanup(struct mlx5_esw_bridge *bridge,
+ struct mlx5_esw_bridge_mdb_entry *entry)
+{
+ if (entry->egress_handle)
+ mlx5_del_flow_rules(entry->egress_handle);
+ list_del(&entry->list);
+ rhashtable_remove_fast(&bridge->mdb_ht, &entry->ht_node, mdb_ht_params);
+ xa_destroy(&entry->ports);
+ kvfree(entry);
+}
+
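+/* Attach a port to an MDB entry, allocating the entry on first use, and
+ * refresh the entry's egress rule. An offload failure is only logged,
+ * since the entry may still be replicated to other ports.
+ */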
+int mlx5_esw_bridge_port_mdb_attach(struct net_device *dev, struct mlx5_esw_bridge_port *port,
+ const unsigned char *addr, u16 vid)
+{
+ struct mlx5_esw_bridge *bridge = port->bridge;
+ struct mlx5_esw_bridge_mdb_entry *entry;
+ int err;
+
+ if (!(bridge->flags & MLX5_ESW_BRIDGE_MCAST_FLAG))
+ return -EOPNOTSUPP;
+
+ entry = mlx5_esw_bridge_mdb_lookup(bridge, addr, vid);
+ if (entry) {
+ if (mlx5_esw_bridge_mdb_port_lookup(port, entry)) {
+ esw_warn(bridge->br_offloads->esw->dev, "MDB attach entry is already attached to port (MAC=%pM,vid=%u,vport=%u)\n",
+ addr, vid, port->vport_num);
+ return 0;
+ }
+ } else {
+ entry = mlx5_esw_bridge_port_mdb_entry_init(port, addr, vid);
+ if (IS_ERR(entry)) {
+ err = PTR_ERR(entry);
+ esw_warn(bridge->br_offloads->esw->dev, "MDB attach failed to init entry (MAC=%pM,vid=%u,vport=%u,err=%d)\n",
+ addr, vid, port->vport_num, err);
+ return err;
+ }
+ }
+
+ err = mlx5_esw_bridge_mdb_port_insert(port, entry);
+ if (err) {
+ if (!entry->num_ports)
+ mlx5_esw_bridge_port_mdb_entry_cleanup(bridge, entry); /* new mdb entry */
+ esw_warn(bridge->br_offloads->esw->dev,
+ "MDB attach failed to insert port (MAC=%pM,vid=%u,vport=%u,err=%d)\n",
+ addr, vid, port->vport_num, err);
+ return err;
+ }
+
+ err = mlx5_esw_bridge_port_mdb_offload(port, entry);
+ if (err)
+ /* Single mdb can be used by multiple ports, so just log the
+ * error and continue.
+ */
+ esw_warn(bridge->br_offloads->esw->dev, "MDB attach failed to offload (MAC=%pM,vid=%u,vport=%u,err=%d)\n",
+ addr, vid, port->vport_num, err);
+
+ trace_mlx5_esw_bridge_port_mdb_attach(dev, entry);
+ return 0;
+}
+
+static void mlx5_esw_bridge_port_mdb_entry_detach(struct mlx5_esw_bridge_port *port,
+ struct mlx5_esw_bridge_mdb_entry *entry)
+{
+ struct mlx5_esw_bridge *bridge = port->bridge;
+ int err;
+
+ mlx5_esw_bridge_mdb_port_remove(port, entry);
+ if (!entry->num_ports) {
+ mlx5_esw_bridge_port_mdb_entry_cleanup(bridge, entry);
+ return;
+ }
+
+ err = mlx5_esw_bridge_port_mdb_offload(port, entry);
+ if (err)
+ /* Single mdb can be used by multiple ports, so just log the
+ * error and continue.
+ */
+ esw_warn(bridge->br_offloads->esw->dev, "MDB detach failed to offload (MAC=%pM,vid=%u,vport=%u)\n",
+ entry->key.addr, entry->key.vid, port->vport_num);
+}
+
+void mlx5_esw_bridge_port_mdb_detach(struct net_device *dev, struct mlx5_esw_bridge_port *port,
+ const unsigned char *addr, u16 vid)
+{
+ struct mlx5_esw_bridge *bridge = port->bridge;
+ struct mlx5_esw_bridge_mdb_entry *entry;
+
+ entry = mlx5_esw_bridge_mdb_lookup(bridge, addr, vid);
+ if (!entry) {
+ esw_debug(bridge->br_offloads->esw->dev,
+ "MDB detach entry not found (MAC=%pM,vid=%u,vport=%u)\n",
+ addr, vid, port->vport_num);
+ return;
+ }
+
+ if (!mlx5_esw_bridge_mdb_port_lookup(port, entry)) {
+ esw_debug(bridge->br_offloads->esw->dev,
+ "MDB detach entry not attached to the port (MAC=%pM,vid=%u,vport=%u)\n",
+ addr, vid, port->vport_num);
+ return;
+ }
+
+ trace_mlx5_esw_bridge_port_mdb_detach(dev, entry);
+ mlx5_esw_bridge_port_mdb_entry_detach(port, entry);
+}
+
+void mlx5_esw_bridge_port_mdb_vlan_flush(struct mlx5_esw_bridge_port *port,
+ struct mlx5_esw_bridge_vlan *vlan)
+{
+ struct mlx5_esw_bridge *bridge = port->bridge;
+ struct mlx5_esw_bridge_mdb_entry *entry, *tmp;
+
+ list_for_each_entry_safe(entry, tmp, &bridge->mdb_list, list)
+ if (entry->key.vid == vlan->vid && mlx5_esw_bridge_mdb_port_lookup(port, entry))
+ mlx5_esw_bridge_port_mdb_entry_detach(port, entry);
+}
+
+static void mlx5_esw_bridge_port_mdb_flush(struct mlx5_esw_bridge_port *port)
+{
+ struct mlx5_esw_bridge *bridge = port->bridge;
+ struct mlx5_esw_bridge_mdb_entry *entry, *tmp;
+
+ list_for_each_entry_safe(entry, tmp, &bridge->mdb_list, list)
+ if (mlx5_esw_bridge_mdb_port_lookup(port, entry))
+ mlx5_esw_bridge_port_mdb_entry_detach(port, entry);
+}
+
+void mlx5_esw_bridge_mdb_flush(struct mlx5_esw_bridge *bridge)
+{
+ struct mlx5_esw_bridge_mdb_entry *entry, *tmp;
+
+ list_for_each_entry_safe(entry, tmp, &bridge->mdb_list, list)
+ mlx5_esw_bridge_port_mdb_entry_cleanup(bridge, entry);
+}
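+
+/* Each bridge port owns a dedicated multicast table. MDB egress rules
+ * replicate packets into these per-port tables, which filter out the
+ * packet's source port and forward to the port's vport.
+ */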
+static int mlx5_esw_bridge_port_mcast_fts_init(struct mlx5_esw_bridge_port *port,
+ struct mlx5_esw_bridge *bridge)
+{
+ struct mlx5_eswitch *esw = bridge->br_offloads->esw;
+ struct mlx5_flow_table *mcast_ft;
+
+ mcast_ft = mlx5_esw_bridge_table_create(MLX5_ESW_BRIDGE_MCAST_TABLE_SIZE,
+ MLX5_ESW_BRIDGE_LEVEL_MCAST_TABLE,
+ esw);
+ if (IS_ERR(mcast_ft))
+ return PTR_ERR(mcast_ft);
+
+ port->mcast.ft = mcast_ft;
+ return 0;
+}
+
+static void mlx5_esw_bridge_port_mcast_fts_cleanup(struct mlx5_esw_bridge_port *port)
+{
+ if (port->mcast.ft)
+ mlx5_destroy_flow_table(port->mcast.ft);
+ port->mcast.ft = NULL;
+}
+
+static struct mlx5_flow_group *
+mlx5_esw_bridge_mcast_filter_fg_create(struct mlx5_eswitch *esw,
+ struct mlx5_flow_table *mcast_ft)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_group *fg;
+ u32 *in, *match;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return ERR_PTR(-ENOMEM);
+
+ MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_MISC_PARAMETERS_2);
+ match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+
+ MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_0,
+ mlx5_eswitch_get_vport_metadata_mask());
+
+ MLX5_SET(create_flow_group_in, in, start_flow_index,
+ MLX5_ESW_BRIDGE_MCAST_TABLE_FILTER_GRP_IDX_FROM);
+ MLX5_SET(create_flow_group_in, in, end_flow_index,
+ MLX5_ESW_BRIDGE_MCAST_TABLE_FILTER_GRP_IDX_TO);
+
+ fg = mlx5_create_flow_group(mcast_ft, in);
+ kvfree(in);
+ if (IS_ERR(fg))
+ esw_warn(esw->dev,
+ "Failed to create filter flow group for bridge mcast table (err=%pe)\n",
+ fg);
+
+ return fg;
+}
+
+static struct mlx5_flow_group *
+mlx5_esw_bridge_mcast_vlan_proto_fg_create(unsigned int from, unsigned int to, u16 vlan_proto,
+ struct mlx5_eswitch *esw,
+ struct mlx5_flow_table *mcast_ft)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_group *fg;
+ u32 *in, *match;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return ERR_PTR(-ENOMEM);
+
+ MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+
+ if (vlan_proto == ETH_P_8021Q)
+ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
+ else if (vlan_proto == ETH_P_8021AD)
+ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.svlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.first_vid);
+
+ MLX5_SET(create_flow_group_in, in, start_flow_index, from);
+ MLX5_SET(create_flow_group_in, in, end_flow_index, to);
+
+ fg = mlx5_create_flow_group(mcast_ft, in);
+ kvfree(in);
+ if (IS_ERR(fg))
+ esw_warn(esw->dev,
+ "Failed to create VLAN(proto=%x) flow group for bridge mcast table (err=%pe)\n",
+ vlan_proto, fg);
+
+ return fg;
+}
+
+static struct mlx5_flow_group *
+mlx5_esw_bridge_mcast_vlan_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *mcast_ft)
+{
+ unsigned int from = MLX5_ESW_BRIDGE_MCAST_TABLE_VLAN_GRP_IDX_FROM;
+ unsigned int to = MLX5_ESW_BRIDGE_MCAST_TABLE_VLAN_GRP_IDX_TO;
+
+ return mlx5_esw_bridge_mcast_vlan_proto_fg_create(from, to, ETH_P_8021Q, esw, mcast_ft);
+}
+
+static struct mlx5_flow_group *
+mlx5_esw_bridge_mcast_qinq_fg_create(struct mlx5_eswitch *esw,
+ struct mlx5_flow_table *mcast_ft)
+{
+ unsigned int from = MLX5_ESW_BRIDGE_MCAST_TABLE_QINQ_GRP_IDX_FROM;
+ unsigned int to = MLX5_ESW_BRIDGE_MCAST_TABLE_QINQ_GRP_IDX_TO;
+
+ return mlx5_esw_bridge_mcast_vlan_proto_fg_create(from, to, ETH_P_8021AD, esw, mcast_ft);
+}
+
+static struct mlx5_flow_group *
+mlx5_esw_bridge_mcast_fwd_fg_create(struct mlx5_eswitch *esw,
+ struct mlx5_flow_table *mcast_ft)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_group *fg;
+ u32 *in;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return ERR_PTR(-ENOMEM);
+
+ MLX5_SET(create_flow_group_in, in, start_flow_index,
+ MLX5_ESW_BRIDGE_MCAST_TABLE_FWD_GRP_IDX_FROM);
+ MLX5_SET(create_flow_group_in, in, end_flow_index,
+ MLX5_ESW_BRIDGE_MCAST_TABLE_FWD_GRP_IDX_TO);
+
+ fg = mlx5_create_flow_group(mcast_ft, in);
+ kvfree(in);
+ if (IS_ERR(fg))
+ esw_warn(esw->dev,
+ "Failed to create forward flow group for bridge mcast table (err=%pe)\n",
+ fg);
+
+ return fg;
+}
+
+static int mlx5_esw_bridge_port_mcast_fgs_init(struct mlx5_esw_bridge_port *port)
+{
+ struct mlx5_flow_group *fwd_fg, *qinq_fg, *vlan_fg, *filter_fg;
+ struct mlx5_eswitch *esw = port->bridge->br_offloads->esw;
+ struct mlx5_flow_table *mcast_ft = port->mcast.ft;
+ int err;
+
+ filter_fg = mlx5_esw_bridge_mcast_filter_fg_create(esw, mcast_ft);
+ if (IS_ERR(filter_fg))
+ return PTR_ERR(filter_fg);
+
+ vlan_fg = mlx5_esw_bridge_mcast_vlan_fg_create(esw, mcast_ft);
+ if (IS_ERR(vlan_fg)) {
+ err = PTR_ERR(vlan_fg);
+ goto err_vlan_fg;
+ }
+
+ qinq_fg = mlx5_esw_bridge_mcast_qinq_fg_create(esw, mcast_ft);
+ if (IS_ERR(qinq_fg)) {
+ err = PTR_ERR(qinq_fg);
+ goto err_qinq_fg;
+ }
+
+ fwd_fg = mlx5_esw_bridge_mcast_fwd_fg_create(esw, mcast_ft);
+ if (IS_ERR(fwd_fg)) {
+ err = PTR_ERR(fwd_fg);
+ goto err_fwd_fg;
+ }
+
+ port->mcast.filter_fg = filter_fg;
+ port->mcast.vlan_fg = vlan_fg;
+ port->mcast.qinq_fg = qinq_fg;
+ port->mcast.fwd_fg = fwd_fg;
+
+ return 0;
+
+err_fwd_fg:
+ mlx5_destroy_flow_group(qinq_fg);
+err_qinq_fg:
+ mlx5_destroy_flow_group(vlan_fg);
+err_vlan_fg:
+ mlx5_destroy_flow_group(filter_fg);
+ return err;
+}
+
+static void mlx5_esw_bridge_port_mcast_fgs_cleanup(struct mlx5_esw_bridge_port *port)
+{
+ if (port->mcast.fwd_fg)
+ mlx5_destroy_flow_group(port->mcast.fwd_fg);
+ port->mcast.fwd_fg = NULL;
+ if (port->mcast.qinq_fg)
+ mlx5_destroy_flow_group(port->mcast.qinq_fg);
+ port->mcast.qinq_fg = NULL;
+ if (port->mcast.vlan_fg)
+ mlx5_destroy_flow_group(port->mcast.vlan_fg);
+ port->mcast.vlan_fg = NULL;
+ if (port->mcast.filter_fg)
+ mlx5_destroy_flow_group(port->mcast.filter_fg);
+ port->mcast.filter_fg = NULL;
+}
+
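+/* Build the source-port filter: a drop rule matching the source vport
+ * metadata, so multicast traffic is never replicated back to the port it
+ * arrived on.
+ */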
+static struct mlx5_flow_handle *
+mlx5_esw_bridge_mcast_flow_with_esw_create(struct mlx5_esw_bridge_port *port,
+ struct mlx5_eswitch *esw)
+{
+ struct mlx5_flow_act flow_act = {
+ .action = MLX5_FLOW_CONTEXT_ACTION_DROP,
+ .flags = FLOW_ACT_NO_APPEND,
+ };
+ struct mlx5_flow_spec *rule_spec;
+ struct mlx5_flow_handle *handle;
+
+ rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
+ if (!rule_spec)
+ return ERR_PTR(-ENOMEM);
+
+ rule_spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
+
+ MLX5_SET(fte_match_param, rule_spec->match_criteria,
+ misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
+ MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters_2.metadata_reg_c_0,
+ mlx5_eswitch_get_vport_metadata_for_match(esw, port->vport_num));
+
+ handle = mlx5_add_flow_rules(port->mcast.ft, rule_spec, &flow_act, NULL, 0);
+
+ kvfree(rule_spec);
+ return handle;
+}
+
+static struct mlx5_flow_handle *
+mlx5_esw_bridge_mcast_filter_flow_create(struct mlx5_esw_bridge_port *port)
+{
+ return mlx5_esw_bridge_mcast_flow_with_esw_create(port, port->bridge->br_offloads->esw);
+}
+
+static struct mlx5_flow_handle *
+mlx5_esw_bridge_mcast_filter_flow_peer_create(struct mlx5_esw_bridge_port *port)
+{
+ struct mlx5_devcom *devcom = port->bridge->br_offloads->esw->dev->priv.devcom;
+	struct mlx5_flow_handle *handle;
+ struct mlx5_eswitch *peer_esw;
+
+ peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+ if (!peer_esw)
+ return ERR_PTR(-ENODEV);
+
+ handle = mlx5_esw_bridge_mcast_flow_with_esw_create(port, peer_esw);
+
+ mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+ return handle;
+}
+
+static struct mlx5_flow_handle *
+mlx5_esw_bridge_mcast_vlan_flow_create(u16 vlan_proto, struct mlx5_esw_bridge_port *port,
+ struct mlx5_esw_bridge_vlan *vlan)
+{
+ struct mlx5_flow_act flow_act = {
+ .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+ .flags = FLOW_ACT_NO_APPEND,
+ };
+ struct mlx5_flow_destination dest = {
+ .type = MLX5_FLOW_DESTINATION_TYPE_VPORT,
+ .vport.num = port->vport_num,
+ };
+ struct mlx5_esw_bridge *bridge = port->bridge;
+ struct mlx5_flow_spec *rule_spec;
+ struct mlx5_flow_handle *handle;
+
+ rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
+ if (!rule_spec)
+ return ERR_PTR(-ENOMEM);
+
+ if (MLX5_CAP_ESW_FLOWTABLE(bridge->br_offloads->esw->dev, flow_source) &&
+ port->vport_num == MLX5_VPORT_UPLINK)
+ rule_spec->flow_context.flow_source =
+ MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
+ rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ flow_act.pkt_reformat = vlan->pkt_reformat_pop;
+
+ if (vlan_proto == ETH_P_8021Q) {
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
+ outer_headers.cvlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
+ outer_headers.cvlan_tag);
+ } else if (vlan_proto == ETH_P_8021AD) {
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
+ outer_headers.svlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
+ outer_headers.svlan_tag);
+ }
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria, outer_headers.first_vid);
+ MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.first_vid, vlan->vid);
+
+ if (MLX5_CAP_ESW(bridge->br_offloads->esw->dev, merged_eswitch)) {
+ dest.vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID;
+ dest.vport.vhca_id = port->esw_owner_vhca_id;
+ }
+ handle = mlx5_add_flow_rules(port->mcast.ft, rule_spec, &flow_act, &dest, 1);
+
+ kvfree(rule_spec);
+ return handle;
+}
+
+int mlx5_esw_bridge_vlan_mcast_init(u16 vlan_proto, struct mlx5_esw_bridge_port *port,
+ struct mlx5_esw_bridge_vlan *vlan)
+{
+ struct mlx5_flow_handle *handle;
+
+ if (!(port->bridge->flags & MLX5_ESW_BRIDGE_MCAST_FLAG))
+ return 0;
+
+ handle = mlx5_esw_bridge_mcast_vlan_flow_create(vlan_proto, port, vlan);
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+
+ vlan->mcast_handle = handle;
+ return 0;
+}
+
+void mlx5_esw_bridge_vlan_mcast_cleanup(struct mlx5_esw_bridge_vlan *vlan)
+{
+ if (vlan->mcast_handle)
+ mlx5_del_flow_rules(vlan->mcast_handle);
+ vlan->mcast_handle = NULL;
+}
+
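+/* Default forwarding rule of the per-port multicast table: deliver the
+ * replicated packet to the port's vport, addressing the peer VHCA when
+ * eswitches are merged.
+ */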
+static struct mlx5_flow_handle *
+mlx5_esw_bridge_mcast_fwd_flow_create(struct mlx5_esw_bridge_port *port)
+{
+ struct mlx5_flow_act flow_act = {
+ .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+ .flags = FLOW_ACT_NO_APPEND,
+ };
+ struct mlx5_flow_destination dest = {
+ .type = MLX5_FLOW_DESTINATION_TYPE_VPORT,
+ .vport.num = port->vport_num,
+ };
+ struct mlx5_esw_bridge *bridge = port->bridge;
+ struct mlx5_flow_spec *rule_spec;
+ struct mlx5_flow_handle *handle;
+
+ rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
+ if (!rule_spec)
+ return ERR_PTR(-ENOMEM);
+
+ if (MLX5_CAP_ESW_FLOWTABLE(bridge->br_offloads->esw->dev, flow_source) &&
+ port->vport_num == MLX5_VPORT_UPLINK)
+ rule_spec->flow_context.flow_source =
+ MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
+
+ if (MLX5_CAP_ESW(bridge->br_offloads->esw->dev, merged_eswitch)) {
+ dest.vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID;
+ dest.vport.vhca_id = port->esw_owner_vhca_id;
+ }
+ handle = mlx5_add_flow_rules(port->mcast.ft, rule_spec, &flow_act, &dest, 1);
+
+ kvfree(rule_spec);
+ return handle;
+}
+
+static int mlx5_esw_bridge_port_mcast_fhs_init(struct mlx5_esw_bridge_port *port)
+{
+ struct mlx5_flow_handle *filter_handle, *fwd_handle;
+ struct mlx5_esw_bridge_vlan *vlan, *failed;
+ unsigned long index;
+ int err;
+
+ filter_handle = (port->flags & MLX5_ESW_BRIDGE_PORT_FLAG_PEER) ?
+ mlx5_esw_bridge_mcast_filter_flow_peer_create(port) :
+ mlx5_esw_bridge_mcast_filter_flow_create(port);
+ if (IS_ERR(filter_handle))
+ return PTR_ERR(filter_handle);
+
+ fwd_handle = mlx5_esw_bridge_mcast_fwd_flow_create(port);
+ if (IS_ERR(fwd_handle)) {
+ err = PTR_ERR(fwd_handle);
+ goto err_fwd;
+ }
+
+ xa_for_each(&port->vlans, index, vlan) {
+ err = mlx5_esw_bridge_vlan_mcast_init(port->bridge->vlan_proto, port, vlan);
+ if (err) {
+ failed = vlan;
+ goto err_vlan;
+ }
+ }
+
+ port->mcast.filter_handle = filter_handle;
+ port->mcast.fwd_handle = fwd_handle;
+
+ return 0;
+
+err_vlan:
+ xa_for_each(&port->vlans, index, vlan) {
+ if (vlan == failed)
+ break;
+
+ mlx5_esw_bridge_vlan_mcast_cleanup(vlan);
+ }
+ mlx5_del_flow_rules(fwd_handle);
+err_fwd:
+ mlx5_del_flow_rules(filter_handle);
+ return err;
+}
+
+static void mlx5_esw_bridge_port_mcast_fhs_cleanup(struct mlx5_esw_bridge_port *port)
+{
+ struct mlx5_esw_bridge_vlan *vlan;
+ unsigned long index;
+
+ xa_for_each(&port->vlans, index, vlan)
+ mlx5_esw_bridge_vlan_mcast_cleanup(vlan);
+
+ if (port->mcast.fwd_handle)
+ mlx5_del_flow_rules(port->mcast.fwd_handle);
+ port->mcast.fwd_handle = NULL;
+ if (port->mcast.filter_handle)
+ mlx5_del_flow_rules(port->mcast.filter_handle);
+ port->mcast.filter_handle = NULL;
+}
+
+int mlx5_esw_bridge_port_mcast_init(struct mlx5_esw_bridge_port *port)
+{
+ struct mlx5_esw_bridge *bridge = port->bridge;
+ int err;
+
+ if (!(bridge->flags & MLX5_ESW_BRIDGE_MCAST_FLAG))
+ return 0;
+
+ err = mlx5_esw_bridge_port_mcast_fts_init(port, bridge);
+ if (err)
+ return err;
+
+ err = mlx5_esw_bridge_port_mcast_fgs_init(port);
+ if (err)
+ goto err_fgs;
+
+ err = mlx5_esw_bridge_port_mcast_fhs_init(port);
+ if (err)
+ goto err_fhs;
+ return err;
+
+err_fhs:
+ mlx5_esw_bridge_port_mcast_fgs_cleanup(port);
+err_fgs:
+ mlx5_esw_bridge_port_mcast_fts_cleanup(port);
+ return err;
+}
+
+void mlx5_esw_bridge_port_mcast_cleanup(struct mlx5_esw_bridge_port *port)
+{
+ mlx5_esw_bridge_port_mdb_flush(port);
+ mlx5_esw_bridge_port_mcast_fhs_cleanup(port);
+ mlx5_esw_bridge_port_mcast_fgs_cleanup(port);
+ mlx5_esw_bridge_port_mcast_fts_cleanup(port);
+}
+
+static struct mlx5_flow_group *
+mlx5_esw_bridge_ingress_igmp_fg_create(struct mlx5_eswitch *esw,
+ struct mlx5_flow_table *ingress_ft)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_group *fg;
+ u32 *in, *match;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return ERR_PTR(-ENOMEM);
+
+ MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+
+ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.ip_version);
+ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.ip_protocol);
+
+ MLX5_SET(create_flow_group_in, in, start_flow_index,
+ MLX5_ESW_BRIDGE_INGRESS_TABLE_IGMP_GRP_IDX_FROM);
+ MLX5_SET(create_flow_group_in, in, end_flow_index,
+ MLX5_ESW_BRIDGE_INGRESS_TABLE_IGMP_GRP_IDX_TO);
+
+ fg = mlx5_create_flow_group(ingress_ft, in);
+ kvfree(in);
+ if (IS_ERR(fg))
+ esw_warn(esw->dev,
+ "Failed to create IGMP flow group for bridge ingress table (err=%pe)\n",
+ fg);
+
+ return fg;
+}
+
+static struct mlx5_flow_group *
+mlx5_esw_bridge_ingress_mld_fg_create(struct mlx5_eswitch *esw,
+ struct mlx5_flow_table *ingress_ft)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_group *fg;
+ u32 *in, *match;
+
+ if (!(MLX5_CAP_GEN(esw->dev, flex_parser_protocols) & MLX5_FLEX_PROTO_ICMPV6)) {
+ esw_warn(esw->dev,
+ "Can't create MLD flow group due to missing hardware ICMPv6 parsing support\n");
+ return NULL;
+ }
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return ERR_PTR(-ENOMEM);
+
+ MLX5_SET(create_flow_group_in, in, match_criteria_enable,
+ MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_3);
+ match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+
+ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.ip_version);
+ MLX5_SET_TO_ONES(fte_match_param, match, misc_parameters_3.icmpv6_type);
+
+ MLX5_SET(create_flow_group_in, in, start_flow_index,
+ MLX5_ESW_BRIDGE_INGRESS_TABLE_MLD_GRP_IDX_FROM);
+ MLX5_SET(create_flow_group_in, in, end_flow_index,
+ MLX5_ESW_BRIDGE_INGRESS_TABLE_MLD_GRP_IDX_TO);
+
+ fg = mlx5_create_flow_group(ingress_ft, in);
+ kvfree(in);
+ if (IS_ERR(fg))
+ esw_warn(esw->dev,
+ "Failed to create MLD flow group for bridge ingress table (err=%pe)\n",
+ fg);
+
+ return fg;
+}
+
+static int
+mlx5_esw_bridge_ingress_mcast_fgs_init(struct mlx5_esw_bridge_offloads *br_offloads)
+{
+ struct mlx5_flow_table *ingress_ft = br_offloads->ingress_ft;
+ struct mlx5_eswitch *esw = br_offloads->esw;
+ struct mlx5_flow_group *igmp_fg, *mld_fg;
+
+ igmp_fg = mlx5_esw_bridge_ingress_igmp_fg_create(esw, ingress_ft);
+ if (IS_ERR(igmp_fg))
+ return PTR_ERR(igmp_fg);
+
+ mld_fg = mlx5_esw_bridge_ingress_mld_fg_create(esw, ingress_ft);
+ if (IS_ERR(mld_fg)) {
+ mlx5_destroy_flow_group(igmp_fg);
+ return PTR_ERR(mld_fg);
+ }
+
+ br_offloads->ingress_igmp_fg = igmp_fg;
+ br_offloads->ingress_mld_fg = mld_fg;
+ return 0;
+}
+
+static void
+mlx5_esw_bridge_ingress_mcast_fgs_cleanup(struct mlx5_esw_bridge_offloads *br_offloads)
+{
+ if (br_offloads->ingress_mld_fg)
+ mlx5_destroy_flow_group(br_offloads->ingress_mld_fg);
+ br_offloads->ingress_mld_fg = NULL;
+ if (br_offloads->ingress_igmp_fg)
+ mlx5_destroy_flow_group(br_offloads->ingress_igmp_fg);
+ br_offloads->ingress_igmp_fg = NULL;
+}
+
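+/* Punt IPv4 IGMP packets from the bridge ingress table to the skip table
+ * so they are not bridged in hardware and remain visible to the software
+ * bridge for snooping.
+ */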
+static struct mlx5_flow_handle *
+mlx5_esw_bridge_ingress_igmp_fh_create(struct mlx5_flow_table *ingress_ft,
+ struct mlx5_flow_table *skip_ft)
+{
+ struct mlx5_flow_destination dest = {
+ .type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE,
+ .ft = skip_ft,
+ };
+ struct mlx5_flow_act flow_act = {
+ .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+ .flags = FLOW_ACT_NO_APPEND,
+ };
+ struct mlx5_flow_spec *rule_spec;
+ struct mlx5_flow_handle *handle;
+
+ rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
+ if (!rule_spec)
+ return ERR_PTR(-ENOMEM);
+
+ rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria, outer_headers.ip_version);
+ MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.ip_version, 4);
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria, outer_headers.ip_protocol);
+ MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.ip_protocol, IPPROTO_IGMP);
+
+ handle = mlx5_add_flow_rules(ingress_ft, rule_spec, &flow_act, &dest, 1);
+
+ kvfree(rule_spec);
+ return handle;
+}
+
+static struct mlx5_flow_handle *
+mlx5_esw_bridge_ingress_mld_fh_create(u8 type, struct mlx5_flow_table *ingress_ft,
+ struct mlx5_flow_table *skip_ft)
+{
+ struct mlx5_flow_destination dest = {
+ .type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE,
+ .ft = skip_ft,
+ };
+ struct mlx5_flow_act flow_act = {
+ .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+ .flags = FLOW_ACT_NO_APPEND,
+ };
+ struct mlx5_flow_spec *rule_spec;
+ struct mlx5_flow_handle *handle;
+
+ rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
+ if (!rule_spec)
+ return ERR_PTR(-ENOMEM);
+
+ rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_3;
+
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria, outer_headers.ip_version);
+ MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.ip_version, 6);
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria, misc_parameters_3.icmpv6_type);
+ MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters_3.icmpv6_type, type);
+
+ handle = mlx5_add_flow_rules(ingress_ft, rule_spec, &flow_act, &dest, 1);
+
+ kvfree(rule_spec);
+ return handle;
+}
+
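+/* Install the global IGMP/MLD trap rules. The MLD rules are left NULL
+ * when the MLD flow group could not be created due to missing ICMPv6
+ * parsing support.
+ */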
+static int
+mlx5_esw_bridge_ingress_mcast_fhs_create(struct mlx5_esw_bridge_offloads *br_offloads)
+{
+ struct mlx5_flow_handle *igmp_handle, *mld_query_handle, *mld_report_handle,
+ *mld_done_handle;
+ struct mlx5_flow_table *ingress_ft = br_offloads->ingress_ft,
+ *skip_ft = br_offloads->skip_ft;
+ int err;
+
+ igmp_handle = mlx5_esw_bridge_ingress_igmp_fh_create(ingress_ft, skip_ft);
+ if (IS_ERR(igmp_handle))
+ return PTR_ERR(igmp_handle);
+
+ if (br_offloads->ingress_mld_fg) {
+ mld_query_handle = mlx5_esw_bridge_ingress_mld_fh_create(ICMPV6_MGM_QUERY,
+ ingress_ft,
+ skip_ft);
+ if (IS_ERR(mld_query_handle)) {
+ err = PTR_ERR(mld_query_handle);
+ goto err_mld_query;
+ }
+
+ mld_report_handle = mlx5_esw_bridge_ingress_mld_fh_create(ICMPV6_MGM_REPORT,
+ ingress_ft,
+ skip_ft);
+ if (IS_ERR(mld_report_handle)) {
+ err = PTR_ERR(mld_report_handle);
+ goto err_mld_report;
+ }
+
+ mld_done_handle = mlx5_esw_bridge_ingress_mld_fh_create(ICMPV6_MGM_REDUCTION,
+ ingress_ft,
+ skip_ft);
+ if (IS_ERR(mld_done_handle)) {
+ err = PTR_ERR(mld_done_handle);
+ goto err_mld_done;
+ }
+ } else {
+ mld_query_handle = NULL;
+ mld_report_handle = NULL;
+ mld_done_handle = NULL;
+ }
+
+ br_offloads->igmp_handle = igmp_handle;
+ br_offloads->mld_query_handle = mld_query_handle;
+ br_offloads->mld_report_handle = mld_report_handle;
+ br_offloads->mld_done_handle = mld_done_handle;
+
+ return 0;
+
+err_mld_done:
+ mlx5_del_flow_rules(mld_report_handle);
+err_mld_report:
+ mlx5_del_flow_rules(mld_query_handle);
+err_mld_query:
+ mlx5_del_flow_rules(igmp_handle);
+ return err;
+}
+
+static void
+mlx5_esw_bridge_ingress_mcast_fhs_cleanup(struct mlx5_esw_bridge_offloads *br_offloads)
+{
+ if (br_offloads->mld_done_handle)
+ mlx5_del_flow_rules(br_offloads->mld_done_handle);
+ br_offloads->mld_done_handle = NULL;
+ if (br_offloads->mld_report_handle)
+ mlx5_del_flow_rules(br_offloads->mld_report_handle);
+ br_offloads->mld_report_handle = NULL;
+ if (br_offloads->mld_query_handle)
+ mlx5_del_flow_rules(br_offloads->mld_query_handle);
+ br_offloads->mld_query_handle = NULL;
+ if (br_offloads->igmp_handle)
+ mlx5_del_flow_rules(br_offloads->igmp_handle);
+ br_offloads->igmp_handle = NULL;
+}
+
+static int mlx5_esw_bridge_mcast_init(struct mlx5_esw_bridge *bridge)
+{
+ struct mlx5_esw_bridge_offloads *br_offloads = bridge->br_offloads;
+ struct mlx5_esw_bridge_port *port, *failed;
+ unsigned long i;
+ int err;
+
+ xa_for_each(&br_offloads->ports, i, port) {
+ if (port->bridge != bridge)
+ continue;
+
+ err = mlx5_esw_bridge_port_mcast_init(port);
+ if (err) {
+ failed = port;
+ goto err_port;
+ }
+ }
+ return 0;
+
+err_port:
+ xa_for_each(&br_offloads->ports, i, port) {
+ if (port == failed)
+ break;
+ if (port->bridge != bridge)
+ continue;
+
+ mlx5_esw_bridge_port_mcast_cleanup(port);
+ }
+ return err;
+}
+
+static void mlx5_esw_bridge_mcast_cleanup(struct mlx5_esw_bridge *bridge)
+{
+ struct mlx5_esw_bridge_offloads *br_offloads = bridge->br_offloads;
+ struct mlx5_esw_bridge_port *port;
+ unsigned long i;
+
+ xa_for_each(&br_offloads->ports, i, port) {
+ if (port->bridge != bridge)
+ continue;
+
+ mlx5_esw_bridge_port_mcast_cleanup(port);
+ }
+}
+
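+/* The IGMP/MLD ingress groups and rules are shared by all bridges on the
+ * eswitch, so they are created once, when the first bridge enables
+ * multicast, and destroyed only when the last one disables it.
+ */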
+static int mlx5_esw_bridge_mcast_global_enable(struct mlx5_esw_bridge_offloads *br_offloads)
+{
+ int err;
+
+ if (br_offloads->ingress_igmp_fg)
+ return 0; /* already enabled by another bridge */
+
+ err = mlx5_esw_bridge_ingress_mcast_fgs_init(br_offloads);
+ if (err) {
+ esw_warn(br_offloads->esw->dev,
+ "Failed to create global multicast flow groups (err=%d)\n",
+ err);
+ return err;
+ }
+
+ err = mlx5_esw_bridge_ingress_mcast_fhs_create(br_offloads);
+ if (err) {
+ esw_warn(br_offloads->esw->dev,
+ "Failed to create global multicast flows (err=%d)\n",
+ err);
+ goto err_fhs;
+ }
+
+ return 0;
+
+err_fhs:
+ mlx5_esw_bridge_ingress_mcast_fgs_cleanup(br_offloads);
+ return err;
+}
+
+static void mlx5_esw_bridge_mcast_global_disable(struct mlx5_esw_bridge_offloads *br_offloads)
+{
+ struct mlx5_esw_bridge *br;
+
+ list_for_each_entry(br, &br_offloads->bridges, list) {
+ /* Ingress table is global, so only disable snooping when all
+ * bridges on esw have multicast disabled.
+ */
+ if (br->flags & MLX5_ESW_BRIDGE_MCAST_FLAG)
+ return;
+ }
+
+ mlx5_esw_bridge_ingress_mcast_fhs_cleanup(br_offloads);
+ mlx5_esw_bridge_ingress_mcast_fgs_cleanup(br_offloads);
+}
+
+int mlx5_esw_bridge_mcast_enable(struct mlx5_esw_bridge *bridge)
+{
+ int err;
+
+	err = mlx5_esw_bridge_mcast_global_enable(bridge->br_offloads);
+ if (err)
+ return err;
+
+ bridge->flags |= MLX5_ESW_BRIDGE_MCAST_FLAG;
+
+	err = mlx5_esw_bridge_mcast_init(bridge);
+ if (err) {
+ esw_warn(bridge->br_offloads->esw->dev, "Failed to enable multicast (err=%d)\n",
+ err);
+ bridge->flags &= ~MLX5_ESW_BRIDGE_MCAST_FLAG;
+		mlx5_esw_bridge_mcast_global_disable(bridge->br_offloads);
+ }
+ return err;
+}
+
+void mlx5_esw_bridge_mcast_disable(struct mlx5_esw_bridge *bridge)
+{
+	mlx5_esw_bridge_mcast_cleanup(bridge);
+ bridge->flags &= ~MLX5_ESW_BRIDGE_MCAST_FLAG;
+	mlx5_esw_bridge_mcast_global_disable(bridge->br_offloads);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h
index 878311fe950a..c9595801bdb4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h
@@ -12,11 +12,124 @@
#include <linux/xarray.h>
#include "fs_core.h"
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_IGMP_GRP_SIZE 1
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_MLD_GRP_SIZE 3
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_SIZE 131072
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_UNTAGGED_GRP_SIZE \
+ (524288 - MLX5_ESW_BRIDGE_INGRESS_TABLE_IGMP_GRP_SIZE - \
+ MLX5_ESW_BRIDGE_INGRESS_TABLE_MLD_GRP_SIZE)
+
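+/* Ingress table layout: the IGMP and MLD trap groups come first, followed
+ * by the VLAN, VLAN filter, QinQ and QinQ filter groups, with the untagged
+ * MAC group taking the remainder of the table.
+ */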
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_IGMP_GRP_IDX_FROM 0
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_IGMP_GRP_IDX_TO \
+ (MLX5_ESW_BRIDGE_INGRESS_TABLE_IGMP_GRP_SIZE - 1)
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_MLD_GRP_IDX_FROM \
+ (MLX5_ESW_BRIDGE_INGRESS_TABLE_IGMP_GRP_IDX_TO + 1)
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_MLD_GRP_IDX_TO \
+ (MLX5_ESW_BRIDGE_INGRESS_TABLE_MLD_GRP_IDX_FROM + \
+ MLX5_ESW_BRIDGE_INGRESS_TABLE_MLD_GRP_SIZE - 1)
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_FROM \
+ (MLX5_ESW_BRIDGE_INGRESS_TABLE_MLD_GRP_IDX_TO + 1)
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_TO \
+ (MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_FROM + \
+ MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_SIZE - 1)
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_FILTER_GRP_IDX_FROM \
+ (MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_TO + 1)
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_FILTER_GRP_IDX_TO \
+ (MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_FILTER_GRP_IDX_FROM + \
+ MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_SIZE - 1)
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_GRP_IDX_FROM \
+ (MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_FILTER_GRP_IDX_TO + 1)
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_GRP_IDX_TO \
+ (MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_GRP_IDX_FROM + \
+ MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_SIZE - 1)
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_FILTER_GRP_IDX_FROM \
+ (MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_GRP_IDX_TO + 1)
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_FILTER_GRP_IDX_TO \
+ (MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_FILTER_GRP_IDX_FROM + \
+ MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_SIZE - 1)
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_FROM \
+ (MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_FILTER_GRP_IDX_TO + 1)
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_TO \
+ (MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_FROM + \
+ MLX5_ESW_BRIDGE_INGRESS_TABLE_UNTAGGED_GRP_SIZE - 1)
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE \
+ (MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_TO + 1)
+static_assert(MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE == 1048576);
+
+#define MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_SIZE 131072
+#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_SIZE (262144 - 1)
+#define MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_FROM 0
+#define MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO \
+ (MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_SIZE - 1)
+#define MLX5_ESW_BRIDGE_EGRESS_TABLE_QINQ_GRP_IDX_FROM \
+ (MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO + 1)
+#define MLX5_ESW_BRIDGE_EGRESS_TABLE_QINQ_GRP_IDX_TO \
+ (MLX5_ESW_BRIDGE_EGRESS_TABLE_QINQ_GRP_IDX_FROM + \
+ MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_SIZE - 1)
+#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_FROM \
+ (MLX5_ESW_BRIDGE_EGRESS_TABLE_QINQ_GRP_IDX_TO + 1)
+#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_TO \
+ (MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_FROM + \
+ MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_SIZE - 1)
+#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_FROM \
+ (MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_TO + 1)
+#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_TO \
+ MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_FROM
+#define MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE \
+ (MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_TO + 1)
+static_assert(MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE == 524288);
+
+#define MLX5_ESW_BRIDGE_SKIP_TABLE_SIZE 0
+
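+/* Per-port multicast table layout: source-port filter group first, then
+ * per-VID VLAN and QinQ pop groups, with a single catch-all forwarding
+ * group at the end.
+ */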
+#define MLX5_ESW_BRIDGE_MCAST_TABLE_FILTER_GRP_SIZE 1
+#define MLX5_ESW_BRIDGE_MCAST_TABLE_FWD_GRP_SIZE 1
+#define MLX5_ESW_BRIDGE_MCAST_TABLE_VLAN_GRP_SIZE 4095
+#define MLX5_ESW_BRIDGE_MCAST_TABLE_QINQ_GRP_SIZE MLX5_ESW_BRIDGE_MCAST_TABLE_VLAN_GRP_SIZE
+#define MLX5_ESW_BRIDGE_MCAST_TABLE_FILTER_GRP_IDX_FROM 0
+#define MLX5_ESW_BRIDGE_MCAST_TABLE_FILTER_GRP_IDX_TO \
+ (MLX5_ESW_BRIDGE_MCAST_TABLE_FILTER_GRP_SIZE - 1)
+#define MLX5_ESW_BRIDGE_MCAST_TABLE_VLAN_GRP_IDX_FROM \
+ (MLX5_ESW_BRIDGE_MCAST_TABLE_FILTER_GRP_IDX_TO + 1)
+#define MLX5_ESW_BRIDGE_MCAST_TABLE_VLAN_GRP_IDX_TO \
+ (MLX5_ESW_BRIDGE_MCAST_TABLE_VLAN_GRP_IDX_FROM + \
+ MLX5_ESW_BRIDGE_MCAST_TABLE_VLAN_GRP_SIZE - 1)
+#define MLX5_ESW_BRIDGE_MCAST_TABLE_QINQ_GRP_IDX_FROM \
+ (MLX5_ESW_BRIDGE_MCAST_TABLE_VLAN_GRP_IDX_TO + 1)
+#define MLX5_ESW_BRIDGE_MCAST_TABLE_QINQ_GRP_IDX_TO \
+ (MLX5_ESW_BRIDGE_MCAST_TABLE_QINQ_GRP_IDX_FROM + \
+ MLX5_ESW_BRIDGE_MCAST_TABLE_QINQ_GRP_SIZE - 1)
+#define MLX5_ESW_BRIDGE_MCAST_TABLE_FWD_GRP_IDX_FROM \
+ (MLX5_ESW_BRIDGE_MCAST_TABLE_QINQ_GRP_IDX_TO + 1)
+#define MLX5_ESW_BRIDGE_MCAST_TABLE_FWD_GRP_IDX_TO \
+ (MLX5_ESW_BRIDGE_MCAST_TABLE_FWD_GRP_IDX_FROM + \
+ MLX5_ESW_BRIDGE_MCAST_TABLE_FWD_GRP_SIZE - 1)
+
+#define MLX5_ESW_BRIDGE_MCAST_TABLE_SIZE \
+ (MLX5_ESW_BRIDGE_MCAST_TABLE_FWD_GRP_IDX_TO + 1)
+static_assert(MLX5_ESW_BRIDGE_MCAST_TABLE_SIZE == 8192);
+
+enum {
+ MLX5_ESW_BRIDGE_LEVEL_INGRESS_TABLE,
+ MLX5_ESW_BRIDGE_LEVEL_EGRESS_TABLE,
+ MLX5_ESW_BRIDGE_LEVEL_MCAST_TABLE,
+ MLX5_ESW_BRIDGE_LEVEL_SKIP_TABLE,
+};
+
+enum {
+ MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG = BIT(0),
+ MLX5_ESW_BRIDGE_MCAST_FLAG = BIT(1),
+};
+
struct mlx5_esw_bridge_fdb_key {
unsigned char addr[ETH_ALEN];
u16 vid;
};
+struct mlx5_esw_bridge_mdb_key {
+ unsigned char addr[ETH_ALEN];
+ u16 vid;
+};
+
enum {
MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER = BIT(0),
MLX5_ESW_BRIDGE_FLAG_PEER = BIT(1),
@@ -43,6 +156,16 @@ struct mlx5_esw_bridge_fdb_entry {
struct mlx5_flow_handle *filter_handle;
};
+struct mlx5_esw_bridge_mdb_entry {
+ struct mlx5_esw_bridge_mdb_key key;
+ struct rhash_head ht_node;
+ struct list_head list;
+ struct xarray ports;
+ int num_ports;
+
+ struct mlx5_flow_handle *egress_handle;
+};
+
struct mlx5_esw_bridge_vlan {
u16 vid;
u16 flags;
@@ -50,6 +173,7 @@ struct mlx5_esw_bridge_vlan {
struct mlx5_pkt_reformat *pkt_reformat_push;
struct mlx5_pkt_reformat *pkt_reformat_pop;
struct mlx5_modify_hdr *pkt_mod_hdr_push_mark;
+ struct mlx5_flow_handle *mcast_handle;
};
struct mlx5_esw_bridge_port {
@@ -58,6 +182,63 @@ struct mlx5_esw_bridge_port {
u16 flags;
struct mlx5_esw_bridge *bridge;
struct xarray vlans;
+ struct {
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_group *filter_fg;
+ struct mlx5_flow_group *vlan_fg;
+ struct mlx5_flow_group *qinq_fg;
+ struct mlx5_flow_group *fwd_fg;
+
+ struct mlx5_flow_handle *filter_handle;
+ struct mlx5_flow_handle *fwd_handle;
+ } mcast;
};
+struct mlx5_esw_bridge {
+ int ifindex;
+ int refcnt;
+ struct list_head list;
+ struct mlx5_esw_bridge_offloads *br_offloads;
+
+ struct list_head fdb_list;
+ struct rhashtable fdb_ht;
+
+ struct list_head mdb_list;
+ struct rhashtable mdb_ht;
+
+ struct mlx5_flow_table *egress_ft;
+ struct mlx5_flow_group *egress_vlan_fg;
+ struct mlx5_flow_group *egress_qinq_fg;
+ struct mlx5_flow_group *egress_mac_fg;
+ struct mlx5_flow_group *egress_miss_fg;
+ struct mlx5_pkt_reformat *egress_miss_pkt_reformat;
+ struct mlx5_flow_handle *egress_miss_handle;
+ unsigned long ageing_time;
+ u32 flags;
+ u16 vlan_proto;
+};
+
+struct mlx5_flow_table *mlx5_esw_bridge_table_create(int max_fte, u32 level,
+ struct mlx5_eswitch *esw);
+unsigned long mlx5_esw_bridge_port_key(struct mlx5_esw_bridge_port *port);
+
+int mlx5_esw_bridge_port_mcast_init(struct mlx5_esw_bridge_port *port);
+void mlx5_esw_bridge_port_mcast_cleanup(struct mlx5_esw_bridge_port *port);
+int mlx5_esw_bridge_vlan_mcast_init(u16 vlan_proto, struct mlx5_esw_bridge_port *port,
+ struct mlx5_esw_bridge_vlan *vlan);
+void mlx5_esw_bridge_vlan_mcast_cleanup(struct mlx5_esw_bridge_vlan *vlan);
+
+int mlx5_esw_bridge_mcast_enable(struct mlx5_esw_bridge *bridge);
+void mlx5_esw_bridge_mcast_disable(struct mlx5_esw_bridge *bridge);
+
+int mlx5_esw_bridge_mdb_init(struct mlx5_esw_bridge *bridge);
+void mlx5_esw_bridge_mdb_cleanup(struct mlx5_esw_bridge *bridge);
+int mlx5_esw_bridge_port_mdb_attach(struct net_device *dev, struct mlx5_esw_bridge_port *port,
+ const unsigned char *addr, u16 vid);
+void mlx5_esw_bridge_port_mdb_detach(struct net_device *dev, struct mlx5_esw_bridge_port *port,
+ const unsigned char *addr, u16 vid);
+void mlx5_esw_bridge_port_mdb_vlan_flush(struct mlx5_esw_bridge_port *port,
+ struct mlx5_esw_bridge_vlan *vlan);
+void mlx5_esw_bridge_mdb_flush(struct mlx5_esw_bridge *bridge);
+
#endif /* _MLX5_ESW_BRIDGE_PRIVATE_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/bridge_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/bridge_tracepoint.h
index 51ac24e6ec3c..1808da214094 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/bridge_tracepoint.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/bridge_tracepoint.h
@@ -110,6 +110,41 @@ DEFINE_EVENT(mlx5_esw_bridge_port_template,
TP_ARGS(port)
);

+DECLARE_EVENT_CLASS(mlx5_esw_bridge_mdb_port_change_template,
+ TP_PROTO(const struct net_device *dev,
+ const struct mlx5_esw_bridge_mdb_entry *mdb),
+ TP_ARGS(dev, mdb),
+ TP_STRUCT__entry(
+ __array(char, dev_name, IFNAMSIZ)
+ __array(unsigned char, addr, ETH_ALEN)
+ __field(u16, vid)
+ __field(int, num_ports)
+ __field(bool, offloaded)),
+ TP_fast_assign(
+ strscpy(__entry->dev_name, netdev_name(dev), IFNAMSIZ);
+ memcpy(__entry->addr, mdb->key.addr, ETH_ALEN);
+ __entry->vid = mdb->key.vid;
+ __entry->num_ports = mdb->num_ports;
+ __entry->offloaded = mdb->egress_handle;),
+ TP_printk("net_device=%s addr=%pM vid=%u num_ports=%d offloaded=%d",
+ __entry->dev_name,
+ __entry->addr,
+ __entry->vid,
+ __entry->num_ports,
+ __entry->offloaded));
+
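+/* Example rendering of this event class (hypothetical values):
+ *   net_device=enp8s0f0 addr=01:00:5e:01:02:03 vid=10 num_ports=2 offloaded=1
+ */
+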
+DEFINE_EVENT(mlx5_esw_bridge_mdb_port_change_template,
+ mlx5_esw_bridge_port_mdb_attach,
+ TP_PROTO(const struct net_device *dev,
+ const struct mlx5_esw_bridge_mdb_entry *mdb),
+ TP_ARGS(dev, mdb));
+
+DEFINE_EVENT(mlx5_esw_bridge_mdb_port_change_template,
+ mlx5_esw_bridge_port_mdb_detach,
+ TP_PROTO(const struct net_device *dev,
+ const struct mlx5_esw_bridge_mdb_entry *mdb),
+ TP_ARGS(dev, mdb));
+
#endif
/* This part must be outside protection */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 8bdf28762f41..19fed514fc17 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1488,7 +1488,7 @@ int mlx5_esw_sf_max_hpf_functions(struct mlx5_core_dev *dev, u16 *max_sfs, u16 *
void *hca_caps;
int err;
- if (!mlx5_core_is_ecpf(dev) || mlx5_core_is_management_pf(dev)) {
+ if (!mlx5_core_is_ecpf(dev)) {
*max_sfs = 0;
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 19e9a77c4633..e9d68fdf68f5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -263,6 +263,7 @@ struct mlx5_esw_offload {
const struct mlx5_eswitch_rep_ops *rep_ops[NUM_REP_TYPES];
u8 inline_mode;
atomic64_t num_flows;
+ u64 num_block_encap;
enum devlink_eswitch_encap_mode encap;
struct ida vport_metadata_ida;
unsigned int host_number; /* ECPF supports one external host */
@@ -748,6 +749,9 @@ void mlx5_eswitch_offloads_destroy_single_fdb(struct mlx5_eswitch *master_esw,
struct mlx5_eswitch *slave_esw);
int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw);
+bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev);
+void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev);
+
static inline int mlx5_eswitch_num_vfs(struct mlx5_eswitch *esw)
{
if (mlx5_esw_allowed(esw))
@@ -761,6 +765,7 @@ mlx5_eswitch_get_slow_fdb(struct mlx5_eswitch *esw)
{
return esw->fdb_table.offloads.slow_fdb;
}
+
#else /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
@@ -805,6 +810,15 @@ mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw)
{
return 0;
}
+
+static inline bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev)
+{
+ return true;
+}
+
+static inline void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev)
+{
+}
#endif /* CONFIG_MLX5_ESWITCH */
#endif /* __MLX5_ESWITCH_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 48036dfddd5e..b6e2709c1371 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -3586,6 +3586,47 @@ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
return err;
}
+bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev)
+{
+ struct devlink *devlink = priv_to_devlink(dev);
+ struct mlx5_eswitch *esw;
+
+ devl_lock(devlink);
+ esw = mlx5_devlink_eswitch_get(devlink);
+ if (IS_ERR(esw)) {
+ devl_unlock(devlink);
+ /* Failure means no eswitch => not possible to change encap */
+ return true;
+ }
+
+ down_write(&esw->mode_lock);
+ if (esw->mode != MLX5_ESWITCH_LEGACY &&
+ esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
+ up_write(&esw->mode_lock);
+ devl_unlock(devlink);
+ return false;
+ }
+
+ esw->offloads.num_block_encap++;
+ up_write(&esw->mode_lock);
+ devl_unlock(devlink);
+ return true;
+}
+
+void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev)
+{
+ struct devlink *devlink = priv_to_devlink(dev);
+ struct mlx5_eswitch *esw;
+
+ esw = mlx5_devlink_eswitch_get(devlink);
+ if (IS_ERR(esw))
+ return;
+
+ down_write(&esw->mode_lock);
+ esw->offloads.num_block_encap--;
+ up_write(&esw->mode_lock);
+}
+
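+/* Illustrative caller pairing (a sketch, not part of this patch): a feature
+ * that cannot coexist with encap, such as IPsec offload setup, pins the
+ * current encap mode for its lifetime:
+ *
+ *	if (!mlx5_eswitch_block_encap(mdev))
+ *		return -EOPNOTSUPP;	(encap enabled, cannot proceed)
+ *	...create IPsec SAs/policies...
+ *	mlx5_eswitch_unblock_encap(mdev);	(on teardown)
+ */
+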
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
enum devlink_eswitch_encap_mode encap,
struct netlink_ext_ack *extack)
@@ -3627,6 +3668,13 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
goto unlock;
}
+ if (esw->offloads.num_block_encap) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can't set encapsulation when IPsec SA and/or policies are configured");
+ err = -EOPNOTSUPP;
+ goto unlock;
+ }
+
esw_destroy_offloads_fdb_tables(esw);
esw->offloads.encap = encap;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 8e3da9d4fe1c..19da02c41616 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -2997,7 +2997,7 @@ static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
goto out_err;
}
- maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_BR_OFFLOAD, 3);
+ maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_BR_OFFLOAD, 4);
if (IS_ERR(maj_prio)) {
err = PTR_ERR(maj_prio);
goto out_err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index 4c9a40211059..932fbc843c69 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -39,7 +39,7 @@
#include "clock.h"
enum {
- MLX5_CYCLES_SHIFT = 23
+ MLX5_CYCLES_SHIFT = 31
};
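+/* Background: the kernel timecounter converts free-running cycles to
+ * nanoseconds as ns = (cycles * mult) >> shift, so a larger
+ * MLX5_CYCLES_SHIFT leaves more fractional bits in 'mult' for fine-grained
+ * frequency adjustment, at the cost of overflowing the 64-bit
+ * multiplication for a smaller maximum cycle delta.
+ */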
enum {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index f95df73d1089..a95d1218def9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -100,15 +100,19 @@ enum {
static struct mlx5_profile profile[] = {
[0] = {
.mask = 0,
+ .num_cmd_caches = MLX5_NUM_COMMAND_CACHES,
},
[1] = {
.mask = MLX5_PROF_MASK_QP_SIZE,
.log_max_qp = 12,
+ .num_cmd_caches = MLX5_NUM_COMMAND_CACHES,
},
[2] = {
.mask = MLX5_PROF_MASK_QP_SIZE |
MLX5_PROF_MASK_MR_CACHE,
.log_max_qp = LOG_MAX_SUPPORTED_QPS,
+ .num_cmd_caches = MLX5_NUM_COMMAND_CACHES,
.mr_cache[0] = {
.size = 500,
.limit = 250
@@ -174,6 +178,11 @@ static struct mlx5_profile profile[] = {
.limit = 4
},
},
+ [3] = {
+ .mask = MLX5_PROF_MASK_QP_SIZE,
+ .log_max_qp = LOG_MAX_SUPPORTED_QPS,
+ .num_cmd_caches = 0,
+ },
};
static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index be0785f83083..5eaab99678ee 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -142,6 +142,7 @@ enum mlx5_semaphore_space_address {
};
#define MLX5_DEFAULT_PROF 2
+#define MLX5_SF_PROF 3
static inline int mlx5_flexible_inlen(struct mlx5_core_dev *dev, size_t fixed,
size_t item_size, size_t num_items,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
index a7377619ba6f..e2f26d0bc615 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
@@ -28,7 +28,7 @@ static int mlx5_sf_dev_probe(struct auxiliary_device *adev, const struct auxilia
mdev->priv.adev_idx = adev->id;
sf_dev->mdev = mdev;
- err = mlx5_mdev_init(mdev, MLX5_DEFAULT_PROF);
+ err = mlx5_mdev_init(mdev, MLX5_SF_PROF);
if (err) {
mlx5_core_warn(mdev, "mlx5_mdev_init on err=%d\n", err);
goto mdev_err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
index ee104cf04392..0eb9a8d7f282 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
@@ -819,14 +819,34 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
case DR_ACTION_TYP_TNL_L2_TO_L2:
break;
case DR_ACTION_TYP_TNL_L3_TO_L2:
- attr.decap_index = action->rewrite->index;
- attr.decap_actions = action->rewrite->num_of_actions;
- attr.decap_with_vlan =
- attr.decap_actions == WITH_VLAN_NUM_HW_ACTIONS;
+ if (action->rewrite->ptrn && action->rewrite->arg) {
+ attr.decap_index = mlx5dr_arg_get_obj_id(action->rewrite->arg);
+ attr.decap_actions = action->rewrite->ptrn->num_of_actions;
+ attr.decap_pat_idx = action->rewrite->ptrn->index;
+ } else {
+ attr.decap_index = action->rewrite->index;
+ attr.decap_actions = action->rewrite->num_of_actions;
+ attr.decap_with_vlan =
+ attr.decap_actions == WITH_VLAN_NUM_HW_ACTIONS;
+ attr.decap_pat_idx = MLX5DR_INVALID_PATTERN_INDEX;
+ }
break;
case DR_ACTION_TYP_MODIFY_HDR:
- attr.modify_index = action->rewrite->index;
- attr.modify_actions = action->rewrite->num_of_actions;
+ if (action->rewrite->single_action_opt) {
+ attr.modify_actions = action->rewrite->num_of_actions;
+ attr.single_modify_action = action->rewrite->data;
+ } else {
+ if (action->rewrite->ptrn && action->rewrite->arg) {
+ attr.modify_index =
+ mlx5dr_arg_get_obj_id(action->rewrite->arg);
+ attr.modify_actions = action->rewrite->ptrn->num_of_actions;
+ attr.modify_pat_idx = action->rewrite->ptrn->index;
+ } else {
+ attr.modify_index = action->rewrite->index;
+ attr.modify_actions = action->rewrite->num_of_actions;
+ attr.modify_pat_idx = MLX5DR_INVALID_PATTERN_INDEX;
+ }
+ }
if (action->rewrite->modify_ttl)
dr_action_modify_ttl_adjust(dmn, &attr, rx_rule,
&recalc_cs_required);
@@ -1365,8 +1385,6 @@ out_err:
return -EINVAL;
}
-#define ACTION_CACHE_LINE_SIZE 64
-
static int
dr_action_create_reformat_action(struct mlx5dr_domain *dmn,
u8 reformat_param_0, u8 reformat_param_1,
@@ -1403,36 +1421,25 @@ dr_action_create_reformat_action(struct mlx5dr_domain *dmn,
}
case DR_ACTION_TYP_TNL_L3_TO_L2:
{
- u8 hw_actions[ACTION_CACHE_LINE_SIZE] = {};
+ u8 hw_actions[DR_ACTION_CACHE_LINE_SIZE] = {};
int ret;
ret = mlx5dr_ste_set_action_decap_l3_list(dmn->ste_ctx,
data, data_sz,
hw_actions,
- ACTION_CACHE_LINE_SIZE,
+ DR_ACTION_CACHE_LINE_SIZE,
&action->rewrite->num_of_actions);
if (ret) {
mlx5dr_dbg(dmn, "Failed creating decap l3 action list\n");
return ret;
}
- action->rewrite->chunk = mlx5dr_icm_alloc_chunk(dmn->action_icm_pool,
- DR_CHUNK_SIZE_8);
- if (!action->rewrite->chunk) {
- mlx5dr_dbg(dmn, "Failed allocating modify header chunk\n");
- return -ENOMEM;
- }
-
- action->rewrite->data = (void *)hw_actions;
- action->rewrite->index = (mlx5dr_icm_pool_get_chunk_icm_addr
- (action->rewrite->chunk) -
- dmn->info.caps.hdr_modify_icm_addr) /
- ACTION_CACHE_LINE_SIZE;
+ action->rewrite->data = hw_actions;
+ action->rewrite->dmn = dmn;
- ret = mlx5dr_send_postsend_action(dmn, action);
+ ret = mlx5dr_ste_alloc_modify_hdr(action);
if (ret) {
- mlx5dr_dbg(dmn, "Writing decap l3 actions to ICM failed\n");
- mlx5dr_icm_free_chunk(action->rewrite->chunk);
+ mlx5dr_dbg(dmn, "Failed preparing reformat data\n");
return ret;
}
return 0;
@@ -1963,7 +1970,6 @@ static int dr_action_create_modify_action(struct mlx5dr_domain *dmn,
__be64 actions[],
struct mlx5dr_action *action)
{
- struct mlx5dr_icm_chunk *chunk;
u32 max_hw_actions;
u32 num_hw_actions;
u32 num_sw_actions;
@@ -1980,15 +1986,9 @@ static int dr_action_create_modify_action(struct mlx5dr_domain *dmn,
return -EINVAL;
}
- chunk = mlx5dr_icm_alloc_chunk(dmn->action_icm_pool, DR_CHUNK_SIZE_16);
- if (!chunk)
- return -ENOMEM;
-
hw_actions = kcalloc(1, max_hw_actions * DR_MODIFY_ACTION_SIZE, GFP_KERNEL);
- if (!hw_actions) {
- ret = -ENOMEM;
- goto free_chunk;
- }
+ if (!hw_actions)
+ return -ENOMEM;
ret = dr_actions_convert_modify_header(action,
max_hw_actions,
@@ -2000,24 +2000,24 @@ static int dr_action_create_modify_action(struct mlx5dr_domain *dmn,
if (ret)
goto free_hw_actions;
- action->rewrite->chunk = chunk;
action->rewrite->modify_ttl = modify_ttl;
action->rewrite->data = (u8 *)hw_actions;
action->rewrite->num_of_actions = num_hw_actions;
- action->rewrite->index = (mlx5dr_icm_pool_get_chunk_icm_addr(chunk) -
- dmn->info.caps.hdr_modify_icm_addr) /
- ACTION_CACHE_LINE_SIZE;
- ret = mlx5dr_send_postsend_action(dmn, action);
- if (ret)
- goto free_hw_actions;
+ if (num_hw_actions == 1 &&
+ dmn->info.caps.sw_format_ver >= MLX5_STEERING_FORMAT_CONNECTX_6DX) {
+ action->rewrite->single_action_opt = true;
+ } else {
+ action->rewrite->single_action_opt = false;
+ ret = mlx5dr_ste_alloc_modify_hdr(action);
+ if (ret)
+ goto free_hw_actions;
+ }
return 0;
free_hw_actions:
kfree(hw_actions);
-free_chunk:
- mlx5dr_icm_free_chunk(chunk);
return ret;
}
@@ -2162,7 +2162,8 @@ int mlx5dr_action_destroy(struct mlx5dr_action *action)
refcount_dec(&action->reformat->dmn->refcount);
break;
case DR_ACTION_TYP_TNL_L3_TO_L2:
- mlx5dr_icm_free_chunk(action->rewrite->chunk);
+ mlx5dr_ste_free_modify_hdr(action);
+ kfree(action->rewrite->data);
refcount_dec(&action->rewrite->dmn->refcount);
break;
case DR_ACTION_TYP_L2_TO_TNL_L2:
@@ -2173,7 +2174,8 @@ int mlx5dr_action_destroy(struct mlx5dr_action *action)
refcount_dec(&action->reformat->dmn->refcount);
break;
case DR_ACTION_TYP_MODIFY_HDR:
- mlx5dr_icm_free_chunk(action->rewrite->chunk);
+ if (!action->rewrite->single_action_opt)
+ mlx5dr_ste_free_modify_hdr(action);
kfree(action->rewrite->data);
refcount_dec(&action->rewrite->dmn->refcount);
break;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_arg.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_arg.c
new file mode 100644
index 000000000000..01ed6442095d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_arg.c
@@ -0,0 +1,273 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include "dr_types.h"
+
+#define DR_ICM_MODIFY_HDR_GRANULARITY_4K 12
+
+/* modify-header arg pool */
+enum dr_arg_chunk_size {
+ DR_ARG_CHUNK_SIZE_1,
+ DR_ARG_CHUNK_SIZE_MIN = DR_ARG_CHUNK_SIZE_1, /* keep updated when changing */
+ DR_ARG_CHUNK_SIZE_2,
+ DR_ARG_CHUNK_SIZE_3,
+ DR_ARG_CHUNK_SIZE_4,
+ DR_ARG_CHUNK_SIZE_MAX,
+};
+
+/* argument pool area */
+struct dr_arg_pool {
+ enum dr_arg_chunk_size log_chunk_size;
+ struct mlx5dr_domain *dmn;
+ struct list_head free_list;
+ struct mutex mutex; /* protect arg pool */
+};
+
+struct mlx5dr_arg_mgr {
+ struct mlx5dr_domain *dmn;
+ struct dr_arg_pool *pools[DR_ARG_CHUNK_SIZE_MAX];
+};
+
+static int dr_arg_pool_alloc_objs(struct dr_arg_pool *pool)
+{
+ struct mlx5dr_arg_obj *arg_obj, *tmp_arg;
+ struct list_head cur_list;
+ u16 object_range;
+ int num_of_objects;
+ u32 obj_id = 0;
+ int i, ret;
+
+ INIT_LIST_HEAD(&cur_list);
+
+ object_range =
+ max_t(u32, pool->dmn->info.caps.log_header_modify_argument_granularity,
+ DR_ICM_MODIFY_HDR_GRANULARITY_4K);
+ object_range =
+ min_t(u32, pool->dmn->info.caps.log_header_modify_argument_max_alloc,
+ object_range);
+
+ if (pool->log_chunk_size > object_range) {
+ mlx5dr_err(pool->dmn, "Required chunk size (%d) is not supported\n",
+ pool->log_chunk_size);
+ return -ENOMEM;
+ }
+
+ num_of_objects = (1 << (object_range - pool->log_chunk_size));
+ /* Only one devx object per range */
+ ret = mlx5dr_cmd_create_modify_header_arg(pool->dmn->mdev,
+ object_range,
+ pool->dmn->pdn,
+ &obj_id);
+ if (ret) {
+ mlx5dr_err(pool->dmn, "failed allocating object with range: %d:\n",
+ object_range);
+ return -EAGAIN;
+ }
+
+ for (i = 0; i < num_of_objects; i++) {
+ arg_obj = kzalloc(sizeof(*arg_obj), GFP_KERNEL);
+ if (!arg_obj) {
+ ret = -ENOMEM;
+ goto clean_arg_obj;
+ }
+
+ arg_obj->log_chunk_size = pool->log_chunk_size;
+
+ list_add_tail(&arg_obj->list_node, &cur_list);
+
+ arg_obj->obj_id = obj_id;
+ arg_obj->obj_offset = i * (1 << pool->log_chunk_size);
+ }
+ list_splice_tail_init(&cur_list, &pool->free_list);
+
+ return 0;
+
+clean_arg_obj:
+ mlx5dr_cmd_destroy_modify_header_arg(pool->dmn->mdev, obj_id);
+ list_for_each_entry_safe(arg_obj, tmp_arg, &cur_list, list_node) {
+ list_del(&arg_obj->list_node);
+ kfree(arg_obj);
+ }
+ return ret;
+}
+
+static struct mlx5dr_arg_obj *dr_arg_pool_get_arg_obj(struct dr_arg_pool *pool)
+{
+ struct mlx5dr_arg_obj *arg_obj = NULL;
+ int ret;
+
+ mutex_lock(&pool->mutex);
+ if (list_empty(&pool->free_list)) {
+ ret = dr_arg_pool_alloc_objs(pool);
+ if (ret)
+ goto out;
+ }
+
+ arg_obj = list_first_entry_or_null(&pool->free_list,
+ struct mlx5dr_arg_obj,
+ list_node);
+ WARN(!arg_obj, "couldn't get dr arg obj from pool");
+
+ if (arg_obj)
+ list_del_init(&arg_obj->list_node);
+
+out:
+ mutex_unlock(&pool->mutex);
+ return arg_obj;
+}
+
+static void dr_arg_pool_put_arg_obj(struct dr_arg_pool *pool,
+ struct mlx5dr_arg_obj *arg_obj)
+{
+ mutex_lock(&pool->mutex);
+ list_add(&arg_obj->list_node, &pool->free_list);
+ mutex_unlock(&pool->mutex);
+}
+
+static struct dr_arg_pool *dr_arg_pool_create(struct mlx5dr_domain *dmn,
+ enum dr_arg_chunk_size chunk_size)
+{
+ struct dr_arg_pool *pool;
+
+ pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ return NULL;
+
+ pool->dmn = dmn;
+
+ INIT_LIST_HEAD(&pool->free_list);
+ mutex_init(&pool->mutex);
+
+ pool->log_chunk_size = chunk_size;
+ if (dr_arg_pool_alloc_objs(pool))
+ goto free_pool;
+
+ return pool;
+
+free_pool:
+ kfree(pool);
+
+ return NULL;
+}
+
+static void dr_arg_pool_destroy(struct dr_arg_pool *pool)
+{
+ struct mlx5dr_arg_obj *arg_obj, *tmp_arg;
+
+ list_for_each_entry_safe(arg_obj, tmp_arg, &pool->free_list, list_node) {
+ list_del(&arg_obj->list_node);
+ if (!arg_obj->obj_offset) /* the first in range */
+ mlx5dr_cmd_destroy_modify_header_arg(pool->dmn->mdev, arg_obj->obj_id);
+ kfree(arg_obj);
+ }
+
+ mutex_destroy(&pool->mutex);
+ kfree(pool);
+}
+
+static enum dr_arg_chunk_size dr_arg_get_chunk_size(u16 num_of_actions)
+{
+ if (num_of_actions <= 8)
+ return DR_ARG_CHUNK_SIZE_1;
+ if (num_of_actions <= 16)
+ return DR_ARG_CHUNK_SIZE_2;
+ if (num_of_actions <= 32)
+ return DR_ARG_CHUNK_SIZE_3;
+ if (num_of_actions <= 64)
+ return DR_ARG_CHUNK_SIZE_4;
+
+ return DR_ARG_CHUNK_SIZE_MAX;
+}
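+
+/* For example, a modify header with 10 actions falls into the
+ * "up to 16" bucket above and is served from the DR_ARG_CHUNK_SIZE_2 pool.
+ */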
+
+u32 mlx5dr_arg_get_obj_id(struct mlx5dr_arg_obj *arg_obj)
+{
+ return (arg_obj->obj_id + arg_obj->obj_offset);
+}
+
+struct mlx5dr_arg_obj *mlx5dr_arg_get_obj(struct mlx5dr_arg_mgr *mgr,
+ u16 num_of_actions,
+ u8 *data)
+{
+ u32 size = dr_arg_get_chunk_size(num_of_actions);
+ struct mlx5dr_arg_obj *arg_obj;
+ int ret;
+
+ if (size >= DR_ARG_CHUNK_SIZE_MAX)
+ return NULL;
+
+ arg_obj = dr_arg_pool_get_arg_obj(mgr->pools[size]);
+ if (!arg_obj) {
+ mlx5dr_err(mgr->dmn, "Failed allocating args object for modify header\n");
+ return NULL;
+ }
+
+ /* write it into the hw */
+ ret = mlx5dr_send_postsend_args(mgr->dmn,
+ mlx5dr_arg_get_obj_id(arg_obj),
+ num_of_actions, data);
+ if (ret) {
+ mlx5dr_err(mgr->dmn, "Failed writing args object\n");
+ goto put_obj;
+ }
+
+ return arg_obj;
+
+put_obj:
+ mlx5dr_arg_put_obj(mgr, arg_obj);
+ return NULL;
+}
+
+void mlx5dr_arg_put_obj(struct mlx5dr_arg_mgr *mgr,
+ struct mlx5dr_arg_obj *arg_obj)
+{
+ dr_arg_pool_put_arg_obj(mgr->pools[arg_obj->log_chunk_size], arg_obj);
+}
+
+struct mlx5dr_arg_mgr*
+mlx5dr_arg_mgr_create(struct mlx5dr_domain *dmn)
+{
+ struct mlx5dr_arg_mgr *pool_mgr;
+ int i;
+
+ if (!mlx5dr_domain_is_support_ptrn_arg(dmn))
+ return NULL;
+
+ pool_mgr = kzalloc(sizeof(*pool_mgr), GFP_KERNEL);
+ if (!pool_mgr)
+ return NULL;
+
+ pool_mgr->dmn = dmn;
+
+ for (i = 0; i < DR_ARG_CHUNK_SIZE_MAX; i++) {
+ pool_mgr->pools[i] = dr_arg_pool_create(dmn, i);
+ if (!pool_mgr->pools[i])
+ goto clean_pools;
+ }
+
+ return pool_mgr;
+
+clean_pools:
+ for (i--; i >= 0; i--)
+ dr_arg_pool_destroy(pool_mgr->pools[i]);
+
+ kfree(pool_mgr);
+ return NULL;
+}
+
+void mlx5dr_arg_mgr_destroy(struct mlx5dr_arg_mgr *mgr)
+{
+ struct dr_arg_pool **pools;
+ int i;
+
+ if (!mgr)
+ return;
+
+ pools = mgr->pools;
+ for (i = 0; i < DR_ARG_CHUNK_SIZE_MAX; i++)
+ dr_arg_pool_destroy(pools[i]);
+
+ kfree(mgr);
+}
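+
+/* Typical lifecycle of the argument manager defined above (illustrative
+ * sketch; error handling elided):
+ *
+ *	struct mlx5dr_arg_mgr *mgr = mlx5dr_arg_mgr_create(dmn);
+ *	struct mlx5dr_arg_obj *arg;
+ *
+ *	arg = mlx5dr_arg_get_obj(mgr, num_of_actions, data);
+ *	...reference mlx5dr_arg_get_obj_id(arg) from the STE...
+ *	mlx5dr_arg_put_obj(mgr, arg);
+ *	mlx5dr_arg_mgr_destroy(mgr);
+ */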
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
index 07b6a6dcb92f..3835ba3f4dda 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
@@ -132,6 +132,17 @@ int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
caps->isolate_vl_tc = MLX5_CAP_GEN(mdev, isolate_vl_tc_new);
+ caps->support_modify_argument =
+ MLX5_CAP_GEN_64(mdev, general_obj_types) &
+ MLX5_GENERAL_OBJ_TYPES_CAP_HEADER_MODIFY_ARGUMENT;
+
+ if (caps->support_modify_argument) {
+ caps->log_header_modify_argument_granularity =
+ MLX5_CAP_GEN(mdev, log_header_modify_argument_granularity);
+ caps->log_header_modify_argument_max_alloc =
+ MLX5_CAP_GEN(mdev, log_header_modify_argument_max_alloc);
+ }
+
/* geneve_tlv_option_0_exist is the indication of
* STE support for lookup type flex_parser_ok
*/
@@ -200,6 +211,12 @@ int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
caps->hdr_modify_icm_addr =
MLX5_CAP64_DEV_MEM(mdev, header_modify_sw_icm_start_address);
+ caps->log_modify_pattern_icm_size =
+ MLX5_CAP_DEV_MEM(mdev, log_header_modify_pattern_sw_icm_size);
+
+ caps->hdr_modify_pattern_icm_addr =
+ MLX5_CAP64_DEV_MEM(mdev, header_modify_pattern_sw_icm_start_address);
+
caps->roce_min_src_udp = MLX5_CAP_ROCE(mdev, r_roce_min_src_udp_port);
caps->is_ecpf = mlx5_core_is_ecpf_esw_manager(mdev);
@@ -676,6 +693,49 @@ int mlx5dr_cmd_query_gid(struct mlx5_core_dev *mdev, u8 vhca_port_num,
return 0;
}
+int mlx5dr_cmd_create_modify_header_arg(struct mlx5_core_dev *dev,
+ u16 log_obj_range, u32 pd,
+ u32 *obj_id)
+{
+ u32 in[MLX5_ST_SZ_DW(create_modify_header_arg_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
+ void *attr;
+ int ret;
+
+ attr = MLX5_ADDR_OF(create_modify_header_arg_in, in, hdr);
+ MLX5_SET(general_obj_in_cmd_hdr, attr, opcode,
+ MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, attr, obj_type,
+ MLX5_OBJ_TYPE_HEADER_MODIFY_ARGUMENT);
+ MLX5_SET(general_obj_in_cmd_hdr, attr,
+ op_param.create.log_obj_range, log_obj_range);
+
+ attr = MLX5_ADDR_OF(create_modify_header_arg_in, in, arg);
+ MLX5_SET(modify_header_arg, attr, access_pd, pd);
+
+ ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ if (ret)
+ return ret;
+
+ *obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+ return 0;
+}
+
+void mlx5dr_cmd_destroy_modify_header_arg(struct mlx5_core_dev *dev,
+ u32 obj_id)
+{
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
+ u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
+
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
+ MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
+ MLX5_OBJ_TYPE_HEADER_MODIFY_ARGUMENT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, obj_id);
+
+ mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
static int mlx5dr_cmd_set_extended_dest(struct mlx5_core_dev *dev,
struct mlx5dr_cmd_fte_info *fte,
bool *extended_dest)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c
index db81d881d38e..1ff8bde90e1e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c
@@ -140,10 +140,31 @@ dr_dump_rule_action_mem(struct seq_file *file, const u64 rule_id,
action->flow_tag->flow_tag);
break;
case DR_ACTION_TYP_MODIFY_HDR:
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
+ {
+ struct mlx5dr_ptrn_obj *ptrn = action->rewrite->ptrn;
+ struct mlx5dr_arg_obj *arg = action->rewrite->arg;
+ u8 *rewrite_data = action->rewrite->data;
+ bool ptrn_arg;
+ int i;
+
+ ptrn_arg = !action->rewrite->single_action_opt && ptrn && arg;
+
+ seq_printf(file, "%d,0x%llx,0x%llx,0x%x,%d,0x%x,0x%x,0x%x",
DR_DUMP_REC_TYPE_ACTION_MODIFY_HDR, action_id,
- rule_id, action->rewrite->index);
+ rule_id, action->rewrite->index,
+ action->rewrite->single_action_opt,
+ action->rewrite->num_of_actions,
+ ptrn_arg ? ptrn->index : 0,
+ ptrn_arg ? mlx5dr_arg_get_obj_id(arg) : 0);
+
+ for (i = 0; i < action->rewrite->num_of_actions; i++) {
+ seq_printf(file, ",0x%016llx",
+ be64_to_cpu(((__be64 *)rewrite_data)[i]));
+ }
+
+ seq_puts(file, "\n");
break;
+ }
case DR_ACTION_TYP_VPORT:
seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
DR_DUMP_REC_TYPE_ACTION_VPORT, action_id, rule_id,
@@ -157,7 +178,10 @@ dr_dump_rule_action_mem(struct seq_file *file, const u64 rule_id,
case DR_ACTION_TYP_TNL_L3_TO_L2:
seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
DR_DUMP_REC_TYPE_ACTION_DECAP_L3, action_id,
- rule_id, action->rewrite->index);
+ rule_id,
+ (action->rewrite->ptrn && action->rewrite->arg) ?
+ mlx5dr_arg_get_obj_id(action->rewrite->arg) :
+ action->rewrite->index);
break;
case DR_ACTION_TYP_L2_TO_TNL_L2:
seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
index 5b8bb2ca31e6..9a2dfe6ebe31 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
@@ -10,6 +10,46 @@
((dmn)->info.caps.dmn_type##_sw_owner_v2 && \
(dmn)->info.caps.sw_format_ver <= MLX5_STEERING_FORMAT_CONNECTX_7))
+bool mlx5dr_domain_is_support_ptrn_arg(struct mlx5dr_domain *dmn)
+{
+ return dmn->info.caps.sw_format_ver >= MLX5_STEERING_FORMAT_CONNECTX_6DX &&
+ dmn->info.caps.support_modify_argument;
+}
+
+static int dr_domain_init_modify_header_resources(struct mlx5dr_domain *dmn)
+{
+ if (!mlx5dr_domain_is_support_ptrn_arg(dmn))
+ return 0;
+
+ dmn->ptrn_mgr = mlx5dr_ptrn_mgr_create(dmn);
+ if (!dmn->ptrn_mgr) {
+ mlx5dr_err(dmn, "Couldn't create ptrn_mgr\n");
+ return -ENOMEM;
+ }
+
+ /* create argument pool */
+ dmn->arg_mgr = mlx5dr_arg_mgr_create(dmn);
+ if (!dmn->arg_mgr) {
+ mlx5dr_err(dmn, "Couldn't create arg_mgr\n");
+ goto free_modify_header_pattern;
+ }
+
+ return 0;
+
+free_modify_header_pattern:
+ mlx5dr_ptrn_mgr_destroy(dmn->ptrn_mgr);
+ return -ENOMEM;
+}
+
+static void dr_domain_destroy_modify_header_resources(struct mlx5dr_domain *dmn)
+{
+ if (!mlx5dr_domain_is_support_ptrn_arg(dmn))
+ return;
+
+ mlx5dr_arg_mgr_destroy(dmn->arg_mgr);
+ mlx5dr_ptrn_mgr_destroy(dmn->ptrn_mgr);
+}
+
static void dr_domain_init_csum_recalc_fts(struct mlx5dr_domain *dmn)
{
/* Per vport cached FW FT for checksum recalculation, this
@@ -149,14 +189,22 @@ static int dr_domain_init_resources(struct mlx5dr_domain *dmn)
goto clean_uar;
}
+ ret = dr_domain_init_modify_header_resources(dmn);
+ if (ret) {
+ mlx5dr_err(dmn, "Couldn't create modify-header-resources\n");
+ goto clean_mem_resources;
+ }
+
ret = mlx5dr_send_ring_alloc(dmn);
if (ret) {
mlx5dr_err(dmn, "Couldn't create send-ring\n");
- goto clean_mem_resources;
+ goto clean_modify_hdr;
}
return 0;
+clean_modify_hdr:
+ dr_domain_destroy_modify_header_resources(dmn);
clean_mem_resources:
dr_domain_uninit_mem_resources(dmn);
clean_uar:
@@ -170,6 +218,7 @@ clean_pd:
static void dr_domain_uninit_resources(struct mlx5dr_domain *dmn)
{
mlx5dr_send_ring_free(dmn, dmn->send_ring);
+ dr_domain_destroy_modify_header_resources(dmn);
dr_domain_uninit_mem_resources(dmn);
mlx5_put_uars_page(dmn->mdev, dmn->uar);
mlx5_core_dealloc_pd(dmn->mdev, dmn->pdn);
@@ -215,7 +264,7 @@ static int dr_domain_query_vport(struct mlx5dr_domain *dmn,
return 0;
}
-static int dr_domain_query_esw_mngr(struct mlx5dr_domain *dmn)
+static int dr_domain_query_esw_mgr(struct mlx5dr_domain *dmn)
{
return dr_domain_query_vport(dmn, 0, false,
&dmn->info.caps.vports.esw_manager_caps);
@@ -321,7 +370,7 @@ static int dr_domain_query_fdb_caps(struct mlx5_core_dev *mdev,
* vports (vport 0, VFs and SFs) will be queried dynamically.
*/
- ret = dr_domain_query_esw_mngr(dmn);
+ ret = dr_domain_query_esw_mgr(dmn);
if (ret) {
mlx5dr_err(dmn, "Failed to query eswitch manager vport caps (err: %d)", ret);
goto free_vports_caps_xa;
@@ -435,6 +484,9 @@ mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type)
dmn->info.max_log_action_icm_sz = DR_CHUNK_SIZE_4K;
dmn->info.max_log_sw_icm_sz = min_t(u32, DR_CHUNK_SIZE_1024K,
dmn->info.caps.log_icm_size);
+ dmn->info.max_log_modify_hdr_pattern_icm_sz =
+ min_t(u32, DR_CHUNK_SIZE_4K,
+ dmn->info.caps.log_modify_pattern_icm_size);
if (!dmn->info.supp_sw_steering) {
mlx5dr_err(dmn, "SW steering is not supported\n");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
index 3eb6719bc8eb..04fc170a6c16 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
@@ -107,9 +107,9 @@ static struct mlx5dr_icm_mr *
dr_icm_pool_mr_create(struct mlx5dr_icm_pool *pool)
{
struct mlx5_core_dev *mdev = pool->dmn->mdev;
- enum mlx5_sw_icm_type dm_type;
+ enum mlx5_sw_icm_type dm_type = 0;
struct mlx5dr_icm_mr *icm_mr;
- size_t log_align_base;
+ size_t log_align_base = 0;
int err;
icm_mr = kvzalloc(sizeof(*icm_mr), GFP_KERNEL);
@@ -121,14 +121,25 @@ dr_icm_pool_mr_create(struct mlx5dr_icm_pool *pool)
icm_mr->dm.length = mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz,
pool->icm_type);
- if (pool->icm_type == DR_ICM_TYPE_STE) {
+ switch (pool->icm_type) {
+ case DR_ICM_TYPE_STE:
dm_type = MLX5_SW_ICM_TYPE_STEERING;
log_align_base = ilog2(icm_mr->dm.length);
- } else {
+ break;
+ case DR_ICM_TYPE_MODIFY_ACTION:
dm_type = MLX5_SW_ICM_TYPE_HEADER_MODIFY;
/* Align base is 64B */
log_align_base = ilog2(DR_ICM_MODIFY_HDR_ALIGN_BASE);
+ break;
+ case DR_ICM_TYPE_MODIFY_HDR_PTRN:
+ dm_type = MLX5_SW_ICM_TYPE_HEADER_MODIFY_PATTERN;
+ /* Align base is 64B */
+ log_align_base = ilog2(DR_ICM_MODIFY_HDR_ALIGN_BASE);
+ break;
+ default:
+ WARN_ON(pool->icm_type);
}
+
icm_mr->dm.type = dm_type;
err = mlx5_dm_sw_icm_alloc(mdev, icm_mr->dm.type, icm_mr->dm.length,
@@ -493,27 +504,33 @@ struct mlx5dr_icm_pool *mlx5dr_icm_pool_create(struct mlx5dr_domain *dmn,
enum mlx5dr_icm_type icm_type)
{
u32 num_of_chunks, entry_size, max_hot_size;
- enum mlx5dr_icm_chunk_size max_log_chunk_sz;
struct mlx5dr_icm_pool *pool;
- if (icm_type == DR_ICM_TYPE_STE)
- max_log_chunk_sz = dmn->info.max_log_sw_icm_sz;
- else
- max_log_chunk_sz = dmn->info.max_log_action_icm_sz;
-
pool = kvzalloc(sizeof(*pool), GFP_KERNEL);
if (!pool)
return NULL;
pool->dmn = dmn;
pool->icm_type = icm_type;
- pool->max_log_chunk_sz = max_log_chunk_sz;
pool->chunks_kmem_cache = dmn->chunks_kmem_cache;
INIT_LIST_HEAD(&pool->buddy_mem_list);
-
mutex_init(&pool->mutex);
+ switch (icm_type) {
+ case DR_ICM_TYPE_STE:
+ pool->max_log_chunk_sz = dmn->info.max_log_sw_icm_sz;
+ break;
+ case DR_ICM_TYPE_MODIFY_ACTION:
+ pool->max_log_chunk_sz = dmn->info.max_log_action_icm_sz;
+ break;
+ case DR_ICM_TYPE_MODIFY_HDR_PTRN:
+ pool->max_log_chunk_sz = dmn->info.max_log_modify_hdr_pattern_icm_sz;
+ break;
+ default:
+ WARN_ON(icm_type);
+ }
+
entry_size = mlx5dr_icm_pool_dm_type_to_entry_size(pool->icm_type);
max_hot_size = mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ptrn.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ptrn.c
new file mode 100644
index 000000000000..13e06a6a6b22
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ptrn.c
@@ -0,0 +1,241 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include "dr_types.h"
+#include "mlx5_ifc_dr_ste_v1.h"
+
+enum dr_ptrn_modify_hdr_action_id {
+ DR_PTRN_MODIFY_HDR_ACTION_ID_NOP = 0x00,
+ DR_PTRN_MODIFY_HDR_ACTION_ID_COPY = 0x05,
+ DR_PTRN_MODIFY_HDR_ACTION_ID_SET = 0x06,
+ DR_PTRN_MODIFY_HDR_ACTION_ID_ADD = 0x07,
+ DR_PTRN_MODIFY_HDR_ACTION_ID_INSERT_INLINE = 0x0a,
+};
+
+struct mlx5dr_ptrn_mgr {
+ struct mlx5dr_domain *dmn;
+ struct mlx5dr_icm_pool *ptrn_icm_pool;
+ /* cache for modify_header ptrn */
+ struct list_head ptrn_list;
+ struct mutex modify_hdr_mutex; /* protect the pattern cache */
+};
+
+/* Cache structure and functions */
+static bool dr_ptrn_compare_modify_hdr(size_t cur_num_of_actions,
+ __be64 cur_hw_actions[],
+ size_t num_of_actions,
+ __be64 hw_actions[])
+{
+ int i;
+
+ if (cur_num_of_actions != num_of_actions)
+ return false;
+
+ for (i = 0; i < num_of_actions; i++) {
+ u8 action_id =
+ MLX5_GET(ste_double_action_set_v1, &hw_actions[i], action_id);
+
+ if (action_id == DR_PTRN_MODIFY_HDR_ACTION_ID_COPY) {
+ if (hw_actions[i] != cur_hw_actions[i])
+ return false;
+ } else {
+ if ((__force __be32)hw_actions[i] !=
+ (__force __be32)cur_hw_actions[i])
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static struct mlx5dr_ptrn_obj *
+dr_ptrn_find_cached_pattern(struct mlx5dr_ptrn_mgr *mgr,
+ size_t num_of_actions,
+ __be64 hw_actions[])
+{
+ struct mlx5dr_ptrn_obj *cached_pattern;
+ struct mlx5dr_ptrn_obj *tmp;
+
+ list_for_each_entry_safe(cached_pattern, tmp, &mgr->ptrn_list, list) {
+ if (dr_ptrn_compare_modify_hdr(cached_pattern->num_of_actions,
+ (__be64 *)cached_pattern->data,
+ num_of_actions,
+ hw_actions)) {
+ /* Move this pattern to the head of the list,
+ * as it is likely to be used again soon.
+ */
+ list_del_init(&cached_pattern->list);
+ list_add(&cached_pattern->list, &mgr->ptrn_list);
+ return cached_pattern;
+ }
+ }
+
+ return NULL;
+}
+
+static struct mlx5dr_ptrn_obj *
+dr_ptrn_alloc_pattern(struct mlx5dr_ptrn_mgr *mgr,
+ u16 num_of_actions, u8 *data)
+{
+ struct mlx5dr_ptrn_obj *pattern;
+ struct mlx5dr_icm_chunk *chunk;
+ u32 chunk_size;
+ u32 index;
+
+ chunk_size = ilog2(num_of_actions);
+ /* HW modify action index granularity is at least 64B */
+ chunk_size = max_t(u32, chunk_size, DR_CHUNK_SIZE_8);
+
+ chunk = mlx5dr_icm_alloc_chunk(mgr->ptrn_icm_pool, chunk_size);
+ if (!chunk)
+ return NULL;
+
+ index = (mlx5dr_icm_pool_get_chunk_icm_addr(chunk) -
+ mgr->dmn->info.caps.hdr_modify_pattern_icm_addr) /
+ DR_ACTION_CACHE_LINE_SIZE;
+
+ pattern = kzalloc(sizeof(*pattern), GFP_KERNEL);
+ if (!pattern)
+ goto free_chunk;
+
+ pattern->data = kzalloc(num_of_actions * DR_MODIFY_ACTION_SIZE *
+ sizeof(*pattern->data), GFP_KERNEL);
+ if (!pattern->data)
+ goto free_pattern;
+
+ memcpy(pattern->data, data, num_of_actions * DR_MODIFY_ACTION_SIZE);
+ pattern->chunk = chunk;
+ pattern->index = index;
+ pattern->num_of_actions = num_of_actions;
+
+ list_add(&pattern->list, &mgr->ptrn_list);
+ refcount_set(&pattern->refcount, 1);
+
+ return pattern;
+
+free_pattern:
+ kfree(pattern);
+free_chunk:
+ mlx5dr_icm_free_chunk(chunk);
+ return NULL;
+}
+
+static void
+dr_ptrn_free_pattern(struct mlx5dr_ptrn_obj *pattern)
+{
+ list_del(&pattern->list);
+ mlx5dr_icm_free_chunk(pattern->chunk);
+ kfree(pattern->data);
+ kfree(pattern);
+}
+
+struct mlx5dr_ptrn_obj *
+mlx5dr_ptrn_cache_get_pattern(struct mlx5dr_ptrn_mgr *mgr,
+ u16 num_of_actions,
+ u8 *data)
+{
+ struct mlx5dr_ptrn_obj *pattern;
+ u64 *hw_actions;
+ u8 action_id;
+ int i;
+
+ mutex_lock(&mgr->modify_hdr_mutex);
+ pattern = dr_ptrn_find_cached_pattern(mgr,
+ num_of_actions,
+ (__be64 *)data);
+ if (!pattern) {
+ /* Alloc and add new pattern to cache */
+ pattern = dr_ptrn_alloc_pattern(mgr, num_of_actions, data);
+ if (!pattern)
+ goto out_unlock;
+
+ hw_actions = (u64 *)pattern->data;
+ /* Mask out the inline data to create a valid pattern,
+ * since the HW performs an OR between the argument and
+ * the pattern when executing the rewrite.
+ */
+ for (i = 0; i < num_of_actions; i++) {
+ action_id = MLX5_GET(ste_double_action_set_v1, &hw_actions[i], action_id);
+
+ if (action_id == DR_PTRN_MODIFY_HDR_ACTION_ID_SET ||
+ action_id == DR_PTRN_MODIFY_HDR_ACTION_ID_ADD ||
+ action_id == DR_PTRN_MODIFY_HDR_ACTION_ID_INSERT_INLINE)
+ MLX5_SET(ste_double_action_set_v1, &hw_actions[i], inline_data, 0);
+ }
+
+ if (mlx5dr_send_postsend_pattern(mgr->dmn, pattern->chunk,
+ num_of_actions, pattern->data)) {
+ refcount_dec(&pattern->refcount);
+ goto free_pattern;
+ }
+ } else {
+ refcount_inc(&pattern->refcount);
+ }
+
+ mutex_unlock(&mgr->modify_hdr_mutex);
+
+ return pattern;
+
+free_pattern:
+ dr_ptrn_free_pattern(pattern);
+out_unlock:
+ mutex_unlock(&mgr->modify_hdr_mutex);
+ return NULL;
+}
+
+void
+mlx5dr_ptrn_cache_put_pattern(struct mlx5dr_ptrn_mgr *mgr,
+ struct mlx5dr_ptrn_obj *pattern)
+{
+ mutex_lock(&mgr->modify_hdr_mutex);
+
+ if (refcount_dec_and_test(&pattern->refcount))
+ dr_ptrn_free_pattern(pattern);
+
+ mutex_unlock(&mgr->modify_hdr_mutex);
+}
+
+struct mlx5dr_ptrn_mgr *mlx5dr_ptrn_mgr_create(struct mlx5dr_domain *dmn)
+{
+ struct mlx5dr_ptrn_mgr *mgr;
+
+ if (!mlx5dr_domain_is_support_ptrn_arg(dmn))
+ return NULL;
+
+ mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
+ if (!mgr)
+ return NULL;
+
+ mgr->dmn = dmn;
+ mgr->ptrn_icm_pool = mlx5dr_icm_pool_create(dmn, DR_ICM_TYPE_MODIFY_HDR_PTRN);
+ if (!mgr->ptrn_icm_pool) {
+ mlx5dr_err(dmn, "Couldn't get modify-header-pattern memory\n");
+ goto free_mgr;
+ }
+
+ INIT_LIST_HEAD(&mgr->ptrn_list);
+ return mgr;
+
+free_mgr:
+ kfree(mgr);
+ return NULL;
+}
+
+void mlx5dr_ptrn_mgr_destroy(struct mlx5dr_ptrn_mgr *mgr)
+{
+ struct mlx5dr_ptrn_obj *pattern;
+ struct mlx5dr_ptrn_obj *tmp;
+
+ if (!mgr)
+ return;
+
+ WARN_ON(!list_empty(&mgr->ptrn_list));
+
+ list_for_each_entry_safe(pattern, tmp, &mgr->ptrn_list, list) {
+ list_del(&pattern->list);
+ kfree(pattern->data);
+ kfree(pattern);
+ }
+
+ mlx5dr_icm_pool_destroy(mgr->ptrn_icm_pool);
+ kfree(mgr);
+}
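+
+/* Illustrative pairing of the pattern cache API above (sketch; error
+ * handling elided):
+ *
+ *	ptrn = mlx5dr_ptrn_cache_get_pattern(mgr, num_of_actions, data);
+ *	(returns a refcount-bumped cached entry, or writes a new one to ICM)
+ *	...
+ *	mlx5dr_ptrn_cache_put_pattern(mgr, ptrn);
+ *	(the ICM chunk is freed once the last reference is dropped)
+ */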
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
index fd2d31cdbcf9..4a5ae86e2b62 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
@@ -18,7 +18,13 @@ struct dr_data_seg {
unsigned int send_flags;
};
+enum send_info_type {
+ WRITE_ICM = 0,
+ GTA_ARG = 1,
+};
+
struct postsend_info {
+ enum send_info_type type;
struct dr_data_seg write;
struct dr_data_seg read;
u64 remote_addr;
@@ -261,9 +267,10 @@ static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev,
dr_qp->rq.pc = 0;
dr_qp->rq.cc = 0;
- dr_qp->rq.wqe_cnt = 4;
+ dr_qp->rq.wqe_cnt = 256;
dr_qp->sq.pc = 0;
dr_qp->sq.cc = 0;
+ dr_qp->sq.head = 0;
dr_qp->sq.wqe_cnt = roundup_pow_of_two(attr->max_send_wr);
MLX5_SET(qpc, temp_qpc, log_rq_stride, ilog2(MLX5_SEND_WQE_DS) - 4);
@@ -362,39 +369,113 @@ static void dr_cmd_notify_hw(struct mlx5dr_qp *dr_qp, void *ctrl)
mlx5_write64(ctrl, dr_qp->uar->map + MLX5_BF_OFFSET);
}
-static void dr_rdma_segments(struct mlx5dr_qp *dr_qp, u64 remote_addr,
- u32 rkey, struct dr_data_seg *data_seg,
- u32 opcode, bool notify_hw)
+static void
+dr_rdma_handle_flow_access_arg_segments(struct mlx5_wqe_ctrl_seg *wq_ctrl,
+ u32 remote_addr,
+ struct dr_data_seg *data_seg,
+ int *size)
{
- struct mlx5_wqe_raddr_seg *wq_raddr;
- struct mlx5_wqe_ctrl_seg *wq_ctrl;
- struct mlx5_wqe_data_seg *wq_dseg;
- unsigned int size;
- unsigned int idx;
+ struct mlx5_wqe_header_modify_argument_update_seg *wq_arg_seg;
+ struct mlx5_wqe_flow_update_ctrl_seg *wq_flow_seg;
- size = sizeof(*wq_ctrl) / 16 + sizeof(*wq_dseg) / 16 +
- sizeof(*wq_raddr) / 16;
+ wq_ctrl->general_id = cpu_to_be32(remote_addr);
+ wq_flow_seg = (void *)(wq_ctrl + 1);
- idx = dr_qp->sq.pc & (dr_qp->sq.wqe_cnt - 1);
+ /* mlx5_wqe_flow_update_ctrl_seg - all reserved */
+ memset(wq_flow_seg, 0, sizeof(*wq_flow_seg));
+ wq_arg_seg = (void *)(wq_flow_seg + 1);
+
+ memcpy(wq_arg_seg->argument_list,
+ (void *)(uintptr_t)data_seg->addr,
+ data_seg->length);
+
+ *size = (sizeof(*wq_ctrl) + /* WQE ctrl segment */
+ sizeof(*wq_flow_seg) + /* WQE flow update ctrl seg - reserved */
+ sizeof(*wq_arg_seg)) / /* WQE hdr modify arg seg - data */
+ MLX5_SEND_WQE_DS;
+}
+
+static void
+dr_rdma_handle_icm_write_segments(struct mlx5_wqe_ctrl_seg *wq_ctrl,
+ u64 remote_addr,
+ u32 rkey,
+ struct dr_data_seg *data_seg,
+ unsigned int *size)
+{
+ struct mlx5_wqe_raddr_seg *wq_raddr;
+ struct mlx5_wqe_data_seg *wq_dseg;
- wq_ctrl = mlx5_wq_cyc_get_wqe(&dr_qp->wq.sq, idx);
- wq_ctrl->imm = 0;
- wq_ctrl->fm_ce_se = (data_seg->send_flags) ?
- MLX5_WQE_CTRL_CQ_UPDATE : 0;
- wq_ctrl->opmod_idx_opcode = cpu_to_be32(((dr_qp->sq.pc & 0xffff) << 8) |
- opcode);
- wq_ctrl->qpn_ds = cpu_to_be32(size | dr_qp->qpn << 8);
wq_raddr = (void *)(wq_ctrl + 1);
+
wq_raddr->raddr = cpu_to_be64(remote_addr);
wq_raddr->rkey = cpu_to_be32(rkey);
wq_raddr->reserved = 0;
wq_dseg = (void *)(wq_raddr + 1);
+
wq_dseg->byte_count = cpu_to_be32(data_seg->length);
wq_dseg->lkey = cpu_to_be32(data_seg->lkey);
wq_dseg->addr = cpu_to_be64(data_seg->addr);
- dr_qp->sq.wqe_head[idx] = dr_qp->sq.pc++;
+ *size = (sizeof(*wq_ctrl) + /* WQE ctrl segment */
+ sizeof(*wq_dseg) + /* WQE data segment */
+ sizeof(*wq_raddr)) / /* WQE remote addr segment */
+ MLX5_SEND_WQE_DS;
+}
+
+static void dr_set_ctrl_seg(struct mlx5_wqe_ctrl_seg *wq_ctrl,
+ struct dr_data_seg *data_seg)
+{
+ wq_ctrl->signature = 0;
+ wq_ctrl->rsvd[0] = 0;
+ wq_ctrl->rsvd[1] = 0;
+ wq_ctrl->fm_ce_se = data_seg->send_flags & IB_SEND_SIGNALED ?
+ MLX5_WQE_CTRL_CQ_UPDATE : 0;
+ wq_ctrl->imm = 0;
+}
+
+static void dr_rdma_segments(struct mlx5dr_qp *dr_qp, u64 remote_addr,
+ u32 rkey, struct dr_data_seg *data_seg,
+ u32 opcode, bool notify_hw)
+{
+ struct mlx5_wqe_ctrl_seg *wq_ctrl;
+ int opcode_mod = 0;
+ unsigned int size;
+ unsigned int idx;
+
+ idx = dr_qp->sq.pc & (dr_qp->sq.wqe_cnt - 1);
+
+ wq_ctrl = mlx5_wq_cyc_get_wqe(&dr_qp->wq.sq, idx);
+ dr_set_ctrl_seg(wq_ctrl, data_seg);
+
+ switch (opcode) {
+ case MLX5_OPCODE_RDMA_READ:
+ case MLX5_OPCODE_RDMA_WRITE:
+ dr_rdma_handle_icm_write_segments(wq_ctrl, remote_addr,
+ rkey, data_seg, &size);
+ break;
+ case MLX5_OPCODE_FLOW_TBL_ACCESS:
+ opcode_mod = MLX5_CMD_OP_MOD_UPDATE_HEADER_MODIFY_ARGUMENT;
+ dr_rdma_handle_flow_access_arg_segments(wq_ctrl, remote_addr,
+ data_seg, &size);
+ break;
+ default:
+ WARN(true, "illegal opcode %d", opcode);
+ return;
+ }
+
+ /* --------------------------------------------------------
+ * |opcode_mod (8 bits)|wqe_index (16 bits)|opcode (8 bits)|
+ * --------------------------------------------------------
+ */
+ wq_ctrl->opmod_idx_opcode =
+ cpu_to_be32((opcode_mod << 24) |
+ ((dr_qp->sq.pc & 0xffff) << 8) |
+ opcode);
+ wq_ctrl->qpn_ds = cpu_to_be32(size | dr_qp->qpn << 8);
+
+ dr_qp->sq.pc += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB);
+ dr_qp->sq.wqe_head[idx] = dr_qp->sq.head++;
if (notify_hw)
dr_cmd_notify_hw(dr_qp, wq_ctrl);
@@ -402,10 +483,16 @@ static void dr_rdma_segments(struct mlx5dr_qp *dr_qp, u64 remote_addr,
static void dr_post_send(struct mlx5dr_qp *dr_qp, struct postsend_info *send_info)
{
- dr_rdma_segments(dr_qp, send_info->remote_addr, send_info->rkey,
- &send_info->write, MLX5_OPCODE_RDMA_WRITE, false);
- dr_rdma_segments(dr_qp, send_info->remote_addr, send_info->rkey,
- &send_info->read, MLX5_OPCODE_RDMA_READ, true);
+ if (send_info->type == WRITE_ICM) {
+ dr_rdma_segments(dr_qp, send_info->remote_addr, send_info->rkey,
+ &send_info->write, MLX5_OPCODE_RDMA_WRITE, false);
+ dr_rdma_segments(dr_qp, send_info->remote_addr, send_info->rkey,
+ &send_info->read, MLX5_OPCODE_RDMA_READ, true);
+ } else { /* GTA_ARG */
+ dr_rdma_segments(dr_qp, send_info->remote_addr, send_info->rkey,
+ &send_info->write, MLX5_OPCODE_FLOW_TBL_ACCESS, true);
+ }
}
/**
@@ -471,24 +558,54 @@ static int dr_handle_pending_wc(struct mlx5dr_domain *dmn,
} else if (ne == 1) {
send_ring->pending_wqe -= send_ring->signal_th;
}
- } while (is_drain && send_ring->pending_wqe);
+ } while (ne == 1 ||
+ (is_drain && send_ring->pending_wqe >= send_ring->signal_th));
return 0;
}
-static void dr_fill_data_segs(struct mlx5dr_send_ring *send_ring,
- struct postsend_info *send_info)
+static void dr_fill_write_args_segs(struct mlx5dr_send_ring *send_ring,
+ struct postsend_info *send_info)
{
send_ring->pending_wqe++;
if (send_ring->pending_wqe % send_ring->signal_th == 0)
send_info->write.send_flags |= IB_SEND_SIGNALED;
+ else
+ send_info->write.send_flags = 0;
+}
+
+static void dr_fill_write_icm_segs(struct mlx5dr_domain *dmn,
+ struct mlx5dr_send_ring *send_ring,
+ struct postsend_info *send_info)
+{
+ u32 buff_offset;
+
+ if (send_info->write.length > dmn->info.max_inline_size) {
+ buff_offset = (send_ring->tx_head &
+ (dmn->send_ring->signal_th - 1)) *
+ send_ring->max_post_send_size;
+ /* Copy to ring mr */
+ memcpy(send_ring->buf + buff_offset,
+ (void *)(uintptr_t)send_info->write.addr,
+ send_info->write.length);
+ send_info->write.addr = (uintptr_t)send_ring->mr->dma_addr + buff_offset;
+ send_info->write.lkey = send_ring->mr->mkey;
+
+ send_ring->tx_head++;
+ }
+
+ send_ring->pending_wqe++;
+
+ if (send_ring->pending_wqe % send_ring->signal_th == 0)
+ send_info->write.send_flags |= IB_SEND_SIGNALED;
send_ring->pending_wqe++;
send_info->read.length = send_info->write.length;
- /* Read into the same write area */
- send_info->read.addr = (uintptr_t)send_info->write.addr;
- send_info->read.lkey = send_ring->mr->mkey;
+
+ /* Read into dedicated sync buffer */
+ send_info->read.addr = (uintptr_t)send_ring->sync_mr->dma_addr;
+ send_info->read.lkey = send_ring->sync_mr->mkey;
if (send_ring->pending_wqe % send_ring->signal_th == 0)
send_info->read.send_flags = IB_SEND_SIGNALED;
@@ -496,11 +613,20 @@ static void dr_fill_data_segs(struct mlx5dr_send_ring *send_ring,
send_info->read.send_flags = 0;
}
+static void dr_fill_data_segs(struct mlx5dr_domain *dmn,
+ struct mlx5dr_send_ring *send_ring,
+ struct postsend_info *send_info)
+{
+ if (send_info->type == WRITE_ICM)
+ dr_fill_write_icm_segs(dmn, send_ring, send_info);
+ else /* args */
+ dr_fill_write_args_segs(send_ring, send_info);
+}
+
static int dr_postsend_icm_data(struct mlx5dr_domain *dmn,
struct postsend_info *send_info)
{
struct mlx5dr_send_ring *send_ring = dmn->send_ring;
- u32 buff_offset;
int ret;
if (unlikely(dmn->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR ||
@@ -517,20 +643,7 @@ static int dr_postsend_icm_data(struct mlx5dr_domain *dmn,
if (ret)
goto out_unlock;
- if (send_info->write.length > dmn->info.max_inline_size) {
- buff_offset = (send_ring->tx_head &
- (dmn->send_ring->signal_th - 1)) *
- send_ring->max_post_send_size;
- /* Copy to ring mr */
- memcpy(send_ring->buf + buff_offset,
- (void *)(uintptr_t)send_info->write.addr,
- send_info->write.length);
- send_info->write.addr = (uintptr_t)send_ring->mr->dma_addr + buff_offset;
- send_info->write.lkey = send_ring->mr->mkey;
- }
-
- send_ring->tx_head++;
- dr_fill_data_segs(send_ring, send_info);
+ dr_fill_data_segs(dmn, send_ring, send_info);
dr_post_send(send_ring->qp, send_info);
out_unlock:
@@ -736,6 +849,59 @@ int mlx5dr_send_postsend_action(struct mlx5dr_domain *dmn,
return dr_postsend_icm_data(dmn, &send_info);
}
+int mlx5dr_send_postsend_pattern(struct mlx5dr_domain *dmn,
+ struct mlx5dr_icm_chunk *chunk,
+ u16 num_of_actions,
+ u8 *data)
+{
+ struct postsend_info send_info = {};
+ int ret;
+
+ send_info.write.addr = (uintptr_t)data;
+ send_info.write.length = num_of_actions * DR_MODIFY_ACTION_SIZE;
+ send_info.remote_addr = mlx5dr_icm_pool_get_chunk_mr_addr(chunk);
+ send_info.rkey = mlx5dr_icm_pool_get_chunk_rkey(chunk);
+
+ ret = dr_postsend_icm_data(dmn, &send_info);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int mlx5dr_send_postsend_args(struct mlx5dr_domain *dmn, u64 arg_id,
+ u16 num_of_actions, u8 *actions_data)
+{
+ int data_len, iter = 0, cur_sent;
+ u64 addr;
+ int ret;
+
+ addr = (uintptr_t)actions_data;
+ data_len = num_of_actions * DR_MODIFY_ACTION_SIZE;
+
+ do {
+ struct postsend_info send_info = {};
+
+ send_info.type = GTA_ARG;
+ send_info.write.addr = addr;
+ cur_sent = min_t(u32, data_len, DR_ACTION_CACHE_LINE_SIZE);
+ send_info.write.length = cur_sent;
+ send_info.write.lkey = 0;
+ send_info.remote_addr = arg_id + iter;
+
+ ret = dr_postsend_icm_data(dmn, &send_info);
+ if (ret)
+ goto out;
+
+ iter++;
+ addr += cur_sent;
+ data_len -= cur_sent;
+ } while (data_len > 0);
+
+out:
+ return ret;
+}
+
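+/* Worked example for mlx5dr_send_postsend_args(), assuming
+ * DR_MODIFY_ACTION_SIZE is 8 bytes and DR_ACTION_CACHE_LINE_SIZE is 64:
+ * 12 actions give 96 bytes of argument data, posted as two GTA_ARG WQEs,
+ * 64 bytes to arg_id + 0 followed by the remaining 32 bytes to arg_id + 1.
+ */
+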
static int dr_modify_qp_rst2init(struct mlx5_core_dev *mdev,
struct mlx5dr_qp *dr_qp,
int port)
@@ -1123,16 +1289,25 @@ int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn)
goto free_mem;
}
+ dmn->send_ring->sync_buff = kzalloc(dmn->send_ring->max_post_send_size,
+ GFP_KERNEL);
+ if (!dmn->send_ring->sync_buff) {
+ ret = -ENOMEM;
+ goto clean_mr;
+ }
+
dmn->send_ring->sync_mr = dr_reg_mr(dmn->mdev,
dmn->pdn, dmn->send_ring->sync_buff,
- MIN_READ_SYNC);
+ dmn->send_ring->max_post_send_size);
if (!dmn->send_ring->sync_mr) {
ret = -ENOMEM;
- goto clean_mr;
+ goto free_sync_mem;
}
return 0;
+free_sync_mem:
+ kfree(dmn->send_ring->sync_buff);
clean_mr:
dr_dereg_mr(dmn->mdev, dmn->send_ring->mr);
free_mem:
@@ -1155,6 +1330,7 @@ void mlx5dr_send_ring_free(struct mlx5dr_domain *dmn,
dr_dereg_mr(dmn->mdev, send_ring->sync_mr);
dr_dereg_mr(dmn->mdev, send_ring->mr);
kfree(send_ring->buf);
+ kfree(send_ring->sync_buff);
kfree(send_ring);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
index 1e15f605df6e..9413aaf51251 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
@@ -633,6 +633,63 @@ int mlx5dr_ste_set_action_decap_l3_list(struct mlx5dr_ste_ctx *ste_ctx,
used_hw_action_num);
}
+static int
+dr_ste_alloc_modify_hdr_chunk(struct mlx5dr_action *action)
+{
+ struct mlx5dr_domain *dmn = action->rewrite->dmn;
+ u32 chunk_size;
+ int ret;
+
+ chunk_size = ilog2(roundup_pow_of_two(action->rewrite->num_of_actions));
+
+ /* HW modify action index granularity is at least 64B */
+ chunk_size = max_t(u32, chunk_size, DR_CHUNK_SIZE_8);
+
+ action->rewrite->chunk = mlx5dr_icm_alloc_chunk(dmn->action_icm_pool,
+ chunk_size);
+ if (!action->rewrite->chunk)
+ return -ENOMEM;
+
+ action->rewrite->index = (mlx5dr_icm_pool_get_chunk_icm_addr(action->rewrite->chunk) -
+ dmn->info.caps.hdr_modify_icm_addr) /
+ DR_ACTION_CACHE_LINE_SIZE;
+
+ ret = mlx5dr_send_postsend_action(action->rewrite->dmn, action);
+ if (ret)
+ goto free_chunk;
+
+ return 0;
+
+free_chunk:
+ mlx5dr_icm_free_chunk(action->rewrite->chunk);
+ return -ENOMEM;
+}
+
+static void dr_ste_free_modify_hdr_chunk(struct mlx5dr_action *action)
+{
+ mlx5dr_icm_free_chunk(action->rewrite->chunk);
+}
+
+int mlx5dr_ste_alloc_modify_hdr(struct mlx5dr_action *action)
+{
+ struct mlx5dr_domain *dmn = action->rewrite->dmn;
+
+ if (mlx5dr_domain_is_support_ptrn_arg(dmn))
+ return dmn->ste_ctx->alloc_modify_hdr_chunk(action);
+
+ return dr_ste_alloc_modify_hdr_chunk(action);
+}
+
+void mlx5dr_ste_free_modify_hdr(struct mlx5dr_action *action)
+{
+ struct mlx5dr_domain *dmn = action->rewrite->dmn;
+
+ if (mlx5dr_domain_is_support_ptrn_arg(dmn))
+ return dmn->ste_ctx->dealloc_modify_hdr_chunk(action);
+
+ return dr_ste_free_modify_hdr_chunk(action);
+}
+
static int dr_ste_build_pre_check_spec(struct mlx5dr_domain *dmn,
struct mlx5dr_match_spec *spec)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h
index 7075142bcfb6..54a6619c3ecb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h
@@ -195,6 +195,8 @@ struct mlx5dr_ste_ctx {
u8 *hw_action,
u32 hw_action_sz,
u16 *used_hw_action_num);
+ int (*alloc_modify_hdr_chunk)(struct mlx5dr_action *action);
+ void (*dealloc_modify_hdr_chunk)(struct mlx5dr_action *action);
/* Send */
void (*prepare_for_postsend)(u8 *hw_ste_p, u32 ste_size);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
index 084145f18084..4c0704ad166b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
@@ -495,21 +495,66 @@ static void dr_ste_v1_set_rx_decap(u8 *hw_ste_p, u8 *s_action)
dr_ste_v1_set_reparse(hw_ste_p);
}
-static void dr_ste_v1_set_rewrite_actions(u8 *hw_ste_p,
- u8 *s_action,
- u16 num_of_actions,
- u32 re_write_index)
+static void dr_ste_v1_set_accelerated_rewrite_actions(u8 *hw_ste_p,
+ u8 *d_action,
+ u16 num_of_actions,
+ u32 rewrite_pattern,
+ u32 rewrite_args,
+ u8 *action_data)
+{
+ if (action_data) {
+ memcpy(d_action, action_data, DR_MODIFY_ACTION_SIZE);
+ } else {
+ MLX5_SET(ste_double_action_accelerated_modify_action_list_v1, d_action,
+ action_id, DR_STE_V1_ACTION_ID_ACCELERATED_LIST);
+ MLX5_SET(ste_double_action_accelerated_modify_action_list_v1, d_action,
+ modify_actions_pattern_pointer, rewrite_pattern);
+ MLX5_SET(ste_double_action_accelerated_modify_action_list_v1, d_action,
+ number_of_modify_actions, num_of_actions);
+ MLX5_SET(ste_double_action_accelerated_modify_action_list_v1, d_action,
+ modify_actions_argument_pointer, rewrite_args);
+ }
+
+ dr_ste_v1_set_reparse(hw_ste_p);
+}
+
+static void dr_ste_v1_set_basic_rewrite_actions(u8 *hw_ste_p,
+ u8 *s_action,
+ u16 num_of_actions,
+ u32 rewrite_index)
{
MLX5_SET(ste_single_action_modify_list_v1, s_action, action_id,
DR_STE_V1_ACTION_ID_MODIFY_LIST);
MLX5_SET(ste_single_action_modify_list_v1, s_action, num_of_modify_actions,
num_of_actions);
MLX5_SET(ste_single_action_modify_list_v1, s_action, modify_actions_ptr,
- re_write_index);
+ rewrite_index);
dr_ste_v1_set_reparse(hw_ste_p);
}
+static void dr_ste_v1_set_rewrite_actions(u8 *hw_ste_p,
+ u8 *action,
+ u16 num_of_actions,
+ u32 rewrite_pattern,
+ u32 rewrite_args,
+ u8 *action_data)
+{
+ if (rewrite_pattern != MLX5DR_INVALID_PATTERN_INDEX)
+ return dr_ste_v1_set_accelerated_rewrite_actions(hw_ste_p,
+ action,
+ num_of_actions,
+ rewrite_pattern,
+ rewrite_args,
+ action_data);
+
+ /* fall back to the code that doesn't support accelerated modify header */
+ return dr_ste_v1_set_basic_rewrite_actions(hw_ste_p,
+ action,
+ num_of_actions,
+ rewrite_args);
+}
+
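+/* i.e. a rewrite that carries a valid pattern index is encoded with the
+ * accelerated path above (either an inline single action or an
+ * ACCELERATED_LIST pointing at pattern + argument objects); anything else
+ * falls back to the basic MODIFY_LIST encoding.
+ */
+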
static void dr_ste_v1_set_aso_flow_meter(u8 *d_action,
u32 object_id,
u32 offset,
@@ -604,9 +649,6 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
allow_modify_hdr = false;
}
- if (action_type_set[DR_ACTION_TYP_CTR])
- dr_ste_v1_set_counter_id(last_ste, attr->ctr_id);
-
if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
if (!allow_modify_hdr || action_sz < DR_STE_ACTION_DOUBLE_SZ) {
dr_ste_v1_arr_init_next_match(&last_ste, added_stes,
@@ -617,7 +659,9 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
}
dr_ste_v1_set_rewrite_actions(last_ste, action,
attr->modify_actions,
- attr->modify_index);
+ attr->modify_pat_idx,
+ attr->modify_index,
+ attr->single_modify_action);
action_sz -= DR_STE_ACTION_DOUBLE_SZ;
action += DR_STE_ACTION_DOUBLE_SZ;
allow_encap = false;
@@ -724,6 +768,10 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
attr->range.max);
}
+ /* set counter ID on the last STE to adhere to DMFS behavior */
+ if (action_type_set[DR_ACTION_TYP_CTR])
+ dr_ste_v1_set_counter_id(last_ste, attr->ctr_id);
+
dr_ste_v1_set_hit_gvmi(last_ste, attr->hit_gvmi);
dr_ste_v1_set_hit_addr(last_ste, attr->final_icm_addr, 1);
}
@@ -743,7 +791,9 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
if (action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2]) {
dr_ste_v1_set_rewrite_actions(last_ste, action,
attr->decap_actions,
- attr->decap_index);
+ attr->decap_pat_idx,
+ attr->decap_index,
+ NULL);
action_sz -= DR_STE_ACTION_DOUBLE_SZ;
action += DR_STE_ACTION_DOUBLE_SZ;
allow_modify_hdr = false;
@@ -798,7 +848,9 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
}
dr_ste_v1_set_rewrite_actions(last_ste, action,
attr->modify_actions,
- attr->modify_index);
+ attr->modify_pat_idx,
+ attr->modify_index,
+ attr->single_modify_action);
action_sz -= DR_STE_ACTION_DOUBLE_SZ;
action += DR_STE_ACTION_DOUBLE_SZ;
}
@@ -2175,6 +2227,49 @@ dr_ste_v1_build_tnl_gtpu_flex_parser_1_init(struct mlx5dr_ste_build *sb,
sb->ste_build_tag_func = &dr_ste_v1_build_tnl_gtpu_flex_parser_1_tag;
}
+int dr_ste_v1_alloc_modify_hdr_ptrn_arg(struct mlx5dr_action *action)
+{
+ struct mlx5dr_ptrn_mgr *ptrn_mgr;
+ int ret;
+
+ ptrn_mgr = action->rewrite->dmn->ptrn_mgr;
+ if (!ptrn_mgr)
+ return -EOPNOTSUPP;
+
+ action->rewrite->arg = mlx5dr_arg_get_obj(action->rewrite->dmn->arg_mgr,
+ action->rewrite->num_of_actions,
+ action->rewrite->data);
+ if (!action->rewrite->arg) {
+ mlx5dr_err(action->rewrite->dmn, "Failed allocating args for modify header\n");
+ return -EAGAIN;
+ }
+
+ action->rewrite->ptrn =
+ mlx5dr_ptrn_cache_get_pattern(ptrn_mgr,
+ action->rewrite->num_of_actions,
+ action->rewrite->data);
+ if (!action->rewrite->ptrn) {
+ mlx5dr_err(action->rewrite->dmn, "Failed to get pattern\n");
+ ret = -EAGAIN;
+ goto put_arg;
+ }
+
+ return 0;
+
+put_arg:
+ mlx5dr_arg_put_obj(action->rewrite->dmn->arg_mgr,
+ action->rewrite->arg);
+ return ret;
+}
+
+void dr_ste_v1_free_modify_hdr_ptrn_arg(struct mlx5dr_action *action)
+{
+ mlx5dr_ptrn_cache_put_pattern(action->rewrite->dmn->ptrn_mgr,
+ action->rewrite->ptrn);
+ mlx5dr_arg_put_obj(action->rewrite->dmn->arg_mgr,
+ action->rewrite->arg);
+}
+
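Two details of the allocator above are worth noting: failures return -EAGAIN rather than -ENOMEM, which reads like a deliberate signal that the pattern/argument caches were exhausted rather than a hard error, and the error path unwinds in reverse order (the arg object is released only after the pattern lookup fails). A hypothetical fallback a caller could build on that contract (assumption; the actual policy lives outside this hunk, and legacy_alloc_modify_hdr_chunk is an invented name):

ret = dr_ste_v1_alloc_modify_hdr_ptrn_arg(action);
if (ret == -EAGAIN) {
	/* cache pressure, not a hard failure: hypothetically fall
	 * back to the legacy chunk-based allocation
	 */
	ret = legacy_alloc_modify_hdr_chunk(action);
}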
static struct mlx5dr_ste_ctx ste_ctx_v1 = {
/* Builders */
.build_eth_l2_src_dst_init = &dr_ste_v1_build_eth_l2_src_dst_init,
@@ -2231,6 +2326,9 @@ static struct mlx5dr_ste_ctx ste_ctx_v1 = {
.set_action_add = &dr_ste_v1_set_action_add,
.set_action_copy = &dr_ste_v1_set_action_copy,
.set_action_decap_l3_list = &dr_ste_v1_set_action_decap_l3_list,
+ .alloc_modify_hdr_chunk = &dr_ste_v1_alloc_modify_hdr_ptrn_arg,
+ .dealloc_modify_hdr_chunk = &dr_ste_v1_free_modify_hdr_ptrn_arg,
+
/* Send */
.prepare_for_postsend = &dr_ste_v1_prepare_for_postsend,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.h
index b5c0f0f8392f..e2fc69867088 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.h
@@ -31,6 +31,8 @@ void dr_ste_v1_set_action_copy(u8 *d_action, u8 dst_hw_field, u8 dst_shifter,
u8 dst_len, u8 src_hw_field, u8 src_shifter);
int dr_ste_v1_set_action_decap_l3_list(void *data, u32 data_sz, u8 *hw_action,
u32 hw_action_sz, u16 *used_hw_action_num);
+int dr_ste_v1_alloc_modify_hdr_ptrn_arg(struct mlx5dr_action *action);
+void dr_ste_v1_free_modify_hdr_ptrn_arg(struct mlx5dr_action *action);
void dr_ste_v1_build_eth_l2_src_dst_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask);
void dr_ste_v1_build_eth_l3_ipv6_dst_init(struct mlx5dr_ste_build *sb,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v2.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v2.c
index cf1a3c9a1cf4..808b013cf48c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v2.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v2.c
@@ -221,6 +221,8 @@ static struct mlx5dr_ste_ctx ste_ctx_v2 = {
.set_action_add = &dr_ste_v1_set_action_add,
.set_action_copy = &dr_ste_v1_set_action_copy,
.set_action_decap_l3_list = &dr_ste_v1_set_action_decap_l3_list,
+ .alloc_modify_hdr_chunk = &dr_ste_v1_alloc_modify_hdr_ptrn_arg,
+ .dealloc_modify_hdr_chunk = &dr_ste_v1_free_modify_hdr_ptrn_arg,
/* Send */
.prepare_for_postsend = &dr_ste_v1_prepare_for_postsend,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index 2b769dcbd453..37b7b1a79f93 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -21,11 +21,16 @@
#define DR_NUM_OF_FLEX_PARSERS 8
#define DR_STE_MAX_FLEX_0_ID 3
#define DR_STE_MAX_FLEX_1_ID 7
+#define DR_ACTION_CACHE_LINE_SIZE 64
#define mlx5dr_err(dmn, arg...) mlx5_core_err((dmn)->mdev, ##arg)
#define mlx5dr_info(dmn, arg...) mlx5_core_info((dmn)->mdev, ##arg)
#define mlx5dr_dbg(dmn, arg...) mlx5_core_dbg((dmn)->mdev, ##arg)
+struct mlx5dr_ptrn_mgr;
+struct mlx5dr_arg_mgr;
+struct mlx5dr_arg_obj;
+
static inline bool dr_is_flex_parser_0_id(u8 parser_id)
{
return parser_id <= DR_STE_MAX_FLEX_0_ID;
@@ -66,6 +71,7 @@ enum mlx5dr_icm_chunk_size {
enum mlx5dr_icm_type {
DR_ICM_TYPE_STE,
DR_ICM_TYPE_MODIFY_ACTION,
+ DR_ICM_TYPE_MODIFY_HDR_PTRN,
};
static inline enum mlx5dr_icm_chunk_size
@@ -255,11 +261,15 @@ u64 mlx5dr_ste_get_mr_addr(struct mlx5dr_ste *ste);
struct list_head *mlx5dr_ste_get_miss_list(struct mlx5dr_ste *ste);
#define MLX5DR_MAX_VLANS 2
+#define MLX5DR_INVALID_PATTERN_INDEX 0xffffffff
struct mlx5dr_ste_actions_attr {
u32 modify_index;
+ u32 modify_pat_idx;
u16 modify_actions;
+ u8 *single_modify_action;
u32 decap_index;
+ u32 decap_pat_idx;
u16 decap_actions;
u8 decap_with_vlan:1;
u64 final_icm_addr;
@@ -331,6 +341,8 @@ int mlx5dr_ste_set_action_decap_l3_list(struct mlx5dr_ste_ctx *ste_ctx,
u8 *hw_action,
u32 hw_action_sz,
u16 *used_hw_action_num);
+int mlx5dr_ste_alloc_modify_hdr(struct mlx5dr_action *action);
+void mlx5dr_ste_free_modify_hdr(struct mlx5dr_action *action);
const struct mlx5dr_ste_action_modify_field *
mlx5dr_ste_conv_modify_hdr_sw_field(struct mlx5dr_ste_ctx *ste_ctx, u16 sw_field);
@@ -861,6 +873,8 @@ struct mlx5dr_cmd_caps {
u64 esw_tx_drop_address;
u32 log_icm_size;
u64 hdr_modify_icm_addr;
+ u32 log_modify_pattern_icm_size;
+ u64 hdr_modify_pattern_icm_addr;
u32 flex_protocols;
u8 flex_parser_id_icmp_dw0;
u8 flex_parser_id_icmp_dw1;
@@ -888,6 +902,9 @@ struct mlx5dr_cmd_caps {
struct mlx5dr_vports vports;
bool prio_tag_required;
struct mlx5dr_roce_cap roce_caps;
+ u16 log_header_modify_argument_granularity;
+ u16 log_header_modify_argument_max_alloc;
+ bool support_modify_argument;
u8 is_ecpf:1;
u8 isolate_vl_tc:1;
};
@@ -910,6 +927,7 @@ struct mlx5dr_domain_info {
u32 max_send_wr;
u32 max_log_sw_icm_sz;
u32 max_log_action_icm_sz;
+ u32 max_log_modify_hdr_pattern_icm_sz;
struct mlx5dr_domain_rx_tx rx;
struct mlx5dr_domain_rx_tx tx;
struct mlx5dr_cmd_caps caps;
@@ -928,6 +946,8 @@ struct mlx5dr_domain {
struct mlx5dr_send_info_pool *send_info_pool_tx;
struct kmem_cache *chunks_kmem_cache;
struct kmem_cache *htbls_kmem_cache;
+ struct mlx5dr_ptrn_mgr *ptrn_mgr;
+ struct mlx5dr_arg_mgr *arg_mgr;
struct mlx5dr_send_ring *send_ring;
struct mlx5dr_domain_info info;
struct xarray csum_fts_xa;
@@ -994,15 +1014,34 @@ struct mlx5dr_ste_action_modify_field {
u8 l4_type;
};
+struct mlx5dr_ptrn_obj {
+ struct mlx5dr_icm_chunk *chunk;
+ u8 *data;
+ u16 num_of_actions;
+ u32 index;
+ refcount_t refcount;
+ struct list_head list;
+};
+
+struct mlx5dr_arg_obj {
+ u32 obj_id;
+ u32 obj_offset;
+ struct list_head list_node;
+ u32 log_chunk_size;
+};
+
struct mlx5dr_action_rewrite {
struct mlx5dr_domain *dmn;
struct mlx5dr_icm_chunk *chunk;
u8 *data;
u16 num_of_actions;
u32 index;
+ u8 single_action_opt:1;
u8 allow_rx:1;
u8 allow_tx:1;
u8 modify_ttl:1;
+ struct mlx5dr_ptrn_obj *ptrn;
+ struct mlx5dr_arg_obj *arg;
};
struct mlx5dr_action_reformat {
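The rewrite state now carries both worlds side by side: chunk/index for the legacy path, ptrn/arg for the accelerated one, plus a single_action_opt flag for rewrites short enough to inline one action directly in the STE. A small predicate capturing the implied invariant (an assumption inferred from the fields, not code from the patch):

/* Assumption: exactly one of the two representations is live
 * for any given rewrite action.
 */
static bool example_rewrite_uses_ptrn_arg(const struct mlx5dr_action_rewrite *rw)
{
	return rw->ptrn && rw->arg;
}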
@@ -1334,6 +1373,12 @@ struct mlx5dr_cmd_gid_attr {
int mlx5dr_cmd_query_gid(struct mlx5_core_dev *mdev, u8 vhca_port_num,
u16 index, struct mlx5dr_cmd_gid_attr *attr);
+int mlx5dr_cmd_create_modify_header_arg(struct mlx5_core_dev *dev,
+ u16 log_obj_range, u32 pd,
+ u32 *obj_id);
+void mlx5dr_cmd_destroy_modify_header_arg(struct mlx5_core_dev *dev,
+ u32 obj_id);
+
struct mlx5dr_icm_pool *mlx5dr_icm_pool_create(struct mlx5dr_domain *dmn,
enum mlx5dr_icm_type icm_type);
void mlx5dr_icm_pool_destroy(struct mlx5dr_icm_pool *pool);
@@ -1368,6 +1413,7 @@ struct mlx5dr_qp {
struct mlx5_wq_ctrl wq_ctrl;
u32 qpn;
struct {
+ unsigned int head;
unsigned int pc;
unsigned int cc;
unsigned int size;
@@ -1399,9 +1445,6 @@ struct mlx5dr_mr {
size_t size;
};
-#define MAX_SEND_CQE 64
-#define MIN_READ_SYNC 64
-
struct mlx5dr_send_ring {
struct mlx5dr_cq *cq;
struct mlx5dr_qp *qp;
@@ -1416,7 +1459,7 @@ struct mlx5dr_send_ring {
u32 tx_head;
void *buf;
u32 buf_size;
- u8 sync_buff[MIN_READ_SYNC];
+ u8 *sync_buff;
struct mlx5dr_mr *sync_mr;
spinlock_t lock; /* Protect the data path of the send ring */
bool err_state; /* send_ring is not usable in err state */
@@ -1440,6 +1483,12 @@ int mlx5dr_send_postsend_formatted_htbl(struct mlx5dr_domain *dmn,
bool update_hw_ste);
int mlx5dr_send_postsend_action(struct mlx5dr_domain *dmn,
struct mlx5dr_action *action);
+int mlx5dr_send_postsend_pattern(struct mlx5dr_domain *dmn,
+ struct mlx5dr_icm_chunk *chunk,
+ u16 num_of_actions,
+ u8 *data);
+int mlx5dr_send_postsend_args(struct mlx5dr_domain *dmn, u64 arg_id,
+ u16 num_of_actions, u8 *actions_data);
int mlx5dr_send_info_pool_create(struct mlx5dr_domain *dmn);
void mlx5dr_send_info_pool_destroy(struct mlx5dr_domain *dmn);
@@ -1526,4 +1575,20 @@ static inline bool mlx5dr_supp_match_ranges(struct mlx5_core_dev *dev)
(1ULL << MLX5_IFC_DEFINER_FORMAT_ID_SELECT));
}
+bool mlx5dr_domain_is_support_ptrn_arg(struct mlx5dr_domain *dmn);
+struct mlx5dr_ptrn_mgr *mlx5dr_ptrn_mgr_create(struct mlx5dr_domain *dmn);
+void mlx5dr_ptrn_mgr_destroy(struct mlx5dr_ptrn_mgr *mgr);
+struct mlx5dr_ptrn_obj *mlx5dr_ptrn_cache_get_pattern(struct mlx5dr_ptrn_mgr *mgr,
+ u16 num_of_actions, u8 *data);
+void mlx5dr_ptrn_cache_put_pattern(struct mlx5dr_ptrn_mgr *mgr,
+ struct mlx5dr_ptrn_obj *pattern);
+struct mlx5dr_arg_mgr *mlx5dr_arg_mgr_create(struct mlx5dr_domain *dmn);
+void mlx5dr_arg_mgr_destroy(struct mlx5dr_arg_mgr *mgr);
+struct mlx5dr_arg_obj *mlx5dr_arg_get_obj(struct mlx5dr_arg_mgr *mgr,
+ u16 num_of_actions,
+ u8 *data);
+void mlx5dr_arg_put_obj(struct mlx5dr_arg_mgr *mgr,
+ struct mlx5dr_arg_obj *arg_obj);
+u32 mlx5dr_arg_get_obj_id(struct mlx5dr_arg_obj *arg_obj);
+
#endif /* _DR_TYPES_H_ */
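
Taken together, the declarations above define two small object caches owned by the domain: a pattern manager that deduplicates action sequences (note the refcount in mlx5dr_ptrn_obj) and an argument manager that hands out offsets into pre-created modify-header argument objects. A lifecycle sketch under that reading (hypothetical call site, error handling elided; the real wiring is elsewhere in the series):

dmn->ptrn_mgr = mlx5dr_ptrn_mgr_create(dmn);
dmn->arg_mgr = mlx5dr_arg_mgr_create(dmn);

/* per-action: */
arg = mlx5dr_arg_get_obj(dmn->arg_mgr, num_of_actions, data);
ptrn = mlx5dr_ptrn_cache_get_pattern(dmn->ptrn_mgr, num_of_actions, data);
/* ... use, then release in reverse: */
mlx5dr_ptrn_cache_put_pattern(dmn->ptrn_mgr, ptrn);
mlx5dr_arg_put_obj(dmn->arg_mgr, arg);

/* teardown: */
mlx5dr_arg_mgr_destroy(dmn->arg_mgr);
mlx5dr_ptrn_mgr_destroy(dmn->ptrn_mgr);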
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr_ste_v1.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr_ste_v1.h
index 790a17d6207f..ca3b0f1453a7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr_ste_v1.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr_ste_v1.h
@@ -100,7 +100,7 @@ struct mlx5_ifc_ste_double_action_insert_with_ptr_v1_bits {
u8 pointer[0x20];
};
-struct mlx5_ifc_ste_double_action_modify_action_list_v1_bits {
+struct mlx5_ifc_ste_double_action_accelerated_modify_action_list_v1_bits {
u8 action_id[0x8];
u8 modify_actions_pattern_pointer[0x18];
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.c b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.c
index 017d68f1e123..972c571b4158 100644
--- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.c
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.c
@@ -31,6 +31,8 @@ mlxfw_mfa2_tlv_next(const struct mlxfw_mfa2_file *mfa2_file,
if (tlv->type == MLXFW_MFA2_TLV_MULTI_PART) {
multi = mlxfw_mfa2_tlv_multi_get(mfa2_file, tlv);
+ if (!multi)
+ return NULL;
tlv_len = NLA_ALIGN(tlv_len + be16_to_cpu(multi->total_len));
}
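
The mlxfw fix is a classic unchecked-getter bug: mlxfw_mfa2_tlv_multi_get() can return NULL on a malformed firmware image, and the old code dereferenced the result for total_len unconditionally. A standalone reduction of the pattern (standard C, invented names):

struct multi { unsigned short total_len; };

static const struct multi *get_multi(int valid)
{
	static const struct multi m = { .total_len = 8 };
	return valid ? &m : 0;
}

static int next_tlv_len(int valid, int tlv_len)
{
	const struct multi *multi = get_multi(valid);

	if (!multi)		/* the added check */
		return -1;	/* propagate failure instead of crashing */
	return tlv_len + multi->total_len;
}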
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
index 48dbfea0a2a1..7cdf0ce24f28 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
@@ -26,7 +26,7 @@
#define MLXSW_PCI_CIR_TIMEOUT_MSECS 1000
#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 900000
-#define MLXSW_PCI_SW_RESET_WAIT_MSECS 200
+#define MLXSW_PCI_SW_RESET_WAIT_MSECS 400
#define MLXSW_PCI_FW_READY 0xA1844
#define MLXSW_PCI_FW_READY_MASK 0xFFFF
#define MLXSW_PCI_FW_READY_MAGIC 0x5E
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index f78e8ead8c36..c5aeeb964c17 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -1476,15 +1476,6 @@ static void hw_turn_on_intr(struct ksz_hw *hw, u32 bit)
hw_set_intr(hw, hw->intr_mask);
}
-static inline void hw_ena_intr_bit(struct ksz_hw *hw, uint interrupt)
-{
- u32 read_intr;
-
- read_intr = readl(hw->io + KS884X_INTERRUPTS_ENABLE);
- hw->intr_set = read_intr | interrupt;
- writel(hw->intr_set, hw->io + KS884X_INTERRUPTS_ENABLE);
-}
-
static inline void hw_read_intr(struct ksz_hw *hw, uint *status)
{
*status = readl(hw->io + KS884X_INTERRUPTS_STATUS);
@@ -1854,29 +1845,6 @@ static void port_init_cnt(struct ksz_hw *hw, int port)
*/
/**
- * port_chk - check port register bits
- * @hw: The hardware instance.
- * @port: The port index.
- * @offset: The offset of the port register.
- * @bits: The data bits to check.
- *
- * This function checks whether the specified bits of the port register are set
- * or not.
- *
- * Return 0 if the bits are not set.
- */
-static int port_chk(struct ksz_hw *hw, int port, int offset, u16 bits)
-{
- u32 addr;
- u16 data;
-
- PORT_CTRL_ADDR(port, addr);
- addr += offset;
- data = readw(hw->io + addr);
- return (data & bits) == bits;
-}
-
-/**
* port_cfg - set port register bits
* @hw: The hardware instance.
* @port: The port index.
@@ -1903,53 +1871,6 @@ static void port_cfg(struct ksz_hw *hw, int port, int offset, u16 bits,
}
/**
- * port_chk_shift - check port bit
- * @hw: The hardware instance.
- * @port: The port index.
- * @addr: The offset of the register.
- * @shift: Number of bits to shift.
- *
- * This function checks whether the specified port is set in the register or
- * not.
- *
- * Return 0 if the port is not set.
- */
-static int port_chk_shift(struct ksz_hw *hw, int port, u32 addr, int shift)
-{
- u16 data;
- u16 bit = 1 << port;
-
- data = readw(hw->io + addr);
- data >>= shift;
- return (data & bit) == bit;
-}
-
-/**
- * port_cfg_shift - set port bit
- * @hw: The hardware instance.
- * @port: The port index.
- * @addr: The offset of the register.
- * @shift: Number of bits to shift.
- * @set: The flag indicating whether the port is to be set or not.
- *
- * This routine sets or resets the specified port in the register.
- */
-static void port_cfg_shift(struct ksz_hw *hw, int port, u32 addr, int shift,
- int set)
-{
- u16 data;
- u16 bits = 1 << port;
-
- data = readw(hw->io + addr);
- bits <<= shift;
- if (set)
- data |= bits;
- else
- data &= ~bits;
- writew(data, hw->io + addr);
-}
-
-/**
* port_r8 - read byte from port register
* @hw: The hardware instance.
* @port: The port index.
@@ -2051,12 +1972,6 @@ static inline void port_cfg_broad_storm(struct ksz_hw *hw, int p, int set)
KS8842_PORT_CTRL_1_OFFSET, PORT_BROADCAST_STORM, set);
}
-static inline int port_chk_broad_storm(struct ksz_hw *hw, int p)
-{
- return port_chk(hw, p,
- KS8842_PORT_CTRL_1_OFFSET, PORT_BROADCAST_STORM);
-}
-
/* Driver set switch broadcast storm protection at 10% rate. */
#define BROADCAST_STORM_PROTECTION_RATE 10
@@ -2209,102 +2124,6 @@ static inline void port_cfg_back_pressure(struct ksz_hw *hw, int p, int set)
KS8842_PORT_CTRL_2_OFFSET, PORT_BACK_PRESSURE, set);
}
-static inline void port_cfg_force_flow_ctrl(struct ksz_hw *hw, int p, int set)
-{
- port_cfg(hw, p,
- KS8842_PORT_CTRL_2_OFFSET, PORT_FORCE_FLOW_CTRL, set);
-}
-
-static inline int port_chk_back_pressure(struct ksz_hw *hw, int p)
-{
- return port_chk(hw, p,
- KS8842_PORT_CTRL_2_OFFSET, PORT_BACK_PRESSURE);
-}
-
-static inline int port_chk_force_flow_ctrl(struct ksz_hw *hw, int p)
-{
- return port_chk(hw, p,
- KS8842_PORT_CTRL_2_OFFSET, PORT_FORCE_FLOW_CTRL);
-}
-
-/* Spanning Tree */
-
-static inline void port_cfg_rx(struct ksz_hw *hw, int p, int set)
-{
- port_cfg(hw, p,
- KS8842_PORT_CTRL_2_OFFSET, PORT_RX_ENABLE, set);
-}
-
-static inline void port_cfg_tx(struct ksz_hw *hw, int p, int set)
-{
- port_cfg(hw, p,
- KS8842_PORT_CTRL_2_OFFSET, PORT_TX_ENABLE, set);
-}
-
-static inline void sw_cfg_fast_aging(struct ksz_hw *hw, int set)
-{
- sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET, SWITCH_FAST_AGING, set);
-}
-
-static inline void sw_flush_dyn_mac_table(struct ksz_hw *hw)
-{
- if (!(hw->overrides & FAST_AGING)) {
- sw_cfg_fast_aging(hw, 1);
- mdelay(1);
- sw_cfg_fast_aging(hw, 0);
- }
-}
-
-/* VLAN */
-
-static inline void port_cfg_ins_tag(struct ksz_hw *hw, int p, int insert)
-{
- port_cfg(hw, p,
- KS8842_PORT_CTRL_1_OFFSET, PORT_INSERT_TAG, insert);
-}
-
-static inline void port_cfg_rmv_tag(struct ksz_hw *hw, int p, int remove)
-{
- port_cfg(hw, p,
- KS8842_PORT_CTRL_1_OFFSET, PORT_REMOVE_TAG, remove);
-}
-
-static inline int port_chk_ins_tag(struct ksz_hw *hw, int p)
-{
- return port_chk(hw, p,
- KS8842_PORT_CTRL_1_OFFSET, PORT_INSERT_TAG);
-}
-
-static inline int port_chk_rmv_tag(struct ksz_hw *hw, int p)
-{
- return port_chk(hw, p,
- KS8842_PORT_CTRL_1_OFFSET, PORT_REMOVE_TAG);
-}
-
-static inline void port_cfg_dis_non_vid(struct ksz_hw *hw, int p, int set)
-{
- port_cfg(hw, p,
- KS8842_PORT_CTRL_2_OFFSET, PORT_DISCARD_NON_VID, set);
-}
-
-static inline void port_cfg_in_filter(struct ksz_hw *hw, int p, int set)
-{
- port_cfg(hw, p,
- KS8842_PORT_CTRL_2_OFFSET, PORT_INGRESS_VLAN_FILTER, set);
-}
-
-static inline int port_chk_dis_non_vid(struct ksz_hw *hw, int p)
-{
- return port_chk(hw, p,
- KS8842_PORT_CTRL_2_OFFSET, PORT_DISCARD_NON_VID);
-}
-
-static inline int port_chk_in_filter(struct ksz_hw *hw, int p)
-{
- return port_chk(hw, p,
- KS8842_PORT_CTRL_2_OFFSET, PORT_INGRESS_VLAN_FILTER);
-}
-
/* Mirroring */
static inline void port_cfg_mirror_sniffer(struct ksz_hw *hw, int p, int set)
@@ -2342,28 +2161,6 @@ static void sw_init_mirror(struct ksz_hw *hw)
sw_cfg_mirror_rx_tx(hw, 0);
}
-static inline void sw_cfg_unk_def_deliver(struct ksz_hw *hw, int set)
-{
- sw_cfg(hw, KS8842_SWITCH_CTRL_7_OFFSET,
- SWITCH_UNK_DEF_PORT_ENABLE, set);
-}
-
-static inline int sw_cfg_chk_unk_def_deliver(struct ksz_hw *hw)
-{
- return sw_chk(hw, KS8842_SWITCH_CTRL_7_OFFSET,
- SWITCH_UNK_DEF_PORT_ENABLE);
-}
-
-static inline void sw_cfg_unk_def_port(struct ksz_hw *hw, int port, int set)
-{
- port_cfg_shift(hw, port, KS8842_SWITCH_CTRL_7_OFFSET, 0, set);
-}
-
-static inline int sw_chk_unk_def_port(struct ksz_hw *hw, int port)
-{
- return port_chk_shift(hw, port, KS8842_SWITCH_CTRL_7_OFFSET, 0);
-}
-
/* Priority */
static inline void port_cfg_diffserv(struct ksz_hw *hw, int p, int set)
@@ -2390,30 +2187,6 @@ static inline void port_cfg_prio(struct ksz_hw *hw, int p, int set)
KS8842_PORT_CTRL_1_OFFSET, PORT_PRIO_QUEUE_ENABLE, set);
}
-static inline int port_chk_diffserv(struct ksz_hw *hw, int p)
-{
- return port_chk(hw, p,
- KS8842_PORT_CTRL_1_OFFSET, PORT_DIFFSERV_ENABLE);
-}
-
-static inline int port_chk_802_1p(struct ksz_hw *hw, int p)
-{
- return port_chk(hw, p,
- KS8842_PORT_CTRL_1_OFFSET, PORT_802_1P_ENABLE);
-}
-
-static inline int port_chk_replace_vid(struct ksz_hw *hw, int p)
-{
- return port_chk(hw, p,
- KS8842_PORT_CTRL_2_OFFSET, PORT_USER_PRIORITY_CEILING);
-}
-
-static inline int port_chk_prio(struct ksz_hw *hw, int p)
-{
- return port_chk(hw, p,
- KS8842_PORT_CTRL_1_OFFSET, PORT_PRIO_QUEUE_ENABLE);
-}
-
/**
* sw_dis_diffserv - disable switch DiffServ priority
* @hw: The hardware instance.
@@ -2614,23 +2387,6 @@ static void sw_cfg_port_base_vlan(struct ksz_hw *hw, int port, u8 member)
}
/**
- * sw_get_addr - get the switch MAC address.
- * @hw: The hardware instance.
- * @mac_addr: Buffer to store the MAC address.
- *
- * This function retrieves the MAC address of the switch.
- */
-static inline void sw_get_addr(struct ksz_hw *hw, u8 *mac_addr)
-{
- int i;
-
- for (i = 0; i < 6; i += 2) {
- mac_addr[i] = readb(hw->io + KS8842_MAC_ADDR_0_OFFSET + i);
- mac_addr[1 + i] = readb(hw->io + KS8842_MAC_ADDR_1_OFFSET + i);
- }
-}
-
-/**
* sw_set_addr - configure switch MAC address
* @hw: The hardware instance.
* @mac_addr: The MAC address.
@@ -2828,56 +2584,6 @@ static inline void hw_w_phy_ctrl(struct ksz_hw *hw, int phy, u16 data)
writew(data, hw->io + phy + KS884X_PHY_CTRL_OFFSET);
}
-static inline void hw_r_phy_link_stat(struct ksz_hw *hw, int phy, u16 *data)
-{
- *data = readw(hw->io + phy + KS884X_PHY_STATUS_OFFSET);
-}
-
-static inline void hw_r_phy_auto_neg(struct ksz_hw *hw, int phy, u16 *data)
-{
- *data = readw(hw->io + phy + KS884X_PHY_AUTO_NEG_OFFSET);
-}
-
-static inline void hw_w_phy_auto_neg(struct ksz_hw *hw, int phy, u16 data)
-{
- writew(data, hw->io + phy + KS884X_PHY_AUTO_NEG_OFFSET);
-}
-
-static inline void hw_r_phy_rem_cap(struct ksz_hw *hw, int phy, u16 *data)
-{
- *data = readw(hw->io + phy + KS884X_PHY_REMOTE_CAP_OFFSET);
-}
-
-static inline void hw_r_phy_crossover(struct ksz_hw *hw, int phy, u16 *data)
-{
- *data = readw(hw->io + phy + KS884X_PHY_CTRL_OFFSET);
-}
-
-static inline void hw_w_phy_crossover(struct ksz_hw *hw, int phy, u16 data)
-{
- writew(data, hw->io + phy + KS884X_PHY_CTRL_OFFSET);
-}
-
-static inline void hw_r_phy_polarity(struct ksz_hw *hw, int phy, u16 *data)
-{
- *data = readw(hw->io + phy + KS884X_PHY_PHY_CTRL_OFFSET);
-}
-
-static inline void hw_w_phy_polarity(struct ksz_hw *hw, int phy, u16 data)
-{
- writew(data, hw->io + phy + KS884X_PHY_PHY_CTRL_OFFSET);
-}
-
-static inline void hw_r_phy_link_md(struct ksz_hw *hw, int phy, u16 *data)
-{
- *data = readw(hw->io + phy + KS884X_PHY_LINK_MD_OFFSET);
-}
-
-static inline void hw_w_phy_link_md(struct ksz_hw *hw, int phy, u16 data)
-{
- writew(data, hw->io + phy + KS884X_PHY_LINK_MD_OFFSET);
-}
-
/**
* hw_r_phy - read data from PHY register
* @hw: The hardware instance.
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
index 9be6462f1cc5..2b6e046e1d10 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
@@ -605,7 +605,7 @@ static u64 lan966x_ifh_get(u8 *ifh, size_t pos, size_t length)
v = ifh[IFH_LEN_BYTES - (j / 8) - 1];
if (v & (1 << k))
- val |= (1 << i);
+ val |= (1ULL << i);
}
return val;
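
The lan966x change is a one-character fix with 64-bit consequences: val is a 64-bit accumulator, but the literal 1 is a 32-bit int, so 1 << i is undefined behaviour for i >= 31 and the upper 32 IFH bits could never be set. A standalone demonstration in standard C:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t val = 0;
	int i = 40;

	/* val |= (1 << i);  undefined: shift past the width of int */
	val |= (UINT64_C(1) << i);	/* the 1ULL form from the patch */

	printf("0x%llx\n", (unsigned long long)val); /* 0x10000000000 */
	return 0;
}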
diff --git a/drivers/net/ethernet/microsoft/mana/mana_bpf.c b/drivers/net/ethernet/microsoft/mana/mana_bpf.c
index 3caea631229c..23b1521c0df9 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_bpf.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_bpf.c
@@ -133,12 +133,6 @@ out:
return act;
}
-static unsigned int mana_xdp_fraglen(unsigned int len)
-{
- return SKB_DATA_ALIGN(len) +
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
-}
-
struct bpf_prog *mana_xdp_get(struct mana_port_context *apc)
{
ASSERT_RTNL();
@@ -179,17 +173,18 @@ static int mana_xdp_set(struct net_device *ndev, struct bpf_prog *prog,
{
struct mana_port_context *apc = netdev_priv(ndev);
struct bpf_prog *old_prog;
- int buf_max;
+ struct gdma_context *gc;
+
+ gc = apc->ac->gdma_dev->gdma_context;
old_prog = mana_xdp_get(apc);
if (!old_prog && !prog)
return 0;
- buf_max = XDP_PACKET_HEADROOM + mana_xdp_fraglen(ndev->mtu + ETH_HLEN);
- if (prog && buf_max > PAGE_SIZE) {
- netdev_err(ndev, "XDP: mtu:%u too large, buf_max:%u\n",
- ndev->mtu, buf_max);
+ if (prog && ndev->mtu > MANA_XDP_MTU_MAX) {
+ netdev_err(ndev, "XDP: mtu:%u too large, mtu_max:%lu\n",
+ ndev->mtu, MANA_XDP_MTU_MAX);
NL_SET_ERR_MSG_MOD(extack, "XDP: mtu too large");
return -EOPNOTSUPP;
@@ -206,6 +201,11 @@ static int mana_xdp_set(struct net_device *ndev, struct bpf_prog *prog,
if (apc->port_is_up)
mana_chn_setxdp(apc, prog);
+ if (prog)
+ ndev->max_mtu = MANA_XDP_MTU_MAX;
+ else
+ ndev->max_mtu = gc->adapter_mtu - ETH_HLEN;
+
return 0;
}
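
The XDP MTU check moves from a locally computed buf_max to the precomputed MANA_XDP_MTU_MAX, and the driver now tightens ndev->max_mtu while a program is loaded so the core rejects oversized MTU requests up front. For reference, the deleted check was:

/* Old check, from the removed mana_xdp_fraglen() helper:
 *   buf_max = XDP_PACKET_HEADROOM
 *           + SKB_DATA_ALIGN(mtu + ETH_HLEN)
 *           + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
 *   reject when prog && buf_max > PAGE_SIZE
 * MANA_XDP_MTU_MAX presumably encodes the same one-page budget
 * as a single constant in the headers.
 */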
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index 492474b4d8aa..cabecbfa1102 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -427,6 +427,192 @@ static u16 mana_select_queue(struct net_device *ndev, struct sk_buff *skb,
return txq;
}
+/* Release pre-allocated RX buffers */
+static void mana_pre_dealloc_rxbufs(struct mana_port_context *mpc)
+{
+ struct device *dev;
+ int i;
+
+ dev = mpc->ac->gdma_dev->gdma_context->dev;
+
+ if (!mpc->rxbufs_pre)
+ goto out1;
+
+ if (!mpc->das_pre)
+ goto out2;
+
+ while (mpc->rxbpre_total) {
+ i = --mpc->rxbpre_total;
+ dma_unmap_single(dev, mpc->das_pre[i], mpc->rxbpre_datasize,
+ DMA_FROM_DEVICE);
+ put_page(virt_to_head_page(mpc->rxbufs_pre[i]));
+ }
+
+ kfree(mpc->das_pre);
+ mpc->das_pre = NULL;
+
+out2:
+ kfree(mpc->rxbufs_pre);
+ mpc->rxbufs_pre = NULL;
+
+out1:
+ mpc->rxbpre_datasize = 0;
+ mpc->rxbpre_alloc_size = 0;
+ mpc->rxbpre_headroom = 0;
+}
+
+/* Get a buffer from the pre-allocated RX buffers */
+static void *mana_get_rxbuf_pre(struct mana_rxq *rxq, dma_addr_t *da)
+{
+ struct net_device *ndev = rxq->ndev;
+ struct mana_port_context *mpc;
+ void *va;
+
+ mpc = netdev_priv(ndev);
+
+ if (!mpc->rxbufs_pre || !mpc->das_pre || !mpc->rxbpre_total) {
+ netdev_err(ndev, "No RX pre-allocated bufs\n");
+ return NULL;
+ }
+
+ /* Check sizes to catch unexpected coding errors */
+ if (mpc->rxbpre_datasize != rxq->datasize) {
+ netdev_err(ndev, "rxbpre_datasize mismatch: %u: %u\n",
+ mpc->rxbpre_datasize, rxq->datasize);
+ return NULL;
+ }
+
+ if (mpc->rxbpre_alloc_size != rxq->alloc_size) {
+ netdev_err(ndev, "rxbpre_alloc_size mismatch: %u: %u\n",
+ mpc->rxbpre_alloc_size, rxq->alloc_size);
+ return NULL;
+ }
+
+ if (mpc->rxbpre_headroom != rxq->headroom) {
+ netdev_err(ndev, "rxbpre_headroom mismatch: %u: %u\n",
+ mpc->rxbpre_headroom, rxq->headroom);
+ return NULL;
+ }
+
+ mpc->rxbpre_total--;
+
+ *da = mpc->das_pre[mpc->rxbpre_total];
+ va = mpc->rxbufs_pre[mpc->rxbpre_total];
+ mpc->rxbufs_pre[mpc->rxbpre_total] = NULL;
+
+ /* Deallocate the array after all buffers are gone */
+ if (!mpc->rxbpre_total)
+ mana_pre_dealloc_rxbufs(mpc);
+
+ return va;
+}
+
+/* Get RX buffer's data size, alloc size, XDP headroom based on MTU */
+static void mana_get_rxbuf_cfg(int mtu, u32 *datasize, u32 *alloc_size,
+ u32 *headroom)
+{
+ if (mtu > MANA_XDP_MTU_MAX)
+ *headroom = 0; /* no support for XDP */
+ else
+ *headroom = XDP_PACKET_HEADROOM;
+
+ *alloc_size = mtu + MANA_RXBUF_PAD + *headroom;
+
+ *datasize = ALIGN(mtu + ETH_HLEN, MANA_RX_DATA_ALIGN);
+}
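mana_get_rxbuf_cfg() is now the single source of truth for the three buffer-geometry numbers, used both by the pre-allocation path and by queue creation; any divergence between the two is caught by the mismatch checks in mana_get_rxbuf_pre() above. A worked example for the default MTU, assuming the usual ETH_HLEN of 14 and a MANA_RX_DATA_ALIGN of 64 (both defined in the headers):

/* mtu = 1500:
 *   headroom   = XDP_PACKET_HEADROOM   (mtu <= MANA_XDP_MTU_MAX)
 *   datasize   = ALIGN(1500 + 14, 64) = 1536
 *   alloc_size = 1500 + MANA_RXBUF_PAD + headroom
 */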
+
+static int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu)
+{
+ struct device *dev;
+ struct page *page;
+ dma_addr_t da;
+ int num_rxb;
+ void *va;
+ int i;
+
+ mana_get_rxbuf_cfg(new_mtu, &mpc->rxbpre_datasize,
+ &mpc->rxbpre_alloc_size, &mpc->rxbpre_headroom);
+
+ dev = mpc->ac->gdma_dev->gdma_context->dev;
+
+ num_rxb = mpc->num_queues * RX_BUFFERS_PER_QUEUE;
+
+ WARN(mpc->rxbufs_pre, "mana rxbufs_pre exists\n");
+ mpc->rxbufs_pre = kmalloc_array(num_rxb, sizeof(void *), GFP_KERNEL);
+ if (!mpc->rxbufs_pre)
+ goto error;
+
+ mpc->das_pre = kmalloc_array(num_rxb, sizeof(dma_addr_t), GFP_KERNEL);
+ if (!mpc->das_pre)
+ goto error;
+
+ mpc->rxbpre_total = 0;
+
+ for (i = 0; i < num_rxb; i++) {
+ if (mpc->rxbpre_alloc_size > PAGE_SIZE) {
+ va = netdev_alloc_frag(mpc->rxbpre_alloc_size);
+ if (!va)
+ goto error;
+ } else {
+ page = dev_alloc_page();
+ if (!page)
+ goto error;
+
+ va = page_to_virt(page);
+ }
+
+ da = dma_map_single(dev, va + mpc->rxbpre_headroom,
+ mpc->rxbpre_datasize, DMA_FROM_DEVICE);
+
+ if (dma_mapping_error(dev, da)) {
+ put_page(virt_to_head_page(va));
+ goto error;
+ }
+
+ mpc->rxbufs_pre[i] = va;
+ mpc->das_pre[i] = da;
+ mpc->rxbpre_total = i + 1;
+ }
+
+ return 0;
+
+error:
+ mana_pre_dealloc_rxbufs(mpc);
+ return -ENOMEM;
+}
+
+static int mana_change_mtu(struct net_device *ndev, int new_mtu)
+{
+ struct mana_port_context *mpc = netdev_priv(ndev);
+ unsigned int old_mtu = ndev->mtu;
+ int err;
+
+ /* Pre-allocate buffers to prevent failure in mana_attach later */
+ err = mana_pre_alloc_rxbufs(mpc, new_mtu);
+ if (err) {
+ netdev_err(ndev, "Insufficient memory for new MTU\n");
+ return err;
+ }
+
+ err = mana_detach(ndev, false);
+ if (err) {
+ netdev_err(ndev, "mana_detach failed: %d\n", err);
+ goto out;
+ }
+
+ ndev->mtu = new_mtu;
+
+ err = mana_attach(ndev);
+ if (err) {
+ netdev_err(ndev, "mana_attach failed: %d\n", err);
+ ndev->mtu = old_mtu;
+ }
+
+out:
+ mana_pre_dealloc_rxbufs(mpc);
+ return err;
+}
+
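The MTU change is implemented as a full detach/attach cycle, which tears down and rebuilds every queue; the up-front mana_pre_alloc_rxbufs() call exists so the rebuild cannot fail midway for lack of RX buffers (the pre-allocated set is consumed by mana_fill_rx_oob() further down, and any leftovers are freed on the out: path). With this hook in place the MTU becomes runtime-configurable, along these lines (hypothetical interface name; previously min_mtu == max_mtu pinned the device to 1500):

ip link set dev eth0 mtu 4088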
static const struct net_device_ops mana_devops = {
.ndo_open = mana_open,
.ndo_stop = mana_close,
@@ -436,6 +622,7 @@ static const struct net_device_ops mana_devops = {
.ndo_get_stats64 = mana_get_stats64,
.ndo_bpf = mana_bpf,
.ndo_xdp_xmit = mana_xdp_xmit,
+ .ndo_change_mtu = mana_change_mtu,
};
static void mana_cleanup_port_context(struct mana_port_context *apc)
@@ -625,6 +812,9 @@ static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver,
mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_DEV_CONFIG,
sizeof(req), sizeof(resp));
+
+ req.hdr.resp.msg_version = GDMA_MESSAGE_V2;
+
req.proto_major_ver = proto_major_ver;
req.proto_minor_ver = proto_minor_ver;
req.proto_micro_ver = proto_micro_ver;
@@ -647,6 +837,11 @@ static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver,
*max_num_vports = resp.max_num_vports;
+ if (resp.hdr.response.msg_version == GDMA_MESSAGE_V2)
+ gc->adapter_mtu = resp.adapter_mtu;
+ else
+ gc->adapter_mtu = ETH_FRAME_LEN;
+
return 0;
}
@@ -1185,10 +1380,10 @@ static void mana_post_pkt_rxq(struct mana_rxq *rxq)
WARN_ON_ONCE(recv_buf_oob->wqe_inf.wqe_size_in_bu != 1);
}
-static struct sk_buff *mana_build_skb(void *buf_va, uint pkt_len,
- struct xdp_buff *xdp)
+static struct sk_buff *mana_build_skb(struct mana_rxq *rxq, void *buf_va,
+ uint pkt_len, struct xdp_buff *xdp)
{
- struct sk_buff *skb = build_skb(buf_va, PAGE_SIZE);
+ struct sk_buff *skb = napi_build_skb(buf_va, rxq->alloc_size);
if (!skb)
return NULL;
@@ -1196,11 +1391,12 @@ static struct sk_buff *mana_build_skb(void *buf_va, uint pkt_len,
if (xdp->data_hard_start) {
skb_reserve(skb, xdp->data - xdp->data_hard_start);
skb_put(skb, xdp->data_end - xdp->data);
- } else {
- skb_reserve(skb, XDP_PACKET_HEADROOM);
- skb_put(skb, pkt_len);
+ return skb;
}
+ skb_reserve(skb, rxq->headroom);
+ skb_put(skb, pkt_len);
+
return skb;
}
@@ -1233,7 +1429,7 @@ static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe,
if (act != XDP_PASS && act != XDP_TX)
goto drop_xdp;
- skb = mana_build_skb(buf_va, pkt_len, &xdp);
+ skb = mana_build_skb(rxq, buf_va, pkt_len, &xdp);
if (!skb)
goto drop;
@@ -1282,14 +1478,72 @@ drop_xdp:
u64_stats_update_end(&rx_stats->syncp);
drop:
- WARN_ON_ONCE(rxq->xdp_save_page);
- rxq->xdp_save_page = virt_to_page(buf_va);
+ WARN_ON_ONCE(rxq->xdp_save_va);
+ /* Save for reuse */
+ rxq->xdp_save_va = buf_va;
++ndev->stats.rx_dropped;
return;
}
+static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
+ dma_addr_t *da, bool is_napi)
+{
+ struct page *page;
+ void *va;
+
+ /* Reuse XDP dropped page if available */
+ if (rxq->xdp_save_va) {
+ va = rxq->xdp_save_va;
+ rxq->xdp_save_va = NULL;
+ } else if (rxq->alloc_size > PAGE_SIZE) {
+ if (is_napi)
+ va = napi_alloc_frag(rxq->alloc_size);
+ else
+ va = netdev_alloc_frag(rxq->alloc_size);
+
+ if (!va)
+ return NULL;
+ } else {
+ page = dev_alloc_page();
+ if (!page)
+ return NULL;
+
+ va = page_to_virt(page);
+ }
+
+ *da = dma_map_single(dev, va + rxq->headroom, rxq->datasize,
+ DMA_FROM_DEVICE);
+
+ if (dma_mapping_error(dev, *da)) {
+ put_page(virt_to_head_page(va));
+ return NULL;
+ }
+
+ return va;
+}
+
+/* Allocate frag for rx buffer, and save the old buf */
+static void mana_refill_rxoob(struct device *dev, struct mana_rxq *rxq,
+ struct mana_recv_buf_oob *rxoob, void **old_buf)
+{
+ dma_addr_t da;
+ void *va;
+
+ va = mana_get_rxfrag(rxq, dev, &da, true);
+
+ if (!va)
+ return;
+
+ dma_unmap_single(dev, rxoob->sgl[0].address, rxq->datasize,
+ DMA_FROM_DEVICE);
+ *old_buf = rxoob->buf_va;
+
+ rxoob->buf_va = va;
+ rxoob->sgl[0].address = da;
+}
+
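mana_refill_rxoob() encodes a drop-don't-stall policy: if a replacement fragment cannot be allocated, the function returns without touching the OOB, the hardware keeps the old buffer, and *old_buf stays NULL so that only this one packet is lost rather than a ring descriptor. In sketch form:

/* refill succeeded: old buffer goes up the stack, ring gets the new one
 * refill failed:    ring keeps the old buffer; mana_rx_skb() sees a
 *                   NULL buffer and drops the packet (counted as
 *                   rx_dropped there, per the comment below)
 */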
static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
struct gdma_comp *cqe)
{
@@ -1299,10 +1553,8 @@ static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
struct mana_recv_buf_oob *rxbuf_oob;
struct mana_port_context *apc;
struct device *dev = gc->dev;
- void *new_buf, *old_buf;
- struct page *new_page;
+ void *old_buf = NULL;
u32 curr, pktlen;
- dma_addr_t da;
apc = netdev_priv(ndev);
@@ -1345,40 +1597,11 @@ static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
rxbuf_oob = &rxq->rx_oobs[curr];
WARN_ON_ONCE(rxbuf_oob->wqe_inf.wqe_size_in_bu != 1);
- /* Reuse XDP dropped page if available */
- if (rxq->xdp_save_page) {
- new_page = rxq->xdp_save_page;
- rxq->xdp_save_page = NULL;
- } else {
- new_page = alloc_page(GFP_ATOMIC);
- }
-
- if (new_page) {
- da = dma_map_page(dev, new_page, XDP_PACKET_HEADROOM, rxq->datasize,
- DMA_FROM_DEVICE);
-
- if (dma_mapping_error(dev, da)) {
- __free_page(new_page);
- new_page = NULL;
- }
- }
-
- new_buf = new_page ? page_to_virt(new_page) : NULL;
-
- if (new_buf) {
- dma_unmap_page(dev, rxbuf_oob->buf_dma_addr, rxq->datasize,
- DMA_FROM_DEVICE);
-
- old_buf = rxbuf_oob->buf_va;
-
- /* refresh the rxbuf_oob with the new page */
- rxbuf_oob->buf_va = new_buf;
- rxbuf_oob->buf_dma_addr = da;
- rxbuf_oob->sgl[0].address = rxbuf_oob->buf_dma_addr;
- } else {
- old_buf = NULL; /* drop the packet if no memory */
- }
+ mana_refill_rxoob(dev, rxq, rxbuf_oob, &old_buf);
+ /* Unsuccessful refill will have old_buf == NULL.
+ * In this case, mana_rx_skb() will drop the packet.
+ */
mana_rx_skb(old_buf, oob, rxq);
drop:
@@ -1659,8 +1882,8 @@ static void mana_destroy_rxq(struct mana_port_context *apc,
mana_deinit_cq(apc, &rxq->rx_cq);
- if (rxq->xdp_save_page)
- __free_page(rxq->xdp_save_page);
+ if (rxq->xdp_save_va)
+ put_page(virt_to_head_page(rxq->xdp_save_va));
for (i = 0; i < rxq->num_rx_buf; i++) {
rx_oob = &rxq->rx_oobs[i];
@@ -1668,10 +1891,10 @@ static void mana_destroy_rxq(struct mana_port_context *apc,
if (!rx_oob->buf_va)
continue;
- dma_unmap_page(dev, rx_oob->buf_dma_addr, rxq->datasize,
- DMA_FROM_DEVICE);
+ dma_unmap_single(dev, rx_oob->sgl[0].address,
+ rx_oob->sgl[0].size, DMA_FROM_DEVICE);
- free_page((unsigned long)rx_oob->buf_va);
+ put_page(virt_to_head_page(rx_oob->buf_va));
rx_oob->buf_va = NULL;
}
@@ -1681,6 +1904,30 @@ static void mana_destroy_rxq(struct mana_port_context *apc,
kfree(rxq);
}
+static int mana_fill_rx_oob(struct mana_recv_buf_oob *rx_oob, u32 mem_key,
+ struct mana_rxq *rxq, struct device *dev)
+{
+ struct mana_port_context *mpc = netdev_priv(rxq->ndev);
+ dma_addr_t da;
+ void *va;
+
+ if (mpc->rxbufs_pre)
+ va = mana_get_rxbuf_pre(rxq, &da);
+ else
+ va = mana_get_rxfrag(rxq, dev, &da, false);
+
+ if (!va)
+ return -ENOMEM;
+
+ rx_oob->buf_va = va;
+
+ rx_oob->sgl[0].address = da;
+ rx_oob->sgl[0].size = rxq->datasize;
+ rx_oob->sgl[0].mem_key = mem_key;
+
+ return 0;
+}
+
#define MANA_WQE_HEADER_SIZE 16
#define MANA_WQE_SGE_SIZE 16
@@ -1690,11 +1937,10 @@ static int mana_alloc_rx_wqe(struct mana_port_context *apc,
struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
struct mana_recv_buf_oob *rx_oob;
struct device *dev = gc->dev;
- struct page *page;
- dma_addr_t da;
u32 buf_idx;
+ int ret;
- WARN_ON(rxq->datasize == 0 || rxq->datasize > PAGE_SIZE);
+ WARN_ON(rxq->datasize == 0);
*rxq_size = 0;
*cq_size = 0;
@@ -1703,25 +1949,12 @@ static int mana_alloc_rx_wqe(struct mana_port_context *apc,
rx_oob = &rxq->rx_oobs[buf_idx];
memset(rx_oob, 0, sizeof(*rx_oob));
- page = alloc_page(GFP_KERNEL);
- if (!page)
- return -ENOMEM;
-
- da = dma_map_page(dev, page, XDP_PACKET_HEADROOM, rxq->datasize,
- DMA_FROM_DEVICE);
-
- if (dma_mapping_error(dev, da)) {
- __free_page(page);
- return -ENOMEM;
- }
-
- rx_oob->buf_va = page_to_virt(page);
- rx_oob->buf_dma_addr = da;
-
rx_oob->num_sge = 1;
- rx_oob->sgl[0].address = rx_oob->buf_dma_addr;
- rx_oob->sgl[0].size = rxq->datasize;
- rx_oob->sgl[0].mem_key = apc->ac->gdma_dev->gpa_mkey;
+
+ ret = mana_fill_rx_oob(rx_oob, apc->ac->gdma_dev->gpa_mkey, rxq,
+ dev);
+ if (ret)
+ return ret;
rx_oob->wqe_req.sgl = rx_oob->sgl;
rx_oob->wqe_req.num_sge = rx_oob->num_sge;
@@ -1780,9 +2013,11 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
rxq->ndev = ndev;
rxq->num_rx_buf = RX_BUFFERS_PER_QUEUE;
rxq->rxq_idx = rxq_idx;
- rxq->datasize = ALIGN(MAX_FRAME_SIZE, 64);
rxq->rxobj = INVALID_MANA_HANDLE;
+ mana_get_rxbuf_cfg(ndev->mtu, &rxq->datasize, &rxq->alloc_size,
+ &rxq->headroom);
+
err = mana_alloc_rx_wqe(apc, rxq, &rq_size, &cq_size);
if (err)
goto out;
@@ -2194,8 +2429,8 @@ static int mana_probe_port(struct mana_context *ac, int port_idx,
ndev->netdev_ops = &mana_devops;
ndev->ethtool_ops = &mana_ethtool_ops;
ndev->mtu = ETH_DATA_LEN;
- ndev->max_mtu = ndev->mtu;
- ndev->min_mtu = ndev->mtu;
+ ndev->max_mtu = gc->adapter_mtu - ETH_HLEN;
+ ndev->min_mtu = ETH_MIN_MTU;
ndev->needed_headroom = MANA_HEADROOM;
ndev->dev_port = port_idx;
SET_NETDEV_DEV(ndev, gc->dev);
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 1502bb2c8ea7..1f5f00b30441 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -8,6 +8,7 @@
#include <linux/if_bridge.h>
#include <linux/iopoll.h>
#include <linux/phy/phy.h>
+#include <net/pkt_sched.h>
#include <soc/mscc/ocelot_hsio.h>
#include <soc/mscc/ocelot_vcap.h>
#include "ocelot.h"
@@ -1005,7 +1006,12 @@ void ocelot_phylink_mac_link_up(struct ocelot *ocelot, int port,
*/
if (ocelot->ops->cut_through_fwd) {
mutex_lock(&ocelot->fwd_domain_lock);
- ocelot->ops->cut_through_fwd(ocelot);
+ /* Workaround for a hardware bug: frame preemption (FP)
+ * does not work at every link speed in every PHY mode.
+ * The function below also calls ocelot->ops->cut_through_fwd(),
+ * so we don't need to do it twice.
+ */
+ ocelot_port_update_active_preemptible_tcs(ocelot, port);
mutex_unlock(&ocelot->fwd_domain_lock);
}
@@ -2699,6 +2705,58 @@ void ocelot_port_mirror_del(struct ocelot *ocelot, int from, bool ingress)
}
EXPORT_SYMBOL_GPL(ocelot_port_mirror_del);
+static void ocelot_port_reset_mqprio(struct ocelot *ocelot, int port)
+{
+ struct net_device *dev = ocelot->ops->port_to_netdev(ocelot, port);
+
+ netdev_reset_tc(dev);
+ ocelot_port_change_fp(ocelot, port, 0);
+}
+
+int ocelot_port_mqprio(struct ocelot *ocelot, int port,
+ struct tc_mqprio_qopt_offload *mqprio)
+{
+ struct net_device *dev = ocelot->ops->port_to_netdev(ocelot, port);
+ struct netlink_ext_ack *extack = mqprio->extack;
+ struct tc_mqprio_qopt *qopt = &mqprio->qopt;
+ int num_tc = qopt->num_tc;
+ int tc, err;
+
+ if (!num_tc) {
+ ocelot_port_reset_mqprio(ocelot, port);
+ return 0;
+ }
+
+ err = netdev_set_num_tc(dev, num_tc);
+ if (err)
+ return err;
+
+ for (tc = 0; tc < num_tc; tc++) {
+ if (qopt->count[tc] != 1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only one TXQ per TC supported");
+ return -EINVAL;
+ }
+
+ err = netdev_set_tc_queue(dev, tc, 1, qopt->offset[tc]);
+ if (err)
+ goto err_reset_tc;
+ }
+
+ err = netif_set_real_num_tx_queues(dev, num_tc);
+ if (err)
+ goto err_reset_tc;
+
+ ocelot_port_change_fp(ocelot, port, mqprio->preemptible_tcs);
+
+ return 0;
+
+err_reset_tc:
+ ocelot_port_reset_mqprio(ocelot, port);
+ return err;
+}
+EXPORT_SYMBOL_GPL(ocelot_port_mqprio);
+
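ocelot_port_mqprio() accepts only trivial TC-to-TXQ maps (exactly one queue per TC) and then forwards the qdisc's preemptible_tcs mask into the MAC Merge layer via ocelot_port_change_fp(). Assuming an iproute2 build with mqprio frame-preemption support, the offload would be exercised along these lines (interface name and mapping are examples):

# 4 TCs, one TXQ each; TCs 2 and 3 requested as preemptible
tc qdisc replace dev swp0 root handle 1: mqprio num_tc 4 \
	map 0 1 2 3 queues 1@0 1@1 1@2 1@3 hw 1 \
	fp E E P P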
void ocelot_init_port(struct ocelot *ocelot, int port)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h
index e9a0179448bf..87f2055c242c 100644
--- a/drivers/net/ethernet/mscc/ocelot.h
+++ b/drivers/net/ethernet/mscc/ocelot.h
@@ -74,6 +74,15 @@ struct ocelot_multicast {
struct ocelot_pgid *pgid;
};
+static inline void ocelot_reg_to_target_addr(struct ocelot *ocelot,
+ enum ocelot_reg reg,
+ enum ocelot_target *target,
+ u32 *addr)
+{
+ *target = reg >> TARGET_OFFSET;
+ *addr = ocelot->map[*target][reg & REG_MASK];
+}
+
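The new helper centralizes the register-ID decoding that was previously open-coded at every regmap call. An enum ocelot_reg is an abstract ID, not an MMIO address: the target block lives in the bits above TARGET_OFFSET and the low bits index a per-SoC translation table, which is what lets one driver serve several switch families. In short:

/* reg is an ID, not an offset:
 *   target = reg >> TARGET_OFFSET;                 which register block
 *   addr   = ocelot->map[target][reg & REG_MASK];  SoC-specific offset
 */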
int ocelot_bridge_num_find(struct ocelot *ocelot,
const struct net_device *bridge);
@@ -85,9 +94,6 @@ int ocelot_mact_forget(struct ocelot *ocelot,
struct net_device *ocelot_port_to_netdev(struct ocelot *ocelot, int port);
int ocelot_netdev_to_port(struct net_device *dev);
-u32 ocelot_port_readl(struct ocelot_port *port, u32 reg);
-void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg);
-
int ocelot_probe_port(struct ocelot *ocelot, int port, struct regmap *target,
struct device_node *portnp);
void ocelot_release_port(struct ocelot_port *ocelot_port);
@@ -110,6 +116,9 @@ int ocelot_stats_init(struct ocelot *ocelot);
void ocelot_stats_deinit(struct ocelot *ocelot);
int ocelot_mm_init(struct ocelot *ocelot);
+void ocelot_port_change_fp(struct ocelot *ocelot, int port,
+ unsigned long preemptible_tcs);
+void ocelot_port_update_active_preemptible_tcs(struct ocelot *ocelot, int port);
extern struct notifier_block ocelot_netdevice_nb;
extern struct notifier_block ocelot_switchdev_nb;
diff --git a/drivers/net/ethernet/mscc/ocelot_io.c b/drivers/net/ethernet/mscc/ocelot_io.c
index 2067382d0ee1..3aa7dc29ebe1 100644
--- a/drivers/net/ethernet/mscc/ocelot_io.c
+++ b/drivers/net/ethernet/mscc/ocelot_io.c
@@ -10,57 +10,60 @@
#include "ocelot.h"
-int __ocelot_bulk_read_ix(struct ocelot *ocelot, u32 reg, u32 offset, void *buf,
- int count)
+int __ocelot_bulk_read_ix(struct ocelot *ocelot, enum ocelot_reg reg,
+ u32 offset, void *buf, int count)
{
- u16 target = reg >> TARGET_OFFSET;
+ enum ocelot_target target;
+ u32 addr;
+ ocelot_reg_to_target_addr(ocelot, reg, &target, &addr);
WARN_ON(!target);
- return regmap_bulk_read(ocelot->targets[target],
- ocelot->map[target][reg & REG_MASK] + offset,
+ return regmap_bulk_read(ocelot->targets[target], addr + offset,
buf, count);
}
EXPORT_SYMBOL_GPL(__ocelot_bulk_read_ix);
-u32 __ocelot_read_ix(struct ocelot *ocelot, u32 reg, u32 offset)
+u32 __ocelot_read_ix(struct ocelot *ocelot, enum ocelot_reg reg, u32 offset)
{
- u16 target = reg >> TARGET_OFFSET;
- u32 val;
+ enum ocelot_target target;
+ u32 addr, val;
+ ocelot_reg_to_target_addr(ocelot, reg, &target, &addr);
WARN_ON(!target);
- regmap_read(ocelot->targets[target],
- ocelot->map[target][reg & REG_MASK] + offset, &val);
+ regmap_read(ocelot->targets[target], addr + offset, &val);
return val;
}
EXPORT_SYMBOL_GPL(__ocelot_read_ix);
-void __ocelot_write_ix(struct ocelot *ocelot, u32 val, u32 reg, u32 offset)
+void __ocelot_write_ix(struct ocelot *ocelot, u32 val, enum ocelot_reg reg,
+ u32 offset)
{
- u16 target = reg >> TARGET_OFFSET;
+ enum ocelot_target target;
+ u32 addr;
+ ocelot_reg_to_target_addr(ocelot, reg, &target, &addr);
WARN_ON(!target);
- regmap_write(ocelot->targets[target],
- ocelot->map[target][reg & REG_MASK] + offset, val);
+ regmap_write(ocelot->targets[target], addr + offset, val);
}
EXPORT_SYMBOL_GPL(__ocelot_write_ix);
-void __ocelot_rmw_ix(struct ocelot *ocelot, u32 val, u32 mask, u32 reg,
- u32 offset)
+void __ocelot_rmw_ix(struct ocelot *ocelot, u32 val, u32 mask,
+ enum ocelot_reg reg, u32 offset)
{
- u16 target = reg >> TARGET_OFFSET;
+ enum ocelot_target target;
+ u32 addr;
+ ocelot_reg_to_target_addr(ocelot, reg, &target, &addr);
WARN_ON(!target);
- regmap_update_bits(ocelot->targets[target],
- ocelot->map[target][reg & REG_MASK] + offset,
- mask, val);
+ regmap_update_bits(ocelot->targets[target], addr + offset, mask, val);
}
EXPORT_SYMBOL_GPL(__ocelot_rmw_ix);
-u32 ocelot_port_readl(struct ocelot_port *port, u32 reg)
+u32 ocelot_port_readl(struct ocelot_port *port, enum ocelot_reg reg)
{
struct ocelot *ocelot = port->ocelot;
u16 target = reg >> TARGET_OFFSET;
@@ -73,7 +76,7 @@ u32 ocelot_port_readl(struct ocelot_port *port, u32 reg)
}
EXPORT_SYMBOL_GPL(ocelot_port_readl);
-void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg)
+void ocelot_port_writel(struct ocelot_port *port, u32 val, enum ocelot_reg reg)
{
struct ocelot *ocelot = port->ocelot;
u16 target = reg >> TARGET_OFFSET;
@@ -84,7 +87,8 @@ void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg)
}
EXPORT_SYMBOL_GPL(ocelot_port_writel);
-void ocelot_port_rmwl(struct ocelot_port *port, u32 val, u32 mask, u32 reg)
+void ocelot_port_rmwl(struct ocelot_port *port, u32 val, u32 mask,
+ enum ocelot_reg reg)
{
u32 cur = ocelot_port_readl(port, reg);
diff --git a/drivers/net/ethernet/mscc/ocelot_mm.c b/drivers/net/ethernet/mscc/ocelot_mm.c
index 0a8f21ae23f0..fb3145118d68 100644
--- a/drivers/net/ethernet/mscc/ocelot_mm.c
+++ b/drivers/net/ethernet/mscc/ocelot_mm.c
@@ -49,14 +49,68 @@ static enum ethtool_mm_verify_status ocelot_mm_verify_status(u32 val)
}
}
-void ocelot_port_mm_irq(struct ocelot *ocelot, int port)
+void ocelot_port_update_active_preemptible_tcs(struct ocelot *ocelot, int port)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct ocelot_mm_state *mm = &ocelot->mm[port];
+ u32 val = 0;
+
+ lockdep_assert_held(&ocelot->fwd_domain_lock);
+
+ /* Only commit preemptible TCs when MAC Merge is active.
+ * On NXP LS1028A, when using QSGMII, the port hangs if transmitting
+ * preemptible frames at any other link speed than gigabit, so avoid
+ * preemption at lower speeds in this PHY mode.
+ */
+ if ((ocelot_port->phy_mode != PHY_INTERFACE_MODE_QSGMII ||
+ ocelot_port->speed == SPEED_1000) && mm->tx_active)
+ val = mm->preemptible_tcs;
+
+ /* Cut through switching doesn't work for preemptible priorities,
+ * so first make sure it is disabled.
+ */
+ mm->active_preemptible_tcs = val;
+ ocelot->ops->cut_through_fwd(ocelot);
+
+ dev_dbg(ocelot->dev,
+ "port %d %s/%s, MM TX %s, preemptible TCs 0x%x, active 0x%x\n",
+ port, phy_modes(ocelot_port->phy_mode),
+ phy_speed_to_str(ocelot_port->speed),
+ mm->tx_active ? "active" : "inactive", mm->preemptible_tcs,
+ mm->active_preemptible_tcs);
+
+ ocelot_rmw_rix(ocelot, QSYS_PREEMPTION_CFG_P_QUEUES(val),
+ QSYS_PREEMPTION_CFG_P_QUEUES_M,
+ QSYS_PREEMPTION_CFG, port);
+}
+
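The committed P-queues mask is therefore the conjunction of three conditions: the user asked for preemptible TCs, MAC Merge TX is active, and the link is not in the QSGMII-below-gigabit erratum case. Summarized from the conditions above:

/* phy_mode    speed      tx_active   committed mask
 * QSGMII      1000       yes         mm->preemptible_tcs
 * QSGMII      != 1000    yes         0   (LS1028A erratum)
 * other       any        yes         mm->preemptible_tcs
 * any         any        no          0
 */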
+void ocelot_port_change_fp(struct ocelot *ocelot, int port,
+ unsigned long preemptible_tcs)
+{
+ struct ocelot_mm_state *mm = &ocelot->mm[port];
+
+ mutex_lock(&ocelot->fwd_domain_lock);
+
+ if (mm->preemptible_tcs == preemptible_tcs)
+ goto out_unlock;
+
+ mm->preemptible_tcs = preemptible_tcs;
+
+ ocelot_port_update_active_preemptible_tcs(ocelot, port);
+
+out_unlock:
+ mutex_unlock(&ocelot->fwd_domain_lock);
+}
+
+static void ocelot_mm_update_port_status(struct ocelot *ocelot, int port)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
struct ocelot_mm_state *mm = &ocelot->mm[port];
enum ethtool_mm_verify_status verify_status;
- u32 val;
+ u32 val, ack = 0;
- mutex_lock(&mm->lock);
+ if (!mm->tx_enabled)
+ return;
val = ocelot_port_readl(ocelot_port, DEV_MM_STATUS);
@@ -73,25 +127,43 @@ void ocelot_port_mm_irq(struct ocelot *ocelot, int port)
dev_dbg(ocelot->dev, "Port %d TX preemption %s\n",
port, mm->tx_active ? "active" : "inactive");
+ ocelot_port_update_active_preemptible_tcs(ocelot, port);
+
+ ack |= DEV_MM_STAT_MM_STATUS_PRMPT_ACTIVE_STICKY;
}
if (val & DEV_MM_STAT_MM_STATUS_UNEXP_RX_PFRM_STICKY) {
dev_err(ocelot->dev,
"Unexpected P-frame received on port %d while verification was unsuccessful or not yet verified\n",
port);
+
+ ack |= DEV_MM_STAT_MM_STATUS_UNEXP_RX_PFRM_STICKY;
}
if (val & DEV_MM_STAT_MM_STATUS_UNEXP_TX_PFRM_STICKY) {
dev_err(ocelot->dev,
"Unexpected P-frame requested to be transmitted on port %d while verification was unsuccessful or not yet verified, or MM_TX_ENA=0\n",
port);
+
+ ack |= DEV_MM_STAT_MM_STATUS_UNEXP_TX_PFRM_STICKY;
}
- ocelot_port_writel(ocelot_port, val, DEV_MM_STATUS);
+ if (ack)
+ ocelot_port_writel(ocelot_port, ack, DEV_MM_STATUS);
+}
- mutex_unlock(&mm->lock);
+void ocelot_mm_irq(struct ocelot *ocelot)
+{
+ int port;
+
+ mutex_lock(&ocelot->fwd_domain_lock);
+
+ for (port = 0; port < ocelot->num_phys_ports; port++)
+ ocelot_mm_update_port_status(ocelot, port);
+
+ mutex_unlock(&ocelot->fwd_domain_lock);
}
-EXPORT_SYMBOL_GPL(ocelot_port_mm_irq);
+EXPORT_SYMBOL_GPL(ocelot_mm_irq);
int ocelot_port_set_mm(struct ocelot *ocelot, int port,
struct ethtool_mm_cfg *cfg,
@@ -121,7 +193,7 @@ int ocelot_port_set_mm(struct ocelot *ocelot, int port,
if (!cfg->verify_enabled)
verify_disable = DEV_MM_CONFIG_VERIF_CONFIG_PRM_VERIFY_DIS;
- mutex_lock(&mm->lock);
+ mutex_lock(&ocelot->fwd_domain_lock);
ocelot_port_rmwl(ocelot_port, mm_enable,
DEV_MM_CONFIG_ENABLE_CONFIG_MM_TX_ENA |
@@ -140,7 +212,20 @@ int ocelot_port_set_mm(struct ocelot *ocelot, int port,
QSYS_PREEMPTION_CFG,
port);
- mutex_unlock(&mm->lock);
+ /* The switch will emit an IRQ when TX is disabled, to notify that it
+ * has become inactive. We optimize ocelot_mm_update_port_status() to
+ * not bother processing MM IRQs at all for ports with TX disabled,
+ * but we need to ACK this IRQ now, while mm->tx_enabled is still set,
+ * otherwise we get an IRQ storm.
+ */
+ if (mm->tx_enabled && !cfg->tx_enabled) {
+ ocelot_mm_update_port_status(ocelot, port);
+ WARN_ON(mm->tx_active);
+ }
+
+ mm->tx_enabled = cfg->tx_enabled;
+
+ mutex_unlock(&ocelot->fwd_domain_lock);
return 0;
}
@@ -158,7 +243,7 @@ int ocelot_port_get_mm(struct ocelot *ocelot, int port,
mm = &ocelot->mm[port];
- mutex_lock(&mm->lock);
+ mutex_lock(&ocelot->fwd_domain_lock);
val = ocelot_port_readl(ocelot_port, DEV_MM_ENABLE_CONFIG);
state->pmac_enabled = !!(val & DEV_MM_CONFIG_ENABLE_CONFIG_MM_RX_ENA);
@@ -174,10 +259,11 @@ int ocelot_port_get_mm(struct ocelot *ocelot, int port,
state->tx_min_frag_size = ethtool_mm_frag_size_add_to_min(add_frag_size);
state->rx_min_frag_size = ETH_ZLEN;
+ ocelot_mm_update_port_status(ocelot, port);
state->verify_status = mm->verify_status;
state->tx_active = mm->tx_active;
- mutex_unlock(&mm->lock);
+ mutex_unlock(&ocelot->fwd_domain_lock);
return 0;
}
@@ -201,7 +287,6 @@ int ocelot_mm_init(struct ocelot *ocelot)
u32 val;
mm = &ocelot->mm[port];
- mutex_init(&mm->lock);
ocelot_port = ocelot->ports[port];
/* Update initial status variable for the
diff --git a/drivers/net/ethernet/mscc/ocelot_stats.c b/drivers/net/ethernet/mscc/ocelot_stats.c
index d0e6cd8dbe5c..5c55197c7327 100644
--- a/drivers/net/ethernet/mscc/ocelot_stats.c
+++ b/drivers/net/ethernet/mscc/ocelot_stats.c
@@ -145,7 +145,7 @@ enum ocelot_stat {
};
struct ocelot_stat_layout {
- u32 reg;
+ enum ocelot_reg reg;
char name[ETH_GSTRING_LEN];
};
@@ -257,7 +257,7 @@ struct ocelot_stat_layout {
struct ocelot_stats_region {
struct list_head node;
- u32 base;
+ enum ocelot_reg base;
enum ocelot_stat first_stat;
int count;
u32 *buf;
@@ -395,7 +395,7 @@ static void ocelot_check_stats_work(struct work_struct *work)
void ocelot_get_strings(struct ocelot *ocelot, int port, u32 sset, u8 *data)
{
const struct ocelot_stat_layout *layout;
- int i;
+ enum ocelot_stat i;
if (sset != ETH_SS_STATS)
return;
@@ -442,7 +442,8 @@ out_unlock:
int ocelot_get_sset_count(struct ocelot *ocelot, int port, int sset)
{
const struct ocelot_stat_layout *layout;
- int i, num_stats = 0;
+ enum ocelot_stat i;
+ int num_stats = 0;
if (sset != ETH_SS_STATS)
return -EOPNOTSUPP;
@@ -461,8 +462,8 @@ static void ocelot_port_ethtool_stats_cb(struct ocelot *ocelot, int port,
void *priv)
{
const struct ocelot_stat_layout *layout;
+ enum ocelot_stat i;
u64 *data = priv;
- int i;
layout = ocelot_get_stats_layout(ocelot);
@@ -889,8 +890,8 @@ static int ocelot_prepare_stats_regions(struct ocelot *ocelot)
{
struct ocelot_stats_region *region = NULL;
const struct ocelot_stat_layout *layout;
- unsigned int last = 0;
- int i;
+ enum ocelot_reg last = 0;
+ enum ocelot_stat i;
INIT_LIST_HEAD(&ocelot->stats_regions);
@@ -900,6 +901,17 @@ static int ocelot_prepare_stats_regions(struct ocelot *ocelot)
if (!layout[i].reg)
continue;
+ /* enum ocelot_stat must be kept sorted in the same order
+ * as the addresses behind layout[i].reg in order to have
+ * efficient bulking
+ */
+ if (last) {
+ WARN(ocelot->map[SYS][last & REG_MASK] >= ocelot->map[SYS][layout[i].reg & REG_MASK],
+ "reg 0x%x had address 0x%x but reg 0x%x has address 0x%x, bulking broken!",
+ last, ocelot->map[SYS][last & REG_MASK],
+ layout[i].reg, ocelot->map[SYS][layout[i].reg & REG_MASK]);
+ }
+
if (region && ocelot->map[SYS][layout[i].reg & REG_MASK] ==
ocelot->map[SYS][last & REG_MASK] + 4) {
region->count++;
@@ -909,12 +921,6 @@ static int ocelot_prepare_stats_regions(struct ocelot *ocelot)
if (!region)
return -ENOMEM;
- /* enum ocelot_stat must be kept sorted in the same
- * order as layout[i].reg in order to have efficient
- * bulking
- */
- WARN_ON(last >= layout[i].reg);
-
region->base = layout[i].reg;
region->first_stat = i;
region->count = 1;
@@ -925,6 +931,15 @@ static int ocelot_prepare_stats_regions(struct ocelot *ocelot)
}
list_for_each_entry(region, &ocelot->stats_regions, node) {
+ enum ocelot_target target;
+ u32 addr;
+
+ ocelot_reg_to_target_addr(ocelot, region->base, &target,
+ &addr);
+
+ dev_dbg(ocelot->dev,
+ "region of %d contiguous counters starting with SYS:STAT:CNT[0x%03x]\n",
+ region->count, addr / 4);
region->buf = devm_kcalloc(ocelot->dev, region->count,
sizeof(*region->buf), GFP_KERNEL);
if (!region->buf)
@@ -972,4 +987,3 @@ void ocelot_stats_deinit(struct ocelot *ocelot)
cancel_delayed_work(&ocelot->stats_work);
destroy_workqueue(ocelot->stats_queue);
}
-
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_hwmon.c b/drivers/net/ethernet/netronome/nfp/nfp_hwmon.c
index 5cabb1aa9c0c..0d6c59d6d4ae 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_hwmon.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_hwmon.c
@@ -115,7 +115,7 @@ static const struct hwmon_channel_info nfp_power = {
.config = nfp_power_config,
};
-static const struct hwmon_channel_info *nfp_hwmon_info[] = {
+static const struct hwmon_channel_info * const nfp_hwmon_info[] = {
&nfp_chip,
&nfp_temp,
&nfp_power,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.c b/drivers/net/ethernet/netronome/nfp/nfp_port.c
index 4f2308570dcf..54640bcb70fb 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_port.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_port.c
@@ -189,6 +189,7 @@ int nfp_port_init_phy_port(struct nfp_pf *pf, struct nfp_app *app,
port->eth_port = &pf->eth_tbl->ports[id];
port->eth_id = pf->eth_tbl->ports[id].index;
+ port->netdev->dev_port = id;
if (pf->mac_stats_mem)
port->eth_stats =
pf->mac_stats_mem + port->eth_id * NFP_MAC_STATS_SIZE;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_phc.c b/drivers/net/ethernet/pensando/ionic/ionic_phc.c
index eac2f0e3576e..7505efdff8e9 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_phc.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_phc.c
@@ -579,11 +579,10 @@ void ionic_lif_alloc_phc(struct ionic_lif *lif)
diff |= diff >> 16;
diff |= diff >> 32;
- /* constrain to the hardware bitmask, and use this as the bitmask */
+ /* constrain to the hardware bitmask */
diff &= phc->cc.mask;
- phc->cc.mask = diff;
- /* the wrap period is now defined by diff (or phc->cc.mask)
+ /* the wrap period is now defined by diff
*
* we will update the time basis at about 1/4 the wrap period, so
* should not see a difference of more than +/- diff/4.
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index 87f76bac2e46..eb827b86ecae 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -628,7 +628,13 @@ int qlcnic_fw_create_ctx(struct qlcnic_adapter *dev)
int i, err, ring;
if (dev->flags & QLCNIC_NEED_FLR) {
- pci_reset_function(dev->pdev);
+ err = pci_reset_function(dev->pdev);
+ if (err) {
+ dev_err(&dev->pdev->dev,
+ "Adapter reset failed (%d). Please reboot\n",
+ err);
+ return err;
+ }
dev->flags &= ~QLCNIC_NEED_FLR;
}
diff --git a/drivers/net/ethernet/qualcomm/Kconfig b/drivers/net/ethernet/qualcomm/Kconfig
index a4434eb38950..9210ff360fdc 100644
--- a/drivers/net/ethernet/qualcomm/Kconfig
+++ b/drivers/net/ethernet/qualcomm/Kconfig
@@ -52,6 +52,7 @@ config QCOM_EMAC
depends on HAS_DMA && HAS_IOMEM
select CRC32
select PHYLIB
+ select MDIO_DEVRES
help
This driver supports the Qualcomm Technologies, Inc. Gigabit
Ethernet Media Access Controller (EMAC). The controller
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 9f8357bbc8a4..a7e376e7e689 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -30,6 +30,7 @@
#include <linux/ipv6.h>
#include <asm/unaligned.h>
#include <net/ip6_checksum.h>
+#include <net/netdev_queues.h>
#include "r8169.h"
#include "r8169_firmware.h"
@@ -68,6 +69,8 @@
#define NUM_RX_DESC 256 /* Number of Rx descriptor registers */
#define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
#define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
+#define R8169_TX_STOP_THRS (MAX_SKB_FRAGS + 1)
+#define R8169_TX_START_THRS (2 * R8169_TX_STOP_THRS)
#define OCP_STD_PHY_BASE 0xa400
@@ -4162,13 +4165,9 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
return true;
}
-static bool rtl_tx_slots_avail(struct rtl8169_private *tp)
+static unsigned int rtl_tx_slots_avail(struct rtl8169_private *tp)
{
- unsigned int slots_avail = READ_ONCE(tp->dirty_tx) + NUM_TX_DESC
- - READ_ONCE(tp->cur_tx);
-
- /* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */
- return slots_avail > MAX_SKB_FRAGS;
+ return READ_ONCE(tp->dirty_tx) + NUM_TX_DESC - READ_ONCE(tp->cur_tx);
}
/* Versions RTL8102e and from RTL8168c onwards support csum_v2 */
@@ -4245,27 +4244,10 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
WRITE_ONCE(tp->cur_tx, tp->cur_tx + frags + 1);
- stop_queue = !rtl_tx_slots_avail(tp);
- if (unlikely(stop_queue)) {
- /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
- * not miss a ring update when it notices a stopped queue.
- */
- smp_wmb();
- netif_stop_queue(dev);
- /* Sync with rtl_tx:
- * - publish queue status and cur_tx ring index (write barrier)
- * - refresh dirty_tx ring index (read barrier).
- * May the current thread have a pessimistic view of the ring
- * status and forget to wake up queue, a racing rtl_tx thread
- * can't.
- */
- smp_mb__after_atomic();
- if (rtl_tx_slots_avail(tp))
- netif_start_queue(dev);
- door_bell = true;
- }
-
- if (door_bell)
+ stop_queue = !netif_subqueue_maybe_stop(dev, 0, rtl_tx_slots_avail(tp),
+ R8169_TX_STOP_THRS,
+ R8169_TX_START_THRS);
+ if (door_bell || stop_queue)
rtl8169_doorbell(tp);
return NETDEV_TX_OK;
@@ -4389,19 +4371,12 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp,
}
if (tp->dirty_tx != dirty_tx) {
- netdev_completed_queue(dev, pkts_compl, bytes_compl);
dev_sw_netstats_tx_add(dev, pkts_compl, bytes_compl);
+ WRITE_ONCE(tp->dirty_tx, dirty_tx);
- /* Sync with rtl8169_start_xmit:
- * - publish dirty_tx ring index (write barrier)
- * - refresh cur_tx ring index and queue status (read barrier)
- * May the current thread miss the stopped queue condition,
- * a racing xmit thread can only have a right view of the
- * ring status.
- */
- smp_store_mb(tp->dirty_tx, dirty_tx);
- if (netif_queue_stopped(dev) && rtl_tx_slots_avail(tp))
- netif_wake_queue(dev);
+ netif_subqueue_completed_wake(dev, 0, pkts_compl, bytes_compl,
+ rtl_tx_slots_avail(tp),
+ R8169_TX_START_THRS);
/*
* 8168 hack: TxPoll requests are lost when the Tx packets are
* too close. Let's kick an extra TxPoll request when a burst
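The r8169 conversion above swaps the hand-rolled stop/wake logic, including its explicit smp_wmb()/smp_mb__after_atomic() pairing, for the generic helpers in <net/netdev_queues.h>: the xmit path stops subqueue 0 when fewer than R8169_TX_STOP_THRS descriptors remain, and the completion path wakes it once R8169_TX_START_THRS are free, with the required barriers handled inside the helpers. A hedged sketch of the two halves (free_slots() stands in for rtl_tx_slots_avail()):

	/* xmit side: helper returns 0 when the queue was stopped and stayed stopped */
	stop_queue = !netif_subqueue_maybe_stop(dev, 0, free_slots(tp),
						R8169_TX_STOP_THRS,
						R8169_TX_START_THRS);

	/* completion side: also subsumes the dropped netdev_completed_queue() BQL call */
	netif_subqueue_completed_wake(dev, 0, pkts_compl, bytes_compl,
				      free_slots(tp), R8169_TX_START_THRS);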
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 746fd9164e30..a4f22d8e6ac7 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -540,7 +540,6 @@ int efx_net_open(struct net_device *net_dev)
else
efx->state = STATE_NET_UP;
- efx_selftest_async_start(efx);
return 0;
}
diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c
index cc30524c2fe4..361687de308d 100644
--- a/drivers/net/ethernet/sfc/efx_common.c
+++ b/drivers/net/ethernet/sfc/efx_common.c
@@ -544,6 +544,8 @@ void efx_start_all(struct efx_nic *efx)
/* Start the hardware monitor if there is one */
efx_start_monitor(efx);
+ efx_selftest_async_start(efx);
+
/* Link state detection is normally event-driven; we have
* to poll now because we could have missed a change
*/
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index f77511fe4e87..5f5a997f21f3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -165,6 +165,18 @@ config DWMAC_SOCFPGA
for the stmmac device driver. This driver is used for
arria5 and cyclone5 FPGA SoCs.
+config DWMAC_STARFIVE
+ tristate "StarFive dwmac support"
+ depends on OF && (ARCH_STARFIVE || COMPILE_TEST)
+ select MFD_SYSCON
+ default m if ARCH_STARFIVE
+ help
+ Support for Ethernet controllers on StarFive RISC-V SoCs.
+
+ This selects the StarFive platform-specific glue layer support for
+ the stmmac device driver. This driver is used for the StarFive
+ JH7110 Ethernet controller.
+
config DWMAC_STI
tristate "STi GMAC support"
default ARCH_STI
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index 057e4bab5c08..8738fdbb4b2d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_DWMAC_OXNAS) += dwmac-oxnas.o
obj-$(CONFIG_DWMAC_QCOM_ETHQOS) += dwmac-qcom-ethqos.o
obj-$(CONFIG_DWMAC_ROCKCHIP) += dwmac-rk.o
obj-$(CONFIG_DWMAC_SOCFPGA) += dwmac-altr-socfpga.o
+obj-$(CONFIG_DWMAC_STARFIVE) += dwmac-starfive.o
obj-$(CONFIG_DWMAC_STI) += dwmac-sti.o
obj-$(CONFIG_DWMAC_STM32) += dwmac-stm32.o
obj-$(CONFIG_DWMAC_SUNXI) += dwmac-sunxi.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
index 2e8744ac6b91..fb55efd52240 100644
--- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
@@ -14,9 +14,9 @@
#include "stmmac.h"
-static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
+static int jumbo_frm(struct stmmac_tx_queue *tx_q, struct sk_buff *skb,
+ int csum)
{
- struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)p;
unsigned int nopaged_len = skb_headlen(skb);
struct stmmac_priv *priv = tx_q->priv_data;
unsigned int entry = tx_q->cur_tx;
@@ -125,9 +125,8 @@ static void init_dma_chain(void *des, dma_addr_t phy_addr,
}
}
-static void refill_desc3(void *priv_ptr, struct dma_desc *p)
+static void refill_desc3(struct stmmac_rx_queue *rx_q, struct dma_desc *p)
{
- struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)priv_ptr;
struct stmmac_priv *priv = rx_q->priv_data;
if (priv->hwts_rx_en && !priv->extend_desc)
@@ -141,9 +140,8 @@ static void refill_desc3(void *priv_ptr, struct dma_desc *p)
sizeof(struct dma_desc)));
}
-static void clean_desc3(void *priv_ptr, struct dma_desc *p)
+static void clean_desc3(struct stmmac_tx_queue *tx_q, struct dma_desc *p)
{
- struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)priv_ptr;
struct stmmac_priv *priv = tx_q->priv_data;
unsigned int entry = tx_q->dirty_tx;
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 54bb072aeb2d..4ad692c4116c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -242,7 +242,7 @@ struct stmmac_safety_stats {
#define SF_DMA_MODE 1 /* DMA STORE-AND-FORWARD Operation Mode */
-/* DAM HW feature register fields */
+/* DMA HW feature register fields */
#define DMA_HW_FEAT_MIISEL 0x00000001 /* 10/100 Mbps Support */
#define DMA_HW_FEAT_GMIISEL 0x00000002 /* 1000 Mbps Support */
#define DMA_HW_FEAT_HDSEL 0x00000004 /* Half-Duplex Support */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
index dfbaea06d108..9354bf419112 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
@@ -20,18 +20,18 @@
#define GMAC_CONFIG_INTF_RGMII (0x1 << 0)
struct anarion_gmac {
- uintptr_t ctl_block;
+ void __iomem *ctl_block;
uint32_t phy_intf_sel;
};
static uint32_t gmac_read_reg(struct anarion_gmac *gmac, uint8_t reg)
{
- return readl((void *)(gmac->ctl_block + reg));
+ return readl(gmac->ctl_block + reg);
};
static void gmac_write_reg(struct anarion_gmac *gmac, uint8_t reg, uint32_t val)
{
- writel(val, (void *)(gmac->ctl_block + reg));
+ writel(val, gmac->ctl_block + reg);
}
static int anarion_gmac_init(struct platform_device *pdev, void *priv)
@@ -68,16 +68,16 @@ static struct anarion_gmac *anarion_config_dt(struct platform_device *pdev)
ctl_block = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(ctl_block)) {
- dev_err(&pdev->dev, "Cannot get reset region (%ld)!\n",
- PTR_ERR(ctl_block));
- return ctl_block;
+ err = PTR_ERR(ctl_block);
+ dev_err(&pdev->dev, "Cannot get reset region (%d)!\n", err);
+ return ERR_PTR(err);
}
gmac = devm_kzalloc(&pdev->dev, sizeof(*gmac), GFP_KERNEL);
if (!gmac)
return ERR_PTR(-ENOMEM);
- gmac->ctl_block = (uintptr_t)ctl_block;
+ gmac->ctl_block = ctl_block;
err = of_get_phy_mode(pdev->dev.of_node, &phy_mode);
if (err)
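The anarion fix above is about type honesty as much as the iomem cookie: the old code returned a void __iomem * from a function declared to return struct anarion_gmac *, hiding the mismatch behind IS_ERR(). An equivalent, more compact form (hedged sketch; dev_err_probe() returns the errno it is given):

	ctl_block = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(ctl_block))
		return ERR_PTR(dev_err_probe(&pdev->dev, PTR_ERR(ctl_block),
					     "Cannot get reset region\n"));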
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
index 13aa919633b4..ab9f876b6df7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
@@ -251,7 +251,6 @@ static void intel_speed_mode_2500(struct net_device *ndev, void *intel_data)
priv->plat->mdio_bus_data->xpcs_an_inband = false;
} else {
priv->plat->max_speed = 1000;
- priv->plat->mdio_bus_data->xpcs_an_inband = true;
}
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
index e8b507f88fbc..f6754e3643f3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
@@ -263,6 +263,11 @@ static int meson_axg_set_phy_mode(struct meson8b_dwmac *dwmac)
return 0;
}
+static void meson8b_clk_disable_unprepare(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
static int meson8b_devm_clk_prepare_enable(struct meson8b_dwmac *dwmac,
struct clk *clk)
{
@@ -273,8 +278,7 @@ static int meson8b_devm_clk_prepare_enable(struct meson8b_dwmac *dwmac,
return ret;
return devm_add_action_or_reset(dwmac->dev,
- (void(*)(void *))clk_disable_unprepare,
- clk);
+ meson8b_clk_disable_unprepare, clk);
}
static int meson8b_init_rgmii_delays(struct meson8b_dwmac *dwmac)
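The cast this hunk removes, (void(*)(void *))clk_disable_unprepare, makes an indirect call through a pointer whose type differs from the callee's real prototype; that is undefined behavior in C and is rejected at runtime by kernels built with indirect-call checking (e.g. CONFIG_CFI_CLANG). A trampoline with the exact devm action signature is the idiomatic fix:

	static void foo_clk_disable_unprepare(void *data)	/* foo_ illustrative */
	{
		clk_disable_unprepare(data);	/* data is the struct clk * */
	}

	/* registered via devm_add_action_or_reset(dev, foo_clk_disable_unprepare, clk) */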
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
index 32763566c214..16a8c361283b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
@@ -11,6 +11,7 @@
#define RGMII_IO_MACRO_CONFIG 0x0
#define SDCC_HC_REG_DLL_CONFIG 0x4
+#define SDCC_TEST_CTL 0x8
#define SDCC_HC_REG_DDR_CONFIG 0xC
#define SDCC_HC_REG_DLL_CONFIG2 0x10
#define SDC4_STATUS 0x14
@@ -49,6 +50,7 @@
#define SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY GENMASK(26, 21)
#define SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_CODE GENMASK(29, 27)
#define SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN BIT(30)
+#define SDCC_DDR_CONFIG_TCXO_CYCLES_CNT GENMASK(11, 9)
#define SDCC_DDR_CONFIG_PRG_RCLK_DLY GENMASK(8, 0)
/* SDCC_HC_REG_DLL_CONFIG2 fields */
@@ -78,7 +80,9 @@ struct ethqos_emac_por {
struct ethqos_emac_driver_data {
const struct ethqos_emac_por *por;
unsigned int num_por;
- bool rgmii_config_looback_en;
+ bool rgmii_config_loopback_en;
+ bool has_emac3;
+ struct dwmac4_addrs dwmac4_addrs;
};
struct qcom_ethqos {
@@ -91,7 +95,8 @@ struct qcom_ethqos {
const struct ethqos_emac_por *por;
unsigned int num_por;
- bool rgmii_config_looback_en;
+ bool rgmii_config_loopback_en;
+ bool has_emac3;
};
static int rgmii_readl(struct qcom_ethqos *ethqos, unsigned int offset)
@@ -183,7 +188,8 @@ static const struct ethqos_emac_por emac_v2_3_0_por[] = {
static const struct ethqos_emac_driver_data emac_v2_3_0_data = {
.por = emac_v2_3_0_por,
.num_por = ARRAY_SIZE(emac_v2_3_0_por),
- .rgmii_config_looback_en = true,
+ .rgmii_config_loopback_en = true,
+ .has_emac3 = false,
};
static const struct ethqos_emac_por emac_v2_1_0_por[] = {
@@ -198,7 +204,40 @@ static const struct ethqos_emac_por emac_v2_1_0_por[] = {
static const struct ethqos_emac_driver_data emac_v2_1_0_data = {
.por = emac_v2_1_0_por,
.num_por = ARRAY_SIZE(emac_v2_1_0_por),
- .rgmii_config_looback_en = false,
+ .rgmii_config_loopback_en = false,
+ .has_emac3 = false,
+};
+
+static const struct ethqos_emac_por emac_v3_0_0_por[] = {
+ { .offset = RGMII_IO_MACRO_CONFIG, .value = 0x40c01343 },
+ { .offset = SDCC_HC_REG_DLL_CONFIG, .value = 0x2004642c },
+ { .offset = SDCC_HC_REG_DDR_CONFIG, .value = 0x80040800 },
+ { .offset = SDCC_HC_REG_DLL_CONFIG2, .value = 0x00200000 },
+ { .offset = SDCC_USR_CTL, .value = 0x00010800 },
+ { .offset = RGMII_IO_MACRO_CONFIG2, .value = 0x00002060 },
+};
+
+static const struct ethqos_emac_driver_data emac_v3_0_0_data = {
+ .por = emac_v3_0_0_por,
+ .num_por = ARRAY_SIZE(emac_v3_0_0_por),
+ .rgmii_config_loopback_en = false,
+ .has_emac3 = true,
+ .dwmac4_addrs = {
+ .dma_chan = 0x00008100,
+ .dma_chan_offset = 0x1000,
+ .mtl_chan = 0x00008000,
+ .mtl_chan_offset = 0x1000,
+ .mtl_ets_ctrl = 0x00008010,
+ .mtl_ets_ctrl_offset = 0x1000,
+ .mtl_txq_weight = 0x00008018,
+ .mtl_txq_weight_offset = 0x1000,
+ .mtl_send_slp_cred = 0x0000801c,
+ .mtl_send_slp_cred_offset = 0x1000,
+ .mtl_high_cred = 0x00008020,
+ .mtl_high_cred_offset = 0x1000,
+ .mtl_low_cred = 0x00008024,
+ .mtl_low_cred_offset = 0x1000,
+ },
};
static int ethqos_dll_configure(struct qcom_ethqos *ethqos)
@@ -222,11 +261,13 @@ static int ethqos_dll_configure(struct qcom_ethqos *ethqos)
rgmii_updatel(ethqos, SDCC_DLL_CONFIG_DLL_EN,
SDCC_DLL_CONFIG_DLL_EN, SDCC_HC_REG_DLL_CONFIG);
- rgmii_updatel(ethqos, SDCC_DLL_MCLK_GATING_EN,
- 0, SDCC_HC_REG_DLL_CONFIG);
+ if (!ethqos->has_emac3) {
+ rgmii_updatel(ethqos, SDCC_DLL_MCLK_GATING_EN,
+ 0, SDCC_HC_REG_DLL_CONFIG);
- rgmii_updatel(ethqos, SDCC_DLL_CDR_FINE_PHASE,
- 0, SDCC_HC_REG_DLL_CONFIG);
+ rgmii_updatel(ethqos, SDCC_DLL_CDR_FINE_PHASE,
+ 0, SDCC_HC_REG_DLL_CONFIG);
+ }
/* Wait for CK_OUT_EN clear */
do {
@@ -261,28 +302,48 @@ static int ethqos_dll_configure(struct qcom_ethqos *ethqos)
rgmii_updatel(ethqos, SDCC_DLL_CONFIG2_DDR_CAL_EN,
SDCC_DLL_CONFIG2_DDR_CAL_EN, SDCC_HC_REG_DLL_CONFIG2);
- rgmii_updatel(ethqos, SDCC_DLL_CONFIG2_DLL_CLOCK_DIS,
- 0, SDCC_HC_REG_DLL_CONFIG2);
+ if (!ethqos->has_emac3) {
+ rgmii_updatel(ethqos, SDCC_DLL_CONFIG2_DLL_CLOCK_DIS,
+ 0, SDCC_HC_REG_DLL_CONFIG2);
- rgmii_updatel(ethqos, SDCC_DLL_CONFIG2_MCLK_FREQ_CALC,
- 0x1A << 10, SDCC_HC_REG_DLL_CONFIG2);
+ rgmii_updatel(ethqos, SDCC_DLL_CONFIG2_MCLK_FREQ_CALC,
+ 0x1A << 10, SDCC_HC_REG_DLL_CONFIG2);
- rgmii_updatel(ethqos, SDCC_DLL_CONFIG2_DDR_TRAFFIC_INIT_SEL,
- BIT(2), SDCC_HC_REG_DLL_CONFIG2);
+ rgmii_updatel(ethqos, SDCC_DLL_CONFIG2_DDR_TRAFFIC_INIT_SEL,
+ BIT(2), SDCC_HC_REG_DLL_CONFIG2);
- rgmii_updatel(ethqos, SDCC_DLL_CONFIG2_DDR_TRAFFIC_INIT_SW,
- SDCC_DLL_CONFIG2_DDR_TRAFFIC_INIT_SW,
- SDCC_HC_REG_DLL_CONFIG2);
+ rgmii_updatel(ethqos, SDCC_DLL_CONFIG2_DDR_TRAFFIC_INIT_SW,
+ SDCC_DLL_CONFIG2_DDR_TRAFFIC_INIT_SW,
+ SDCC_HC_REG_DLL_CONFIG2);
+ }
return 0;
}
static int ethqos_rgmii_macro_init(struct qcom_ethqos *ethqos)
{
+ int phase_shift;
+ int phy_mode;
+ int loopback;
+
+ /* Determine if the PHY adds a 2 ns TX delay or the MAC handles it */
+ phy_mode = device_get_phy_mode(&ethqos->pdev->dev);
+ if (phy_mode == PHY_INTERFACE_MODE_RGMII_ID ||
+ phy_mode == PHY_INTERFACE_MODE_RGMII_TXID)
+ phase_shift = 0;
+ else
+ phase_shift = RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN;
+
/* Disable loopback mode */
rgmii_updatel(ethqos, RGMII_CONFIG2_TX_TO_RX_LOOPBACK_EN,
0, RGMII_IO_MACRO_CONFIG2);
+ /* Determine if this platform wants loopback enabled after programming */
+ if (ethqos->rgmii_config_loopback_en)
+ loopback = RGMII_CONFIG_LOOPBACK_EN;
+ else
+ loopback = 0;
+
/* Select RGMII, write 0 to interface select */
rgmii_updatel(ethqos, RGMII_CONFIG_INTF_SEL,
0, RGMII_IO_MACRO_CONFIG);
@@ -300,27 +361,32 @@ static int ethqos_rgmii_macro_init(struct qcom_ethqos *ethqos)
RGMII_CONFIG_PROG_SWAP, RGMII_IO_MACRO_CONFIG);
rgmii_updatel(ethqos, RGMII_CONFIG2_DATA_DIVIDE_CLK_SEL,
0, RGMII_IO_MACRO_CONFIG2);
+
rgmii_updatel(ethqos, RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN,
- RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN,
- RGMII_IO_MACRO_CONFIG2);
+ phase_shift, RGMII_IO_MACRO_CONFIG2);
rgmii_updatel(ethqos, RGMII_CONFIG2_RSVD_CONFIG15,
0, RGMII_IO_MACRO_CONFIG2);
rgmii_updatel(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
RGMII_CONFIG2_RX_PROG_SWAP,
RGMII_IO_MACRO_CONFIG2);
- /* Set PRG_RCLK_DLY to 57 for 1.8 ns delay */
- rgmii_updatel(ethqos, SDCC_DDR_CONFIG_PRG_RCLK_DLY,
- 57, SDCC_HC_REG_DDR_CONFIG);
+ /* PRG_RCLK_DLY = TCXO period * TCXO_CYCLES_CNT / (2 * RX delay ns);
+ * in practice this becomes PRG_RCLK_DLY = 52 * 4 / (2 * RX delay ns)
+ */
+ if (ethqos->has_emac3) {
+ /* 0.9 ns */
+ rgmii_updatel(ethqos, SDCC_DDR_CONFIG_PRG_RCLK_DLY,
+ 115, SDCC_HC_REG_DDR_CONFIG);
+ } else {
+ /* 1.8 ns */
+ rgmii_updatel(ethqos, SDCC_DDR_CONFIG_PRG_RCLK_DLY,
+ 57, SDCC_HC_REG_DDR_CONFIG);
+ }
rgmii_updatel(ethqos, SDCC_DDR_CONFIG_PRG_DLY_EN,
SDCC_DDR_CONFIG_PRG_DLY_EN,
SDCC_HC_REG_DDR_CONFIG);
- if (ethqos->rgmii_config_looback_en)
- rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN,
- RGMII_CONFIG_LOOPBACK_EN, RGMII_IO_MACRO_CONFIG);
- else
- rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN,
- 0, RGMII_IO_MACRO_CONFIG);
+ rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN,
+ loopback, RGMII_IO_MACRO_CONFIG);
break;
case SPEED_100:
@@ -336,14 +402,20 @@ static int ethqos_rgmii_macro_init(struct qcom_ethqos *ethqos)
rgmii_updatel(ethqos, RGMII_CONFIG2_DATA_DIVIDE_CLK_SEL,
0, RGMII_IO_MACRO_CONFIG2);
rgmii_updatel(ethqos, RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN,
- RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN,
- RGMII_IO_MACRO_CONFIG2);
+ phase_shift, RGMII_IO_MACRO_CONFIG2);
rgmii_updatel(ethqos, RGMII_CONFIG_MAX_SPD_PRG_2,
BIT(6), RGMII_IO_MACRO_CONFIG);
rgmii_updatel(ethqos, RGMII_CONFIG2_RSVD_CONFIG15,
0, RGMII_IO_MACRO_CONFIG2);
- rgmii_updatel(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
- 0, RGMII_IO_MACRO_CONFIG2);
+
+ if (ethqos->has_emac3)
+ rgmii_updatel(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
+ RGMII_CONFIG2_RX_PROG_SWAP,
+ RGMII_IO_MACRO_CONFIG2);
+ else
+ rgmii_updatel(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
+ 0, RGMII_IO_MACRO_CONFIG2);
+
/* Write 0x5 to PRG_RCLK_DLY_CODE */
rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_CODE,
(BIT(29) | BIT(27)), SDCC_HC_REG_DDR_CONFIG);
@@ -353,13 +425,8 @@ static int ethqos_rgmii_macro_init(struct qcom_ethqos *ethqos)
rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN,
SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN,
SDCC_HC_REG_DDR_CONFIG);
- if (ethqos->rgmii_config_looback_en)
- rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN,
- RGMII_CONFIG_LOOPBACK_EN, RGMII_IO_MACRO_CONFIG);
- else
- rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN,
- 0, RGMII_IO_MACRO_CONFIG);
-
+ rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN,
+ loopback, RGMII_IO_MACRO_CONFIG);
break;
case SPEED_10:
@@ -375,14 +442,19 @@ static int ethqos_rgmii_macro_init(struct qcom_ethqos *ethqos)
rgmii_updatel(ethqos, RGMII_CONFIG2_DATA_DIVIDE_CLK_SEL,
0, RGMII_IO_MACRO_CONFIG2);
rgmii_updatel(ethqos, RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN,
- 0, RGMII_IO_MACRO_CONFIG2);
+ phase_shift, RGMII_IO_MACRO_CONFIG2);
rgmii_updatel(ethqos, RGMII_CONFIG_MAX_SPD_PRG_9,
BIT(12) | GENMASK(9, 8),
RGMII_IO_MACRO_CONFIG);
rgmii_updatel(ethqos, RGMII_CONFIG2_RSVD_CONFIG15,
0, RGMII_IO_MACRO_CONFIG2);
- rgmii_updatel(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
- 0, RGMII_IO_MACRO_CONFIG2);
+ if (ethqos->has_emac3)
+ rgmii_updatel(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
+ RGMII_CONFIG2_RX_PROG_SWAP,
+ RGMII_IO_MACRO_CONFIG2);
+ else
+ rgmii_updatel(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
+ 0, RGMII_IO_MACRO_CONFIG2);
/* Write 0x5 to PRG_RCLK_DLY_CODE */
rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_CODE,
(BIT(29) | BIT(27)), SDCC_HC_REG_DDR_CONFIG);
@@ -393,7 +465,7 @@ static int ethqos_rgmii_macro_init(struct qcom_ethqos *ethqos)
SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN,
SDCC_HC_REG_DDR_CONFIG);
rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN,
- RGMII_CONFIG_LOOPBACK_EN, RGMII_IO_MACRO_CONFIG);
+ loopback, RGMII_IO_MACRO_CONFIG);
break;
default:
dev_err(&ethqos->pdev->dev,
@@ -425,6 +497,17 @@ static int ethqos_configure(struct qcom_ethqos *ethqos)
rgmii_updatel(ethqos, SDCC_DLL_CONFIG_PDN,
SDCC_DLL_CONFIG_PDN, SDCC_HC_REG_DLL_CONFIG);
+ if (ethqos->has_emac3) {
+ if (ethqos->speed == SPEED_1000) {
+ rgmii_writel(ethqos, 0x1800000, SDCC_TEST_CTL);
+ rgmii_writel(ethqos, 0x2C010800, SDCC_USR_CTL);
+ rgmii_writel(ethqos, 0xA001, SDCC_HC_REG_DLL_CONFIG2);
+ } else {
+ rgmii_writel(ethqos, 0x40010800, SDCC_USR_CTL);
+ rgmii_writel(ethqos, 0xA001, SDCC_HC_REG_DLL_CONFIG2);
+ }
+ }
+
/* Clear DLL_RST */
rgmii_updatel(ethqos, SDCC_DLL_CONFIG_DLL_RST, 0,
SDCC_HC_REG_DLL_CONFIG);
@@ -444,7 +527,9 @@ static int ethqos_configure(struct qcom_ethqos *ethqos)
SDCC_HC_REG_DLL_CONFIG);
/* Set USR_CTL bit 26 with mask of 3 bits */
- rgmii_updatel(ethqos, GENMASK(26, 24), BIT(26), SDCC_USR_CTL);
+ if (!ethqos->has_emac3)
+ rgmii_updatel(ethqos, GENMASK(26, 24), BIT(26),
+ SDCC_USR_CTL);
/* wait for DLL LOCK */
do {
@@ -538,7 +623,8 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
data = of_device_get_match_data(&pdev->dev);
ethqos->por = data->por;
ethqos->num_por = data->num_por;
- ethqos->rgmii_config_looback_en = data->rgmii_config_looback_en;
+ ethqos->rgmii_config_loopback_en = data->rgmii_config_loopback_en;
+ ethqos->has_emac3 = data->has_emac3;
ethqos->rgmii_clk = devm_clk_get(&pdev->dev, "rgmii");
if (IS_ERR(ethqos->rgmii_clk)) {
@@ -558,6 +644,7 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
plat_dat->fix_mac_speed = ethqos_fix_mac_speed;
plat_dat->dump_debug_regs = rgmii_dump;
plat_dat->has_gmac4 = 1;
+ plat_dat->dwmac4_addrs = &data->dwmac4_addrs;
plat_dat->pmt = 1;
plat_dat->tso_en = of_property_read_bool(np, "snps,tso");
if (of_device_is_compatible(np, "qcom,qcs404-ethqos"))
@@ -595,6 +682,7 @@ static int qcom_ethqos_remove(struct platform_device *pdev)
static const struct of_device_id qcom_ethqos_match[] = {
{ .compatible = "qcom,qcs404-ethqos", .data = &emac_v2_3_0_data},
+ { .compatible = "qcom,sc8280xp-ethqos", .data = &emac_v3_0_0_data},
{ .compatible = "qcom,sm8150-ethqos", .data = &emac_v2_1_0_data},
{ }
};
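A quick check of the PRG_RCLK_DLY comment above, reading the formula as TCXO period * TCXO_CYCLES_CNT / (2 * RX delay): assuming the usual 19.2 MHz TCXO (period of about 52 ns, an inference from the comment rather than something the patch states),

	52 * 4 / (2 * 0.9) ~= 115	/* EMAC3, 0.9 ns RX delay */
	52 * 4 / (2 * 1.8) ~=  57	/* older EMACs, 1.8 ns RX delay */

which matches the two values the hunk programs.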
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 4b8fd11563e4..4ea31ccf24d0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -39,6 +39,24 @@ struct rk_gmac_ops {
u32 regs[];
};
+static const char * const rk_clocks[] = {
+ "aclk_mac", "pclk_mac", "mac_clk_tx", "clk_mac_speed",
+};
+
+static const char * const rk_rmii_clocks[] = {
+ "mac_clk_rx", "clk_mac_ref", "clk_mac_refout",
+};
+
+enum rk_clocks_index {
+ RK_ACLK_MAC = 0,
+ RK_PCLK_MAC,
+ RK_MAC_CLK_TX,
+ RK_CLK_MAC_SPEED,
+ RK_MAC_CLK_RX,
+ RK_CLK_MAC_REF,
+ RK_CLK_MAC_REFOUT,
+};
+
struct rk_priv_data {
struct platform_device *pdev;
phy_interface_t phy_iface;
@@ -51,15 +69,9 @@ struct rk_priv_data {
bool clock_input;
bool integrated_phy;
+ struct clk_bulk_data *clks;
+ int num_clks;
struct clk *clk_mac;
- struct clk *gmac_clkin;
- struct clk *mac_clk_rx;
- struct clk *mac_clk_tx;
- struct clk *clk_mac_ref;
- struct clk *clk_mac_refout;
- struct clk *clk_mac_speed;
- struct clk *aclk_mac;
- struct clk *pclk_mac;
struct clk *clk_phy;
struct reset_control *phy_reset;
@@ -104,10 +116,11 @@ static void px30_set_to_rmii(struct rk_priv_data *bsp_priv)
static void px30_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
+ struct clk *clk_mac_speed = bsp_priv->clks[RK_CLK_MAC_SPEED].clk;
struct device *dev = &bsp_priv->pdev->dev;
int ret;
- if (IS_ERR(bsp_priv->clk_mac_speed)) {
+ if (!clk_mac_speed) {
dev_err(dev, "%s: Missing clk_mac_speed clock\n", __func__);
return;
}
@@ -116,7 +129,7 @@ static void px30_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
regmap_write(bsp_priv->grf, PX30_GRF_GMAC_CON1,
PX30_GMAC_SPEED_10M);
- ret = clk_set_rate(bsp_priv->clk_mac_speed, 2500000);
+ ret = clk_set_rate(clk_mac_speed, 2500000);
if (ret)
dev_err(dev, "%s: set clk_mac_speed rate 2500000 failed: %d\n",
__func__, ret);
@@ -124,7 +137,7 @@ static void px30_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
regmap_write(bsp_priv->grf, PX30_GRF_GMAC_CON1,
PX30_GMAC_SPEED_100M);
- ret = clk_set_rate(bsp_priv->clk_mac_speed, 25000000);
+ ret = clk_set_rate(clk_mac_speed, 25000000);
if (ret)
dev_err(dev, "%s: set clk_mac_speed rate 25000000 failed: %d\n",
__func__, ret);
@@ -1066,6 +1079,7 @@ static void rk3568_set_to_rmii(struct rk_priv_data *bsp_priv)
static void rk3568_set_gmac_speed(struct rk_priv_data *bsp_priv, int speed)
{
+ struct clk *clk_mac_speed = bsp_priv->clks[RK_CLK_MAC_SPEED].clk;
struct device *dev = &bsp_priv->pdev->dev;
unsigned long rate;
int ret;
@@ -1085,7 +1099,7 @@ static void rk3568_set_gmac_speed(struct rk_priv_data *bsp_priv, int speed)
return;
}
- ret = clk_set_rate(bsp_priv->clk_mac_speed, rate);
+ ret = clk_set_rate(clk_mac_speed, rate);
if (ret)
dev_err(dev, "%s: set clk_mac_speed rate %ld failed %d\n",
__func__, rate, ret);
@@ -1371,6 +1385,7 @@ static void rv1126_set_to_rmii(struct rk_priv_data *bsp_priv)
static void rv1126_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
+ struct clk *clk_mac_speed = bsp_priv->clks[RK_CLK_MAC_SPEED].clk;
struct device *dev = &bsp_priv->pdev->dev;
unsigned long rate;
int ret;
@@ -1390,7 +1405,7 @@ static void rv1126_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
return;
}
- ret = clk_set_rate(bsp_priv->clk_mac_speed, rate);
+ ret = clk_set_rate(clk_mac_speed, rate);
if (ret)
dev_err(dev, "%s: set clk_mac_speed rate %ld failed %d\n",
__func__, rate, ret);
@@ -1398,6 +1413,7 @@ static void rv1126_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
static void rv1126_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
+ struct clk *clk_mac_speed = bsp_priv->clks[RK_CLK_MAC_SPEED].clk;
struct device *dev = &bsp_priv->pdev->dev;
unsigned long rate;
int ret;
@@ -1414,7 +1430,7 @@ static void rv1126_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
return;
}
- ret = clk_set_rate(bsp_priv->clk_mac_speed, rate);
+ ret = clk_set_rate(clk_mac_speed, rate);
if (ret)
dev_err(dev, "%s: set clk_mac_speed rate %ld failed %d\n",
__func__, rate, ret);
@@ -1475,68 +1491,50 @@ static int rk_gmac_clk_init(struct plat_stmmacenet_data *plat)
{
struct rk_priv_data *bsp_priv = plat->bsp_priv;
struct device *dev = &bsp_priv->pdev->dev;
- int ret;
+ int phy_iface = bsp_priv->phy_iface;
+ int i, j, ret;
bsp_priv->clk_enabled = false;
- bsp_priv->mac_clk_rx = devm_clk_get(dev, "mac_clk_rx");
- if (IS_ERR(bsp_priv->mac_clk_rx))
- dev_err(dev, "cannot get clock %s\n",
- "mac_clk_rx");
+ bsp_priv->num_clks = ARRAY_SIZE(rk_clocks);
+ if (phy_iface == PHY_INTERFACE_MODE_RMII)
+ bsp_priv->num_clks += ARRAY_SIZE(rk_rmii_clocks);
- bsp_priv->mac_clk_tx = devm_clk_get(dev, "mac_clk_tx");
- if (IS_ERR(bsp_priv->mac_clk_tx))
- dev_err(dev, "cannot get clock %s\n",
- "mac_clk_tx");
+ bsp_priv->clks = devm_kcalloc(dev, bsp_priv->num_clks,
+ sizeof(*bsp_priv->clks), GFP_KERNEL);
+ if (!bsp_priv->clks)
+ return -ENOMEM;
- bsp_priv->aclk_mac = devm_clk_get(dev, "aclk_mac");
- if (IS_ERR(bsp_priv->aclk_mac))
- dev_err(dev, "cannot get clock %s\n",
- "aclk_mac");
+ for (i = 0; i < ARRAY_SIZE(rk_clocks); i++)
+ bsp_priv->clks[i].id = rk_clocks[i];
- bsp_priv->pclk_mac = devm_clk_get(dev, "pclk_mac");
- if (IS_ERR(bsp_priv->pclk_mac))
- dev_err(dev, "cannot get clock %s\n",
- "pclk_mac");
-
- bsp_priv->clk_mac = devm_clk_get(dev, "stmmaceth");
- if (IS_ERR(bsp_priv->clk_mac))
- dev_err(dev, "cannot get clock %s\n",
- "stmmaceth");
-
- if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII) {
- bsp_priv->clk_mac_ref = devm_clk_get(dev, "clk_mac_ref");
- if (IS_ERR(bsp_priv->clk_mac_ref))
- dev_err(dev, "cannot get clock %s\n",
- "clk_mac_ref");
-
- if (!bsp_priv->clock_input) {
- bsp_priv->clk_mac_refout =
- devm_clk_get(dev, "clk_mac_refout");
- if (IS_ERR(bsp_priv->clk_mac_refout))
- dev_err(dev, "cannot get clock %s\n",
- "clk_mac_refout");
- }
+ if (phy_iface == PHY_INTERFACE_MODE_RMII) {
+ for (j = 0; j < ARRAY_SIZE(rk_rmii_clocks); j++)
+ bsp_priv->clks[i++].id = rk_rmii_clocks[j];
}
- bsp_priv->clk_mac_speed = devm_clk_get(dev, "clk_mac_speed");
- if (IS_ERR(bsp_priv->clk_mac_speed))
- dev_err(dev, "cannot get clock %s\n", "clk_mac_speed");
+ ret = devm_clk_bulk_get_optional(dev, bsp_priv->num_clks,
+ bsp_priv->clks);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get clocks\n");
+
+ /* "stmmaceth" will be enabled by the core */
+ bsp_priv->clk_mac = devm_clk_get(dev, "stmmaceth");
+ ret = PTR_ERR_OR_ZERO(bsp_priv->clk_mac);
+ if (ret)
+ return dev_err_probe(dev, ret, "Cannot get stmmaceth clock\n");
if (bsp_priv->clock_input) {
dev_info(dev, "clock input from PHY\n");
- } else {
- if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII)
- clk_set_rate(bsp_priv->clk_mac, 50000000);
+ } else if (phy_iface == PHY_INTERFACE_MODE_RMII) {
+ clk_set_rate(bsp_priv->clk_mac, 50000000);
}
if (plat->phy_node && bsp_priv->integrated_phy) {
bsp_priv->clk_phy = of_clk_get(plat->phy_node, 0);
- if (IS_ERR(bsp_priv->clk_phy)) {
- ret = PTR_ERR(bsp_priv->clk_phy);
- dev_err(dev, "Cannot get PHY clock: %d\n", ret);
- return -EINVAL;
- }
+ ret = PTR_ERR_OR_ZERO(bsp_priv->clk_phy);
+ if (ret)
+ return dev_err_probe(dev, ret, "Cannot get PHY clock\n");
clk_set_rate(bsp_priv->clk_phy, 50000000);
}
@@ -1545,77 +1543,36 @@ static int rk_gmac_clk_init(struct plat_stmmacenet_data *plat)
static int gmac_clk_enable(struct rk_priv_data *bsp_priv, bool enable)
{
- int phy_iface = bsp_priv->phy_iface;
+ int ret;
if (enable) {
if (!bsp_priv->clk_enabled) {
- if (phy_iface == PHY_INTERFACE_MODE_RMII) {
- if (!IS_ERR(bsp_priv->mac_clk_rx))
- clk_prepare_enable(
- bsp_priv->mac_clk_rx);
-
- if (!IS_ERR(bsp_priv->clk_mac_ref))
- clk_prepare_enable(
- bsp_priv->clk_mac_ref);
-
- if (!IS_ERR(bsp_priv->clk_mac_refout))
- clk_prepare_enable(
- bsp_priv->clk_mac_refout);
- }
-
- if (!IS_ERR(bsp_priv->clk_phy))
- clk_prepare_enable(bsp_priv->clk_phy);
+ ret = clk_bulk_prepare_enable(bsp_priv->num_clks,
+ bsp_priv->clks);
+ if (ret)
+ return ret;
- if (!IS_ERR(bsp_priv->aclk_mac))
- clk_prepare_enable(bsp_priv->aclk_mac);
-
- if (!IS_ERR(bsp_priv->pclk_mac))
- clk_prepare_enable(bsp_priv->pclk_mac);
-
- if (!IS_ERR(bsp_priv->mac_clk_tx))
- clk_prepare_enable(bsp_priv->mac_clk_tx);
-
- if (!IS_ERR(bsp_priv->clk_mac_speed))
- clk_prepare_enable(bsp_priv->clk_mac_speed);
+ ret = clk_prepare_enable(bsp_priv->clk_phy);
+ if (ret)
+ return ret;
if (bsp_priv->ops && bsp_priv->ops->set_clock_selection)
bsp_priv->ops->set_clock_selection(bsp_priv,
bsp_priv->clock_input, true);
- /**
- * if (!IS_ERR(bsp_priv->clk_mac))
- * clk_prepare_enable(bsp_priv->clk_mac);
- */
mdelay(5);
bsp_priv->clk_enabled = true;
}
} else {
if (bsp_priv->clk_enabled) {
- if (phy_iface == PHY_INTERFACE_MODE_RMII) {
- clk_disable_unprepare(bsp_priv->mac_clk_rx);
-
- clk_disable_unprepare(bsp_priv->clk_mac_ref);
-
- clk_disable_unprepare(bsp_priv->clk_mac_refout);
- }
-
+ clk_bulk_disable_unprepare(bsp_priv->num_clks,
+ bsp_priv->clks);
clk_disable_unprepare(bsp_priv->clk_phy);
- clk_disable_unprepare(bsp_priv->aclk_mac);
-
- clk_disable_unprepare(bsp_priv->pclk_mac);
-
- clk_disable_unprepare(bsp_priv->mac_clk_tx);
-
- clk_disable_unprepare(bsp_priv->clk_mac_speed);
-
if (bsp_priv->ops && bsp_priv->ops->set_clock_selection)
bsp_priv->ops->set_clock_selection(bsp_priv,
bsp_priv->clock_input, false);
- /**
- * if (!IS_ERR(bsp_priv->clk_mac))
- * clk_disable_unprepare(bsp_priv->clk_mac);
- */
+
bsp_priv->clk_enabled = false;
}
}
@@ -1629,9 +1586,6 @@ static int phy_power_on(struct rk_priv_data *bsp_priv, bool enable)
int ret;
struct device *dev = &bsp_priv->pdev->dev;
- if (!ldo)
- return 0;
-
if (enable) {
ret = regulator_enable(ldo);
if (ret)
@@ -1679,14 +1633,11 @@ static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
}
}
- bsp_priv->regulator = devm_regulator_get_optional(dev, "phy");
+ bsp_priv->regulator = devm_regulator_get(dev, "phy");
if (IS_ERR(bsp_priv->regulator)) {
- if (PTR_ERR(bsp_priv->regulator) == -EPROBE_DEFER) {
- dev_err(dev, "phy regulator is not available yet, deferred probing\n");
- return ERR_PTR(-EPROBE_DEFER);
- }
- dev_err(dev, "no regulator found\n");
- bsp_priv->regulator = NULL;
+ ret = PTR_ERR(bsp_priv->regulator);
+ dev_err_probe(dev, ret, "failed to get phy regulator\n");
+ return ERR_PTR(ret);
}
ret = of_property_read_string(dev->of_node, "clock_in_out", &strings);
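The dwmac-rk rework collapses eight optional devm_clk_get() calls, and the IS_ERR() bookkeeping at every use site, into one table-driven clk_bulk fetch. The core of the pattern, as a hedged sketch:

	static const char * const names[] = { "aclk_mac", "pclk_mac" };
	struct clk_bulk_data clks[ARRAY_SIZE(names)];
	int i, ret;

	for (i = 0; i < ARRAY_SIZE(names); i++)
		clks[i].id = names[i];

	/* optional: clocks absent from the DT come back as NULL, not errors */
	ret = devm_clk_bulk_get_optional(dev, ARRAY_SIZE(names), clks);
	if (ret)
		return dev_err_probe(dev, ret, "Failed to get clocks\n");

	ret = clk_bulk_prepare_enable(ARRAY_SIZE(names), clks);
	/* ... */
	clk_bulk_disable_unprepare(ARRAY_SIZE(names), clks);

This is also why px30_set_rmii_speed() now tests for NULL instead of IS_ERR(): an absent optional bulk clock is a NULL pointer, which the clk API otherwise treats as a no-op.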
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
new file mode 100644
index 000000000000..4f51a7889642
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * StarFive DWMAC platform driver
+ *
+ * Copyright (C) 2021 Emil Renner Berthing <kernel@esmil.dk>
+ * Copyright (C) 2022 StarFive Technology Co., Ltd.
+ *
+ */
+
+#include <linux/mfd/syscon.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+
+#include "stmmac_platform.h"
+
+#define STARFIVE_DWMAC_PHY_INFT_RGMII 0x1
+#define STARFIVE_DWMAC_PHY_INFT_RMII 0x4
+#define STARFIVE_DWMAC_PHY_INFT_FIELD 0x7U
+
+struct starfive_dwmac {
+ struct device *dev;
+ struct clk *clk_tx;
+};
+
+static void starfive_dwmac_fix_mac_speed(void *priv, unsigned int speed)
+{
+ struct starfive_dwmac *dwmac = priv;
+ unsigned long rate;
+ int err;
+
+ rate = clk_get_rate(dwmac->clk_tx);
+
+ switch (speed) {
+ case SPEED_1000:
+ rate = 125000000;
+ break;
+ case SPEED_100:
+ rate = 25000000;
+ break;
+ case SPEED_10:
+ rate = 2500000;
+ break;
+ default:
+ dev_err(dwmac->dev, "invalid speed %u\n", speed);
+ break;
+ }
+
+ err = clk_set_rate(dwmac->clk_tx, rate);
+ if (err)
+ dev_err(dwmac->dev, "failed to set tx rate %lu\n", rate);
+}
+
+static int starfive_dwmac_set_mode(struct plat_stmmacenet_data *plat_dat)
+{
+ struct starfive_dwmac *dwmac = plat_dat->bsp_priv;
+ struct regmap *regmap;
+ unsigned int args[2];
+ unsigned int mode;
+ int err;
+
+ switch (plat_dat->interface) {
+ case PHY_INTERFACE_MODE_RMII:
+ mode = STARFIVE_DWMAC_PHY_INFT_RMII;
+ break;
+
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ mode = STARFIVE_DWMAC_PHY_INFT_RGMII;
+ break;
+
+ default:
+ dev_err(dwmac->dev, "unsupported interface %d\n",
+ plat_dat->interface);
+ return -EINVAL;
+ }
+
+ regmap = syscon_regmap_lookup_by_phandle_args(dwmac->dev->of_node,
+ "starfive,syscon",
+ 2, args);
+ if (IS_ERR(regmap))
+ return dev_err_probe(dwmac->dev, PTR_ERR(regmap), "failed to get syscon regmap\n");
+
+ /* args[0]: offset, args[1]: shift */
+ err = regmap_update_bits(regmap, args[0],
+ STARFIVE_DWMAC_PHY_INFT_FIELD << args[1],
+ mode << args[1]);
+ if (err)
+ return dev_err_probe(dwmac->dev, err, "error setting phy mode\n");
+
+ return 0;
+}
+
+static int starfive_dwmac_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ struct starfive_dwmac *dwmac;
+ struct clk *clk_gtx;
+ int err;
+
+ err = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (err)
+ return dev_err_probe(&pdev->dev, err,
+ "failed to get resources\n");
+
+ plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return dev_err_probe(&pdev->dev, PTR_ERR(plat_dat),
+ "dt configuration failed\n");
+
+ dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
+ if (!dwmac)
+ return -ENOMEM;
+
+ dwmac->clk_tx = devm_clk_get_enabled(&pdev->dev, "tx");
+ if (IS_ERR(dwmac->clk_tx))
+ return dev_err_probe(&pdev->dev, PTR_ERR(dwmac->clk_tx),
+ "error getting tx clock\n");
+
+ clk_gtx = devm_clk_get_enabled(&pdev->dev, "gtx");
+ if (IS_ERR(clk_gtx))
+ return dev_err_probe(&pdev->dev, PTR_ERR(clk_gtx),
+ "error getting gtx clock\n");
+
+ /* Generally the rgmii_tx clock is derived from an internal clock and
+ * must be reprogrammed to match the rate required by the negotiated
+ * link speed. If the rgmii_tx clock is instead provided by the
+ * external rgmii_rxin pin, no internal configuration is needed,
+ * because rgmii_rxin adapts to the link automatically.
+ */
+ if (!device_property_read_bool(&pdev->dev, "starfive,tx-use-rgmii-clk"))
+ plat_dat->fix_mac_speed = starfive_dwmac_fix_mac_speed;
+
+ dwmac->dev = &pdev->dev;
+ plat_dat->bsp_priv = dwmac;
+ plat_dat->dma_cfg->dche = true;
+
+ err = starfive_dwmac_set_mode(plat_dat);
+ if (err)
+ return err;
+
+ err = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ if (err) {
+ stmmac_remove_config_dt(pdev, plat_dat);
+ return err;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id starfive_dwmac_match[] = {
+ { .compatible = "starfive,jh7110-dwmac" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, starfive_dwmac_match);
+
+static struct platform_driver starfive_dwmac_driver = {
+ .probe = starfive_dwmac_probe,
+ .remove = stmmac_pltfr_remove,
+ .driver = {
+ .name = "starfive-dwmac",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = starfive_dwmac_match,
+ },
+};
+module_platform_driver(starfive_dwmac_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("StarFive DWMAC platform driver");
+MODULE_AUTHOR("Emil Renner Berthing <kernel@esmil.dk>");
+MODULE_AUTHOR("Samin Guo <samin.guo@starfivetech.com>");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
index be3b1ebc06ab..465ce66ef9c1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
@@ -35,7 +35,7 @@
#define IS_PHY_IF_MODE_GBIT(iface) (IS_PHY_IF_MODE_RGMII(iface) || \
iface == PHY_INTERFACE_MODE_GMII)
-/* STiH4xx register definitions (STiH415/STiH416/STiH407/STiH410 families)
+/* STiH4xx register definitions (STiH407/STiH410 families)
*
* Below table summarizes the clock requirement and clock sources for
* supported phy interface modes with link speeds.
@@ -75,27 +75,6 @@
#define STIH4XX_ETH_SEL_INTERNAL_NOTEXT_PHYCLK BIT(7)
#define STIH4XX_ETH_SEL_TXCLK_NOT_CLK125 BIT(6)
-/* STiD127 register definitions
- *-----------------------
- * src |BIT(6)| BIT(7)|
- *-----------------------
- * MII | 1 | n/a |
- *-----------------------
- * RMII | n/a | 1 |
- * clkgen| | |
- *-----------------------
- * RMII | n/a | 0 |
- * phyclk| | |
- *-----------------------
- * RGMII | 1 | n/a |
- * clkgen| | |
- *-----------------------
- */
-
-#define STID127_RETIME_SRC_MASK GENMASK(7, 6)
-#define STID127_ETH_SEL_INTERNAL_NOTEXT_PHYCLK BIT(7)
-#define STID127_ETH_SEL_INTERNAL_NOTEXT_TXCLK BIT(6)
-
#define ENMII_MASK GENMASK(5, 5)
#define ENMII BIT(5)
#define EN_MASK GENMASK(1, 1)
@@ -194,36 +173,6 @@ static void stih4xx_fix_retime_src(void *priv, u32 spd)
stih4xx_tx_retime_val[src]);
}
-static void stid127_fix_retime_src(void *priv, u32 spd)
-{
- struct sti_dwmac *dwmac = priv;
- u32 reg = dwmac->ctrl_reg;
- u32 freq = 0;
- u32 val = 0;
-
- if (dwmac->interface == PHY_INTERFACE_MODE_MII) {
- val = STID127_ETH_SEL_INTERNAL_NOTEXT_TXCLK;
- } else if (dwmac->interface == PHY_INTERFACE_MODE_RMII) {
- if (!dwmac->ext_phyclk) {
- val = STID127_ETH_SEL_INTERNAL_NOTEXT_PHYCLK;
- freq = DWMAC_50MHZ;
- }
- } else if (IS_PHY_IF_MODE_RGMII(dwmac->interface)) {
- val = STID127_ETH_SEL_INTERNAL_NOTEXT_TXCLK;
- if (spd == SPEED_1000)
- freq = DWMAC_125MHZ;
- else if (spd == SPEED_100)
- freq = DWMAC_25MHZ;
- else if (spd == SPEED_10)
- freq = DWMAC_2_5MHZ;
- }
-
- if (freq)
- clk_set_rate(dwmac->clk, freq);
-
- regmap_update_bits(dwmac->regmap, reg, STID127_RETIME_SRC_MASK, val);
-}
-
static int sti_dwmac_set_mode(struct sti_dwmac *dwmac)
{
struct regmap *regmap = dwmac->regmap;
@@ -408,14 +357,7 @@ static const struct sti_dwmac_of_data stih4xx_dwmac_data = {
.fix_retime_src = stih4xx_fix_retime_src,
};
-static const struct sti_dwmac_of_data stid127_dwmac_data = {
- .fix_retime_src = stid127_fix_retime_src,
-};
-
static const struct of_device_id sti_dwmac_match[] = {
- { .compatible = "st,stih415-dwmac", .data = &stih4xx_dwmac_data},
- { .compatible = "st,stih416-dwmac", .data = &stih4xx_dwmac_data},
- { .compatible = "st,stid127-dwmac", .data = &stid127_dwmac_data},
{ .compatible = "st,stih407-dwmac", .data = &stih4xx_dwmac_data},
{ }
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index f834472599f7..c2c592ba0eb8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -304,7 +304,8 @@ static void sun8i_dwmac_dma_init(void __iomem *ioaddr,
writel(0x1FFFFFF, ioaddr + EMAC_INT_STA);
}
-static void sun8i_dwmac_dma_init_rx(void __iomem *ioaddr,
+static void sun8i_dwmac_dma_init_rx(struct stmmac_priv *priv,
+ void __iomem *ioaddr,
struct stmmac_dma_cfg *dma_cfg,
dma_addr_t dma_rx_phy, u32 chan)
{
@@ -312,7 +313,8 @@ static void sun8i_dwmac_dma_init_rx(void __iomem *ioaddr,
writel(lower_32_bits(dma_rx_phy), ioaddr + EMAC_RX_DESC_LIST);
}
-static void sun8i_dwmac_dma_init_tx(void __iomem *ioaddr,
+static void sun8i_dwmac_dma_init_tx(struct stmmac_priv *priv,
+ void __iomem *ioaddr,
struct stmmac_dma_cfg *dma_cfg,
dma_addr_t dma_tx_phy, u32 chan)
{
@@ -324,7 +326,8 @@ static void sun8i_dwmac_dma_init_tx(void __iomem *ioaddr,
* Called from stmmac_dma_ops->dump_regs
* Used for ethtool
*/
-static void sun8i_dwmac_dump_regs(void __iomem *ioaddr, u32 *reg_space)
+static void sun8i_dwmac_dump_regs(struct stmmac_priv *priv,
+ void __iomem *ioaddr, u32 *reg_space)
{
int i;
@@ -352,7 +355,8 @@ static void sun8i_dwmac_dump_mac_regs(struct mac_device_info *hw,
}
}
-static void sun8i_dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan,
+static void sun8i_dwmac_enable_dma_irq(struct stmmac_priv *priv,
+ void __iomem *ioaddr, u32 chan,
bool rx, bool tx)
{
u32 value = readl(ioaddr + EMAC_INT_EN);
@@ -365,7 +369,8 @@ static void sun8i_dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan,
writel(value, ioaddr + EMAC_INT_EN);
}
-static void sun8i_dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan,
+static void sun8i_dwmac_disable_dma_irq(struct stmmac_priv *priv,
+ void __iomem *ioaddr, u32 chan,
bool rx, bool tx)
{
u32 value = readl(ioaddr + EMAC_INT_EN);
@@ -378,7 +383,8 @@ static void sun8i_dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan,
writel(value, ioaddr + EMAC_INT_EN);
}
-static void sun8i_dwmac_dma_start_tx(void __iomem *ioaddr, u32 chan)
+static void sun8i_dwmac_dma_start_tx(struct stmmac_priv *priv,
+ void __iomem *ioaddr, u32 chan)
{
u32 v;
@@ -398,7 +404,8 @@ static void sun8i_dwmac_enable_dma_transmission(void __iomem *ioaddr)
writel(v, ioaddr + EMAC_TX_CTL1);
}
-static void sun8i_dwmac_dma_stop_tx(void __iomem *ioaddr, u32 chan)
+static void sun8i_dwmac_dma_stop_tx(struct stmmac_priv *priv,
+ void __iomem *ioaddr, u32 chan)
{
u32 v;
@@ -407,7 +414,8 @@ static void sun8i_dwmac_dma_stop_tx(void __iomem *ioaddr, u32 chan)
writel(v, ioaddr + EMAC_TX_CTL1);
}
-static void sun8i_dwmac_dma_start_rx(void __iomem *ioaddr, u32 chan)
+static void sun8i_dwmac_dma_start_rx(struct stmmac_priv *priv,
+ void __iomem *ioaddr, u32 chan)
{
u32 v;
@@ -417,7 +425,8 @@ static void sun8i_dwmac_dma_start_rx(void __iomem *ioaddr, u32 chan)
writel(v, ioaddr + EMAC_RX_CTL1);
}
-static void sun8i_dwmac_dma_stop_rx(void __iomem *ioaddr, u32 chan)
+static void sun8i_dwmac_dma_stop_rx(struct stmmac_priv *priv,
+ void __iomem *ioaddr, u32 chan)
{
u32 v;
@@ -426,7 +435,8 @@ static void sun8i_dwmac_dma_stop_rx(void __iomem *ioaddr, u32 chan)
writel(v, ioaddr + EMAC_RX_CTL1);
}
-static int sun8i_dwmac_dma_interrupt(void __iomem *ioaddr,
+static int sun8i_dwmac_dma_interrupt(struct stmmac_priv *priv,
+ void __iomem *ioaddr,
struct stmmac_extra_stats *x, u32 chan,
u32 dir)
{
@@ -492,7 +502,8 @@ static int sun8i_dwmac_dma_interrupt(void __iomem *ioaddr,
return ret;
}
-static void sun8i_dwmac_dma_operation_mode_rx(void __iomem *ioaddr, int mode,
+static void sun8i_dwmac_dma_operation_mode_rx(struct stmmac_priv *priv,
+ void __iomem *ioaddr, int mode,
u32 channel, int fifosz, u8 qmode)
{
u32 v;
@@ -515,7 +526,8 @@ static void sun8i_dwmac_dma_operation_mode_rx(void __iomem *ioaddr, int mode,
writel(v, ioaddr + EMAC_RX_CTL1);
}
-static void sun8i_dwmac_dma_operation_mode_tx(void __iomem *ioaddr, int mode,
+static void sun8i_dwmac_dma_operation_mode_tx(struct stmmac_priv *priv,
+ void __iomem *ioaddr, int mode,
u32 channel, int fifosz, u8 qmode)
{
u32 v;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index 0e00dd83d027..3927609abc44 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -414,7 +414,8 @@ static void dwmac1000_get_adv_lp(void __iomem *ioaddr, struct rgmii_adv *adv)
dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv);
}
-static void dwmac1000_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x,
+static void dwmac1000_debug(struct stmmac_priv *priv, void __iomem *ioaddr,
+ struct stmmac_extra_stats *x,
u32 rx_queues, u32 tx_queues)
{
u32 value = readl(ioaddr + GMAC_DEBUG);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
index f5581db0ba9b..daf79cdbd3ec 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
@@ -110,7 +110,8 @@ static void dwmac1000_dma_init(void __iomem *ioaddr,
writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
}
-static void dwmac1000_dma_init_rx(void __iomem *ioaddr,
+static void dwmac1000_dma_init_rx(struct stmmac_priv *priv,
+ void __iomem *ioaddr,
struct stmmac_dma_cfg *dma_cfg,
dma_addr_t dma_rx_phy, u32 chan)
{
@@ -118,7 +119,8 @@ static void dwmac1000_dma_init_rx(void __iomem *ioaddr,
writel(lower_32_bits(dma_rx_phy), ioaddr + DMA_RCV_BASE_ADDR);
}
-static void dwmac1000_dma_init_tx(void __iomem *ioaddr,
+static void dwmac1000_dma_init_tx(struct stmmac_priv *priv,
+ void __iomem *ioaddr,
struct stmmac_dma_cfg *dma_cfg,
dma_addr_t dma_tx_phy, u32 chan)
{
@@ -147,7 +149,8 @@ static u32 dwmac1000_configure_fc(u32 csr6, int rxfifosz)
return csr6;
}
-static void dwmac1000_dma_operation_mode_rx(void __iomem *ioaddr, int mode,
+static void dwmac1000_dma_operation_mode_rx(struct stmmac_priv *priv,
+ void __iomem *ioaddr, int mode,
u32 channel, int fifosz, u8 qmode)
{
u32 csr6 = readl(ioaddr + DMA_CONTROL);
@@ -175,7 +178,8 @@ static void dwmac1000_dma_operation_mode_rx(void __iomem *ioaddr, int mode,
writel(csr6, ioaddr + DMA_CONTROL);
}
-static void dwmac1000_dma_operation_mode_tx(void __iomem *ioaddr, int mode,
+static void dwmac1000_dma_operation_mode_tx(struct stmmac_priv *priv,
+ void __iomem *ioaddr, int mode,
u32 channel, int fifosz, u8 qmode)
{
u32 csr6 = readl(ioaddr + DMA_CONTROL);
@@ -208,7 +212,8 @@ static void dwmac1000_dma_operation_mode_tx(void __iomem *ioaddr, int mode,
writel(csr6, ioaddr + DMA_CONTROL);
}
-static void dwmac1000_dump_dma_regs(void __iomem *ioaddr, u32 *reg_space)
+static void dwmac1000_dump_dma_regs(struct stmmac_priv *priv,
+ void __iomem *ioaddr, u32 *reg_space)
{
int i;
@@ -263,8 +268,8 @@ static int dwmac1000_get_hw_feature(void __iomem *ioaddr,
return 0;
}
-static void dwmac1000_rx_watchdog(void __iomem *ioaddr, u32 riwt,
- u32 queue)
+static void dwmac1000_rx_watchdog(struct stmmac_priv *priv,
+ void __iomem *ioaddr, u32 riwt, u32 queue)
{
writel(riwt, ioaddr + DMA_RX_WATCHDOG);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
index 8f0d9bc7cab5..1c32b1788f02 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
@@ -29,7 +29,7 @@ static void dwmac100_dma_init(void __iomem *ioaddr,
writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
}
-static void dwmac100_dma_init_rx(void __iomem *ioaddr,
+static void dwmac100_dma_init_rx(struct stmmac_priv *priv, void __iomem *ioaddr,
struct stmmac_dma_cfg *dma_cfg,
dma_addr_t dma_rx_phy, u32 chan)
{
@@ -37,7 +37,7 @@ static void dwmac100_dma_init_rx(void __iomem *ioaddr,
writel(lower_32_bits(dma_rx_phy), ioaddr + DMA_RCV_BASE_ADDR);
}
-static void dwmac100_dma_init_tx(void __iomem *ioaddr,
+static void dwmac100_dma_init_tx(struct stmmac_priv *priv, void __iomem *ioaddr,
struct stmmac_dma_cfg *dma_cfg,
dma_addr_t dma_tx_phy, u32 chan)
{
@@ -50,7 +50,8 @@ static void dwmac100_dma_init_tx(void __iomem *ioaddr,
* The transmit threshold can be programmed by setting the TTC bits in the DMA
* control register.
*/
-static void dwmac100_dma_operation_mode_tx(void __iomem *ioaddr, int mode,
+static void dwmac100_dma_operation_mode_tx(struct stmmac_priv *priv,
+ void __iomem *ioaddr, int mode,
u32 channel, int fifosz, u8 qmode)
{
u32 csr6 = readl(ioaddr + DMA_CONTROL);
@@ -65,7 +66,8 @@ static void dwmac100_dma_operation_mode_tx(void __iomem *ioaddr, int mode,
writel(csr6, ioaddr + DMA_CONTROL);
}
-static void dwmac100_dump_dma_regs(void __iomem *ioaddr, u32 *reg_space)
+static void dwmac100_dump_dma_regs(struct stmmac_priv *priv,
+ void __iomem *ioaddr, u32 *reg_space)
{
int i;
@@ -80,10 +82,10 @@ static void dwmac100_dump_dma_regs(void __iomem *ioaddr, u32 *reg_space)
}
/* DMA controller has two counters to track the number of the missed frames. */
-static void dwmac100_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x,
+static void dwmac100_dma_diagnostic_fr(struct net_device_stats *stats,
+ struct stmmac_extra_stats *x,
void __iomem *ioaddr)
{
- struct net_device_stats *stats = (struct net_device_stats *)data;
u32 csr8 = readl(ioaddr + DMA_MISSED_FRAME_CTR);
if (unlikely(csr8)) {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
index ccd49346d3b3..4538f334df57 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
@@ -336,14 +336,25 @@ enum power_event {
#define MTL_CHAN_BASE_ADDR 0x00000d00
#define MTL_CHAN_BASE_OFFSET 0x40
-#define MTL_CHANX_BASE_ADDR(x) (MTL_CHAN_BASE_ADDR + \
- (x * MTL_CHAN_BASE_OFFSET))
-#define MTL_CHAN_TX_OP_MODE(x) MTL_CHANX_BASE_ADDR(x)
-#define MTL_CHAN_TX_DEBUG(x) (MTL_CHANX_BASE_ADDR(x) + 0x8)
-#define MTL_CHAN_INT_CTRL(x) (MTL_CHANX_BASE_ADDR(x) + 0x2c)
-#define MTL_CHAN_RX_OP_MODE(x) (MTL_CHANX_BASE_ADDR(x) + 0x30)
-#define MTL_CHAN_RX_DEBUG(x) (MTL_CHANX_BASE_ADDR(x) + 0x38)
+static inline u32 mtl_chanx_base_addr(const struct dwmac4_addrs *addrs,
+ const u32 x)
+{
+ u32 addr;
+
+ if (addrs)
+ addr = addrs->mtl_chan + (x * addrs->mtl_chan_offset);
+ else
+ addr = MTL_CHAN_BASE_ADDR + (x * MTL_CHAN_BASE_OFFSET);
+
+ return addr;
+}
+
+#define MTL_CHAN_TX_OP_MODE(addrs, x) mtl_chanx_base_addr(addrs, x)
+#define MTL_CHAN_TX_DEBUG(addrs, x) (mtl_chanx_base_addr(addrs, x) + 0x8)
+#define MTL_CHAN_INT_CTRL(addrs, x) (mtl_chanx_base_addr(addrs, x) + 0x2c)
+#define MTL_CHAN_RX_OP_MODE(addrs, x) (mtl_chanx_base_addr(addrs, x) + 0x30)
+#define MTL_CHAN_RX_DEBUG(addrs, x) (mtl_chanx_base_addr(addrs, x) + 0x38)
#define MTL_OP_MODE_RSF BIT(5)
#define MTL_OP_MODE_TXQEN_MASK GENMASK(3, 2)
@@ -388,8 +399,19 @@ enum power_event {
/* MTL ETS Control register */
#define MTL_ETS_CTRL_BASE_ADDR 0x00000d10
#define MTL_ETS_CTRL_BASE_OFFSET 0x40
-#define MTL_ETSX_CTRL_BASE_ADDR(x) (MTL_ETS_CTRL_BASE_ADDR + \
- ((x) * MTL_ETS_CTRL_BASE_OFFSET))
+
+static inline u32 mtl_etsx_ctrl_base_addr(const struct dwmac4_addrs *addrs,
+ const u32 x)
+{
+ u32 addr;
+
+ if (addrs)
+ addr = addrs->mtl_ets_ctrl + (x * addrs->mtl_ets_ctrl_offset);
+ else
+ addr = MTL_ETS_CTRL_BASE_ADDR + (x * MTL_ETS_CTRL_BASE_OFFSET);
+
+ return addr;
+}
#define MTL_ETS_CTRL_CC BIT(3)
#define MTL_ETS_CTRL_AVALG BIT(2)
@@ -397,31 +419,76 @@ enum power_event {
/* MTL Queue Quantum Weight */
#define MTL_TXQ_WEIGHT_BASE_ADDR 0x00000d18
#define MTL_TXQ_WEIGHT_BASE_OFFSET 0x40
-#define MTL_TXQX_WEIGHT_BASE_ADDR(x) (MTL_TXQ_WEIGHT_BASE_ADDR + \
- ((x) * MTL_TXQ_WEIGHT_BASE_OFFSET))
+
+static inline u32 mtl_txqx_weight_base_addr(const struct dwmac4_addrs *addrs,
+ const u32 x)
+{
+ u32 addr;
+
+ if (addrs)
+ addr = addrs->mtl_txq_weight + (x * addrs->mtl_txq_weight_offset);
+ else
+ addr = MTL_TXQ_WEIGHT_BASE_ADDR + (x * MTL_TXQ_WEIGHT_BASE_OFFSET);
+
+ return addr;
+}
+
#define MTL_TXQ_WEIGHT_ISCQW_MASK GENMASK(20, 0)
/* MTL sendSlopeCredit register */
#define MTL_SEND_SLP_CRED_BASE_ADDR 0x00000d1c
#define MTL_SEND_SLP_CRED_OFFSET 0x40
-#define MTL_SEND_SLP_CREDX_BASE_ADDR(x) (MTL_SEND_SLP_CRED_BASE_ADDR + \
- ((x) * MTL_SEND_SLP_CRED_OFFSET))
+
+static inline u32 mtl_send_slp_credx_base_addr(const struct dwmac4_addrs *addrs,
+ const u32 x)
+{
+ u32 addr;
+
+ if (addrs)
+ addr = addrs->mtl_send_slp_cred + (x * addrs->mtl_send_slp_cred_offset);
+ else
+ addr = MTL_SEND_SLP_CRED_BASE_ADDR + (x * MTL_SEND_SLP_CRED_OFFSET);
+
+ return addr;
+}
#define MTL_SEND_SLP_CRED_SSC_MASK GENMASK(13, 0)
/* MTL hiCredit register */
#define MTL_HIGH_CRED_BASE_ADDR 0x00000d20
#define MTL_HIGH_CRED_OFFSET 0x40
-#define MTL_HIGH_CREDX_BASE_ADDR(x) (MTL_HIGH_CRED_BASE_ADDR + \
- ((x) * MTL_HIGH_CRED_OFFSET))
+
+static inline u32 mtl_high_credx_base_addr(const struct dwmac4_addrs *addrs,
+ const u32 x)
+{
+ u32 addr;
+
+ if (addrs)
+ addr = addrs->mtl_high_cred + (x * addrs->mtl_high_cred_offset);
+ else
+ addr = MTL_HIGH_CRED_BASE_ADDR + (x * MTL_HIGH_CRED_OFFSET);
+
+ return addr;
+}
#define MTL_HIGH_CRED_HC_MASK GENMASK(28, 0)
/* MTL loCredit register */
#define MTL_LOW_CRED_BASE_ADDR 0x00000d24
#define MTL_LOW_CRED_OFFSET 0x40
-#define MTL_LOW_CREDX_BASE_ADDR(x) (MTL_LOW_CRED_BASE_ADDR + \
- ((x) * MTL_LOW_CRED_OFFSET))
+
+static inline u32 mtl_low_credx_base_addr(const struct dwmac4_addrs *addrs,
+ const u32 x)
+{
+ u32 addr;
+
+ if (addrs)
+ addr = addrs->mtl_low_cred + (x * addrs->mtl_low_cred_offset);
+ else
+ addr = MTL_LOW_CRED_BASE_ADDR + (x * MTL_LOW_CRED_OFFSET);
+
+ return addr;
+}
#define MTL_HIGH_CRED_LC_MASK GENMASK(28, 0)
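The header rework above trades address macros for inline helpers keyed on a struct dwmac4_addrs *: a NULL table keeps the stock DWMAC4 register map, while a platform-supplied table (such as the sc8280xp one added earlier in this series) relocates the whole MTL block. Converted call sites then read, sketchily:

	value = readl(ioaddr + mtl_chanx_base_addr(priv->plat->dwmac4_addrs, chan));

The per-access NULL check costs a well-predicted branch, but spares every existing platform from having to declare a layout table.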
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index 36251ec2589c..afaec3fb9ab6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -198,15 +198,18 @@ static void dwmac4_prog_mtl_tx_algorithms(struct mac_device_info *hw,
writel(value, ioaddr + MTL_OPERATION_MODE);
}
-static void dwmac4_set_mtl_tx_queue_weight(struct mac_device_info *hw,
+static void dwmac4_set_mtl_tx_queue_weight(struct stmmac_priv *priv,
+ struct mac_device_info *hw,
u32 weight, u32 queue)
{
+ const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
void __iomem *ioaddr = hw->pcsr;
- u32 value = readl(ioaddr + MTL_TXQX_WEIGHT_BASE_ADDR(queue));
+ u32 value = readl(ioaddr + mtl_txqx_weight_base_addr(dwmac4_addrs,
+ queue));
value &= ~MTL_TXQ_WEIGHT_ISCQW_MASK;
value |= weight & MTL_TXQ_WEIGHT_ISCQW_MASK;
- writel(value, ioaddr + MTL_TXQX_WEIGHT_BASE_ADDR(queue));
+ writel(value, ioaddr + mtl_txqx_weight_base_addr(dwmac4_addrs, queue));
}
static void dwmac4_map_mtl_dma(struct mac_device_info *hw, u32 queue, u32 chan)
@@ -227,10 +230,12 @@ static void dwmac4_map_mtl_dma(struct mac_device_info *hw, u32 queue, u32 chan)
}
}
-static void dwmac4_config_cbs(struct mac_device_info *hw,
+static void dwmac4_config_cbs(struct stmmac_priv *priv,
+ struct mac_device_info *hw,
u32 send_slope, u32 idle_slope,
u32 high_credit, u32 low_credit, u32 queue)
{
+ const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
void __iomem *ioaddr = hw->pcsr;
u32 value;
@@ -241,31 +246,33 @@ static void dwmac4_config_cbs(struct mac_device_info *hw,
pr_debug("\tlow_credit: 0x%08x\n", low_credit);
/* enable AV algorithm */
- value = readl(ioaddr + MTL_ETSX_CTRL_BASE_ADDR(queue));
+ value = readl(ioaddr + mtl_etsx_ctrl_base_addr(dwmac4_addrs, queue));
value |= MTL_ETS_CTRL_AVALG;
value |= MTL_ETS_CTRL_CC;
- writel(value, ioaddr + MTL_ETSX_CTRL_BASE_ADDR(queue));
+ writel(value, ioaddr + mtl_etsx_ctrl_base_addr(dwmac4_addrs, queue));
/* configure send slope */
- value = readl(ioaddr + MTL_SEND_SLP_CREDX_BASE_ADDR(queue));
+ value = readl(ioaddr + mtl_send_slp_credx_base_addr(dwmac4_addrs,
+ queue));
value &= ~MTL_SEND_SLP_CRED_SSC_MASK;
value |= send_slope & MTL_SEND_SLP_CRED_SSC_MASK;
- writel(value, ioaddr + MTL_SEND_SLP_CREDX_BASE_ADDR(queue));
+ writel(value, ioaddr + mtl_send_slp_credx_base_addr(dwmac4_addrs,
+ queue));
/* configure idle slope (same register as tx weight) */
- dwmac4_set_mtl_tx_queue_weight(hw, idle_slope, queue);
+ dwmac4_set_mtl_tx_queue_weight(priv, hw, idle_slope, queue);
/* configure high credit */
- value = readl(ioaddr + MTL_HIGH_CREDX_BASE_ADDR(queue));
+ value = readl(ioaddr + mtl_high_credx_base_addr(dwmac4_addrs, queue));
value &= ~MTL_HIGH_CRED_HC_MASK;
value |= high_credit & MTL_HIGH_CRED_HC_MASK;
- writel(value, ioaddr + MTL_HIGH_CREDX_BASE_ADDR(queue));
+ writel(value, ioaddr + mtl_high_credx_base_addr(dwmac4_addrs, queue));
/* configure low credit */
- value = readl(ioaddr + MTL_LOW_CREDX_BASE_ADDR(queue));
+ value = readl(ioaddr + mtl_low_credx_base_addr(dwmac4_addrs, queue));
value &= ~MTL_HIGH_CRED_LC_MASK;
value |= low_credit & MTL_HIGH_CRED_LC_MASK;
- writel(value, ioaddr + MTL_LOW_CREDX_BASE_ADDR(queue));
+ writel(value, ioaddr + mtl_low_credx_base_addr(dwmac4_addrs, queue));
}
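These CBS writes are reached through the hwif dispatch layer. Assuming the mac-side stmmac_config_cbs() wrapper gains the same __priv splice as the dma wrappers shown later in this series, a hypothetical caller would look like:

/* Hypothetical call site: program CBS on queue 1 with credits computed
 * by the tc-cbs offload path; stmmac_config_cbs() resolves to
 * dwmac4_config_cbs() on this core.
 */
stmmac_config_cbs(priv, priv->hw, send_slope, idle_slope,
		  high_credit, low_credit, 1);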
static void dwmac4_dump_regs(struct mac_device_info *hw, u32 *reg_space)
@@ -759,8 +766,10 @@ static void dwmac4_phystatus(void __iomem *ioaddr, struct stmmac_extra_stats *x)
}
}
-static int dwmac4_irq_mtl_status(struct mac_device_info *hw, u32 chan)
+static int dwmac4_irq_mtl_status(struct stmmac_priv *priv,
+ struct mac_device_info *hw, u32 chan)
{
+ const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
void __iomem *ioaddr = hw->pcsr;
u32 mtl_int_qx_status;
int ret = 0;
@@ -770,12 +779,13 @@ static int dwmac4_irq_mtl_status(struct mac_device_info *hw, u32 chan)
/* Check MTL Interrupt */
if (mtl_int_qx_status & MTL_INT_QX(chan)) {
/* read Queue x Interrupt status */
- u32 status = readl(ioaddr + MTL_CHAN_INT_CTRL(chan));
+ u32 status = readl(ioaddr + MTL_CHAN_INT_CTRL(dwmac4_addrs,
+ chan));
if (status & MTL_RX_OVERFLOW_INT) {
/* clear Interrupt */
writel(status | MTL_RX_OVERFLOW_INT,
- ioaddr + MTL_CHAN_INT_CTRL(chan));
+ ioaddr + MTL_CHAN_INT_CTRL(dwmac4_addrs, chan));
ret = CORE_IRQ_MTL_RX_OVERFLOW;
}
}
@@ -833,14 +843,16 @@ static int dwmac4_irq_status(struct mac_device_info *hw,
return ret;
}
-static void dwmac4_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x,
+static void dwmac4_debug(struct stmmac_priv *priv, void __iomem *ioaddr,
+ struct stmmac_extra_stats *x,
u32 rx_queues, u32 tx_queues)
{
+ const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
u32 value;
u32 queue;
for (queue = 0; queue < tx_queues; queue++) {
- value = readl(ioaddr + MTL_CHAN_TX_DEBUG(queue));
+ value = readl(ioaddr + MTL_CHAN_TX_DEBUG(dwmac4_addrs, queue));
if (value & MTL_DEBUG_TXSTSFSTS)
x->mtl_tx_status_fifo_full++;
@@ -865,7 +877,7 @@ static void dwmac4_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x,
}
for (queue = 0; queue < rx_queues; queue++) {
- value = readl(ioaddr + MTL_CHAN_RX_DEBUG(queue));
+ value = readl(ioaddr + MTL_CHAN_RX_DEBUG(dwmac4_addrs, queue));
if (value & MTL_DEBUG_RXFSTS_MASK) {
u32 rxfsts = (value & MTL_DEBUG_RXFSTS_MASK)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index 8cc80b1db4cb..6a011d8633e8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -13,11 +13,11 @@
#include "dwmac4.h"
#include "dwmac4_descs.h"
-static int dwmac4_wrback_get_tx_status(void *data, struct stmmac_extra_stats *x,
+static int dwmac4_wrback_get_tx_status(struct net_device_stats *stats,
+ struct stmmac_extra_stats *x,
struct dma_desc *p,
void __iomem *ioaddr)
{
- struct net_device_stats *stats = (struct net_device_stats *)data;
unsigned int tdes3;
int ret = tx_done;
@@ -73,10 +73,10 @@ static int dwmac4_wrback_get_tx_status(void *data, struct stmmac_extra_stats *x,
return ret;
}
-static int dwmac4_wrback_get_rx_status(void *data, struct stmmac_extra_stats *x,
+static int dwmac4_wrback_get_rx_status(struct net_device_stats *stats,
+ struct stmmac_extra_stats *x,
struct dma_desc *p)
{
- struct net_device_stats *stats = (struct net_device_stats *)data;
unsigned int rdes1 = le32_to_cpu(p->des1);
unsigned int rdes2 = le32_to_cpu(p->des2);
unsigned int rdes3 = le32_to_cpu(p->des3);
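Replacing the old void *data parameter with struct net_device_stats * drops an unchecked cast from every status implementation; callers now pass the typed pointer directly. A sketch of the resulting call shape (caller details assumed):

/* Hypothetical caller in the tx-clean path: the typed stats pointer
 * replaces the opaque void *data the ops used to cast back.
 */
struct net_device_stats *stats = &priv->dev->stats;
int status = stmmac_tx_status(priv, stats, &priv->xstats, p, priv->ioaddr);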
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
index d99fa028c646..84d3a8551b03 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
@@ -13,6 +13,7 @@
#include <linux/io.h>
#include "dwmac4.h"
#include "dwmac4_dma.h"
+#include "stmmac.h"
static void dwmac4_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
{
@@ -68,77 +69,87 @@ static void dwmac4_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
writel(value, ioaddr + DMA_SYS_BUS_MODE);
}
-static void dwmac4_dma_init_rx_chan(void __iomem *ioaddr,
+static void dwmac4_dma_init_rx_chan(struct stmmac_priv *priv,
+ void __iomem *ioaddr,
struct stmmac_dma_cfg *dma_cfg,
dma_addr_t dma_rx_phy, u32 chan)
{
+ const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
u32 value;
u32 rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;
- value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan));
+ value = readl(ioaddr + DMA_CHAN_RX_CONTROL(dwmac4_addrs, chan));
value = value | (rxpbl << DMA_BUS_MODE_RPBL_SHIFT);
- writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan));
+ writel(value, ioaddr + DMA_CHAN_RX_CONTROL(dwmac4_addrs, chan));
if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) && likely(dma_cfg->eame))
writel(upper_32_bits(dma_rx_phy),
- ioaddr + DMA_CHAN_RX_BASE_ADDR_HI(chan));
+ ioaddr + DMA_CHAN_RX_BASE_ADDR_HI(dwmac4_addrs, chan));
- writel(lower_32_bits(dma_rx_phy), ioaddr + DMA_CHAN_RX_BASE_ADDR(chan));
+ writel(lower_32_bits(dma_rx_phy),
+ ioaddr + DMA_CHAN_RX_BASE_ADDR(dwmac4_addrs, chan));
}
-static void dwmac4_dma_init_tx_chan(void __iomem *ioaddr,
+static void dwmac4_dma_init_tx_chan(struct stmmac_priv *priv,
+ void __iomem *ioaddr,
struct stmmac_dma_cfg *dma_cfg,
dma_addr_t dma_tx_phy, u32 chan)
{
+ const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
u32 value;
u32 txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
- value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
+ value = readl(ioaddr + DMA_CHAN_TX_CONTROL(dwmac4_addrs, chan));
value = value | (txpbl << DMA_BUS_MODE_PBL_SHIFT);
/* Enable OSP to get best performance */
value |= DMA_CONTROL_OSP;
- writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan));
+ writel(value, ioaddr + DMA_CHAN_TX_CONTROL(dwmac4_addrs, chan));
if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) && likely(dma_cfg->eame))
writel(upper_32_bits(dma_tx_phy),
- ioaddr + DMA_CHAN_TX_BASE_ADDR_HI(chan));
+ ioaddr + DMA_CHAN_TX_BASE_ADDR_HI(dwmac4_addrs, chan));
- writel(lower_32_bits(dma_tx_phy), ioaddr + DMA_CHAN_TX_BASE_ADDR(chan));
+ writel(lower_32_bits(dma_tx_phy),
+ ioaddr + DMA_CHAN_TX_BASE_ADDR(dwmac4_addrs, chan));
}
-static void dwmac4_dma_init_channel(void __iomem *ioaddr,
+static void dwmac4_dma_init_channel(struct stmmac_priv *priv,
+ void __iomem *ioaddr,
struct stmmac_dma_cfg *dma_cfg, u32 chan)
{
+ const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
u32 value;
/* common channel control register config */
- value = readl(ioaddr + DMA_CHAN_CONTROL(chan));
+ value = readl(ioaddr + DMA_CHAN_CONTROL(dwmac4_addrs, chan));
if (dma_cfg->pblx8)
value = value | DMA_BUS_MODE_PBL;
- writel(value, ioaddr + DMA_CHAN_CONTROL(chan));
+ writel(value, ioaddr + DMA_CHAN_CONTROL(dwmac4_addrs, chan));
/* Mask interrupts by writing to CSR7 */
writel(DMA_CHAN_INTR_DEFAULT_MASK,
- ioaddr + DMA_CHAN_INTR_ENA(chan));
+ ioaddr + DMA_CHAN_INTR_ENA(dwmac4_addrs, chan));
}
-static void dwmac410_dma_init_channel(void __iomem *ioaddr,
+static void dwmac410_dma_init_channel(struct stmmac_priv *priv,
+ void __iomem *ioaddr,
struct stmmac_dma_cfg *dma_cfg, u32 chan)
{
+ const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
u32 value;
/* common channel control register config */
- value = readl(ioaddr + DMA_CHAN_CONTROL(chan));
+ value = readl(ioaddr + DMA_CHAN_CONTROL(dwmac4_addrs, chan));
if (dma_cfg->pblx8)
value = value | DMA_BUS_MODE_PBL;
- writel(value, ioaddr + DMA_CHAN_CONTROL(chan));
+ writel(value, ioaddr + DMA_CHAN_CONTROL(dwmac4_addrs, chan));
/* Mask interrupts by writing to CSR7 */
writel(DMA_CHAN_INTR_DEFAULT_MASK_4_10,
- ioaddr + DMA_CHAN_INTR_ENA(chan));
+ ioaddr + DMA_CHAN_INTR_ENA(dwmac4_addrs, chan));
}
static void dwmac4_dma_init(void __iomem *ioaddr,
@@ -176,65 +187,78 @@ static void dwmac4_dma_init(void __iomem *ioaddr,
}
-static void _dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 channel,
+static void _dwmac4_dump_dma_regs(struct stmmac_priv *priv,
+ void __iomem *ioaddr, u32 channel,
u32 *reg_space)
{
- reg_space[DMA_CHAN_CONTROL(channel) / 4] =
- readl(ioaddr + DMA_CHAN_CONTROL(channel));
- reg_space[DMA_CHAN_TX_CONTROL(channel) / 4] =
- readl(ioaddr + DMA_CHAN_TX_CONTROL(channel));
- reg_space[DMA_CHAN_RX_CONTROL(channel) / 4] =
- readl(ioaddr + DMA_CHAN_RX_CONTROL(channel));
- reg_space[DMA_CHAN_TX_BASE_ADDR(channel) / 4] =
- readl(ioaddr + DMA_CHAN_TX_BASE_ADDR(channel));
- reg_space[DMA_CHAN_RX_BASE_ADDR(channel) / 4] =
- readl(ioaddr + DMA_CHAN_RX_BASE_ADDR(channel));
- reg_space[DMA_CHAN_TX_END_ADDR(channel) / 4] =
- readl(ioaddr + DMA_CHAN_TX_END_ADDR(channel));
- reg_space[DMA_CHAN_RX_END_ADDR(channel) / 4] =
- readl(ioaddr + DMA_CHAN_RX_END_ADDR(channel));
- reg_space[DMA_CHAN_TX_RING_LEN(channel) / 4] =
- readl(ioaddr + DMA_CHAN_TX_RING_LEN(channel));
- reg_space[DMA_CHAN_RX_RING_LEN(channel) / 4] =
- readl(ioaddr + DMA_CHAN_RX_RING_LEN(channel));
- reg_space[DMA_CHAN_INTR_ENA(channel) / 4] =
- readl(ioaddr + DMA_CHAN_INTR_ENA(channel));
- reg_space[DMA_CHAN_RX_WATCHDOG(channel) / 4] =
- readl(ioaddr + DMA_CHAN_RX_WATCHDOG(channel));
- reg_space[DMA_CHAN_SLOT_CTRL_STATUS(channel) / 4] =
- readl(ioaddr + DMA_CHAN_SLOT_CTRL_STATUS(channel));
- reg_space[DMA_CHAN_CUR_TX_DESC(channel) / 4] =
- readl(ioaddr + DMA_CHAN_CUR_TX_DESC(channel));
- reg_space[DMA_CHAN_CUR_RX_DESC(channel) / 4] =
- readl(ioaddr + DMA_CHAN_CUR_RX_DESC(channel));
- reg_space[DMA_CHAN_CUR_TX_BUF_ADDR(channel) / 4] =
- readl(ioaddr + DMA_CHAN_CUR_TX_BUF_ADDR(channel));
- reg_space[DMA_CHAN_CUR_RX_BUF_ADDR(channel) / 4] =
- readl(ioaddr + DMA_CHAN_CUR_RX_BUF_ADDR(channel));
- reg_space[DMA_CHAN_STATUS(channel) / 4] =
- readl(ioaddr + DMA_CHAN_STATUS(channel));
+ const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
+ const struct dwmac4_addrs *default_addrs = NULL;
+
+ /* Purposely save the registers in the "normal" layout, regardless of
+ * platform modifications, to keep reg_space size constant
+ */
+ reg_space[DMA_CHAN_CONTROL(default_addrs, channel) / 4] =
+ readl(ioaddr + DMA_CHAN_CONTROL(dwmac4_addrs, channel));
+ reg_space[DMA_CHAN_TX_CONTROL(default_addrs, channel) / 4] =
+ readl(ioaddr + DMA_CHAN_TX_CONTROL(dwmac4_addrs, channel));
+ reg_space[DMA_CHAN_RX_CONTROL(default_addrs, channel) / 4] =
+ readl(ioaddr + DMA_CHAN_RX_CONTROL(dwmac4_addrs, channel));
+ reg_space[DMA_CHAN_TX_BASE_ADDR(default_addrs, channel) / 4] =
+ readl(ioaddr + DMA_CHAN_TX_BASE_ADDR(dwmac4_addrs, channel));
+ reg_space[DMA_CHAN_RX_BASE_ADDR(default_addrs, channel) / 4] =
+ readl(ioaddr + DMA_CHAN_RX_BASE_ADDR(dwmac4_addrs, channel));
+ reg_space[DMA_CHAN_TX_END_ADDR(default_addrs, channel) / 4] =
+ readl(ioaddr + DMA_CHAN_TX_END_ADDR(dwmac4_addrs, channel));
+ reg_space[DMA_CHAN_RX_END_ADDR(default_addrs, channel) / 4] =
+ readl(ioaddr + DMA_CHAN_RX_END_ADDR(dwmac4_addrs, channel));
+ reg_space[DMA_CHAN_TX_RING_LEN(default_addrs, channel) / 4] =
+ readl(ioaddr + DMA_CHAN_TX_RING_LEN(dwmac4_addrs, channel));
+ reg_space[DMA_CHAN_RX_RING_LEN(default_addrs, channel) / 4] =
+ readl(ioaddr + DMA_CHAN_RX_RING_LEN(dwmac4_addrs, channel));
+ reg_space[DMA_CHAN_INTR_ENA(default_addrs, channel) / 4] =
+ readl(ioaddr + DMA_CHAN_INTR_ENA(dwmac4_addrs, channel));
+ reg_space[DMA_CHAN_RX_WATCHDOG(default_addrs, channel) / 4] =
+ readl(ioaddr + DMA_CHAN_RX_WATCHDOG(dwmac4_addrs, channel));
+ reg_space[DMA_CHAN_SLOT_CTRL_STATUS(default_addrs, channel) / 4] =
+ readl(ioaddr + DMA_CHAN_SLOT_CTRL_STATUS(dwmac4_addrs, channel));
+ reg_space[DMA_CHAN_CUR_TX_DESC(default_addrs, channel) / 4] =
+ readl(ioaddr + DMA_CHAN_CUR_TX_DESC(dwmac4_addrs, channel));
+ reg_space[DMA_CHAN_CUR_RX_DESC(default_addrs, channel) / 4] =
+ readl(ioaddr + DMA_CHAN_CUR_RX_DESC(dwmac4_addrs, channel));
+ reg_space[DMA_CHAN_CUR_TX_BUF_ADDR(default_addrs, channel) / 4] =
+ readl(ioaddr + DMA_CHAN_CUR_TX_BUF_ADDR(dwmac4_addrs, channel));
+ reg_space[DMA_CHAN_CUR_RX_BUF_ADDR(default_addrs, channel) / 4] =
+ readl(ioaddr + DMA_CHAN_CUR_RX_BUF_ADDR(dwmac4_addrs, channel));
+ reg_space[DMA_CHAN_STATUS(default_addrs, channel) / 4] =
+ readl(ioaddr + DMA_CHAN_STATUS(dwmac4_addrs, channel));
}
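Note the deliberate asymmetry: the reg_space index is computed with a NULL addrs (the default layout) while the readl() goes through the platform layout, so the register dump keeps a fixed size and ordering on every variant. Condensed to one access:

/* Index with the default map, read through the platform map. */
reg_space[DMA_CHAN_CONTROL(NULL, channel) / 4] =
	readl(ioaddr + DMA_CHAN_CONTROL(dwmac4_addrs, channel));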
-static void dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 *reg_space)
+static void dwmac4_dump_dma_regs(struct stmmac_priv *priv, void __iomem *ioaddr,
+ u32 *reg_space)
{
int i;
for (i = 0; i < DMA_CHANNEL_NB_MAX; i++)
- _dwmac4_dump_dma_regs(ioaddr, i, reg_space);
+ _dwmac4_dump_dma_regs(priv, ioaddr, i, reg_space);
}
-static void dwmac4_rx_watchdog(void __iomem *ioaddr, u32 riwt, u32 queue)
+static void dwmac4_rx_watchdog(struct stmmac_priv *priv, void __iomem *ioaddr,
+ u32 riwt, u32 queue)
{
- writel(riwt, ioaddr + DMA_CHAN_RX_WATCHDOG(queue));
+ const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
+
+ writel(riwt, ioaddr + DMA_CHAN_RX_WATCHDOG(dwmac4_addrs, queue));
}
-static void dwmac4_dma_rx_chan_op_mode(void __iomem *ioaddr, int mode,
+static void dwmac4_dma_rx_chan_op_mode(struct stmmac_priv *priv,
+ void __iomem *ioaddr, int mode,
u32 channel, int fifosz, u8 qmode)
{
+ const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
unsigned int rqs = fifosz / 256 - 1;
u32 mtl_rx_op;
- mtl_rx_op = readl(ioaddr + MTL_CHAN_RX_OP_MODE(channel));
+ mtl_rx_op = readl(ioaddr + MTL_CHAN_RX_OP_MODE(dwmac4_addrs, channel));
if (mode == SF_DMA_MODE) {
pr_debug("GMAC: enable RX store and forward mode\n");
@@ -292,13 +316,16 @@ static void dwmac4_dma_rx_chan_op_mode(void __iomem *ioaddr, int mode,
mtl_rx_op |= rfa << MTL_OP_MODE_RFA_SHIFT;
}
- writel(mtl_rx_op, ioaddr + MTL_CHAN_RX_OP_MODE(channel));
+ writel(mtl_rx_op, ioaddr + MTL_CHAN_RX_OP_MODE(dwmac4_addrs, channel));
}
-static void dwmac4_dma_tx_chan_op_mode(void __iomem *ioaddr, int mode,
+static void dwmac4_dma_tx_chan_op_mode(struct stmmac_priv *priv,
+ void __iomem *ioaddr, int mode,
u32 channel, int fifosz, u8 qmode)
{
- u32 mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(channel));
+ const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
+ u32 mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(dwmac4_addrs,
+ channel));
unsigned int tqs = fifosz / 256 - 1;
if (mode == SF_DMA_MODE) {
@@ -344,7 +371,7 @@ static void dwmac4_dma_tx_chan_op_mode(void __iomem *ioaddr, int mode,
mtl_tx_op &= ~MTL_OP_MODE_TQS_MASK;
mtl_tx_op |= tqs << MTL_OP_MODE_TQS_SHIFT;
- writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(channel));
+ writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(dwmac4_addrs, channel));
}
static int dwmac4_get_hw_feature(void __iomem *ioaddr,
@@ -442,26 +469,31 @@ static int dwmac4_get_hw_feature(void __iomem *ioaddr,
}
/* Enable/disable TSO feature and set MSS */
-static void dwmac4_enable_tso(void __iomem *ioaddr, bool en, u32 chan)
+static void dwmac4_enable_tso(struct stmmac_priv *priv, void __iomem *ioaddr,
+ bool en, u32 chan)
{
+ const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
u32 value;
if (en) {
/* enable TSO */
- value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
+ value = readl(ioaddr + DMA_CHAN_TX_CONTROL(dwmac4_addrs, chan));
writel(value | DMA_CONTROL_TSE,
- ioaddr + DMA_CHAN_TX_CONTROL(chan));
+ ioaddr + DMA_CHAN_TX_CONTROL(dwmac4_addrs, chan));
} else {
/* disable TSO */
- value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
+ value = readl(ioaddr + DMA_CHAN_TX_CONTROL(dwmac4_addrs, chan));
writel(value & ~DMA_CONTROL_TSE,
- ioaddr + DMA_CHAN_TX_CONTROL(chan));
+ ioaddr + DMA_CHAN_TX_CONTROL(dwmac4_addrs, chan));
}
}
-static void dwmac4_qmode(void __iomem *ioaddr, u32 channel, u8 qmode)
+static void dwmac4_qmode(struct stmmac_priv *priv, void __iomem *ioaddr,
+ u32 channel, u8 qmode)
{
- u32 mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(channel));
+ const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
+ u32 mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(dwmac4_addrs,
+ channel));
mtl_tx_op &= ~MTL_OP_MODE_TXQEN_MASK;
if (qmode != MTL_QUEUE_AVB)
@@ -469,47 +501,54 @@ static void dwmac4_qmode(void __iomem *ioaddr, u32 channel, u8 qmode)
else
mtl_tx_op |= MTL_OP_MODE_TXQEN_AV;
- writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(channel));
+ writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(dwmac4_addrs, channel));
}
-static void dwmac4_set_bfsize(void __iomem *ioaddr, int bfsize, u32 chan)
+static void dwmac4_set_bfsize(struct stmmac_priv *priv, void __iomem *ioaddr,
+ int bfsize, u32 chan)
{
- u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan));
+ const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
+ u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(dwmac4_addrs, chan));
value &= ~DMA_RBSZ_MASK;
value |= (bfsize << DMA_RBSZ_SHIFT) & DMA_RBSZ_MASK;
- writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan));
+ writel(value, ioaddr + DMA_CHAN_RX_CONTROL(dwmac4_addrs, chan));
}
-static void dwmac4_enable_sph(void __iomem *ioaddr, bool en, u32 chan)
+static void dwmac4_enable_sph(struct stmmac_priv *priv, void __iomem *ioaddr,
+ bool en, u32 chan)
{
+ const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
u32 value = readl(ioaddr + GMAC_EXT_CONFIG);
value &= ~GMAC_CONFIG_HDSMS;
value |= GMAC_CONFIG_HDSMS_256; /* Segment max 256 bytes */
writel(value, ioaddr + GMAC_EXT_CONFIG);
- value = readl(ioaddr + DMA_CHAN_CONTROL(chan));
+ value = readl(ioaddr + DMA_CHAN_CONTROL(dwmac4_addrs, chan));
if (en)
value |= DMA_CONTROL_SPH;
else
value &= ~DMA_CONTROL_SPH;
- writel(value, ioaddr + DMA_CHAN_CONTROL(chan));
+ writel(value, ioaddr + DMA_CHAN_CONTROL(dwmac4_addrs, chan));
}
-static int dwmac4_enable_tbs(void __iomem *ioaddr, bool en, u32 chan)
+static int dwmac4_enable_tbs(struct stmmac_priv *priv, void __iomem *ioaddr,
+ bool en, u32 chan)
{
- u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
+ const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
+ u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(dwmac4_addrs, chan));
if (en)
value |= DMA_CONTROL_EDSE;
else
value &= ~DMA_CONTROL_EDSE;
- writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan));
+ writel(value, ioaddr + DMA_CHAN_TX_CONTROL(dwmac4_addrs, chan));
- value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan)) & DMA_CONTROL_EDSE;
+ value = readl(ioaddr + DMA_CHAN_TX_CONTROL(dwmac4_addrs,
+ chan)) & DMA_CONTROL_EDSE;
if (en && !value)
return -EIO;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
index 9321879b599c..358e7dcb6a9a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
@@ -95,29 +95,41 @@
/* Following DMA defines are channel oriented */
#define DMA_CHAN_BASE_ADDR 0x00001100
#define DMA_CHAN_BASE_OFFSET 0x80
-#define DMA_CHANX_BASE_ADDR(x) (DMA_CHAN_BASE_ADDR + \
- (x * DMA_CHAN_BASE_OFFSET))
+
+static inline u32 dma_chanx_base_addr(const struct dwmac4_addrs *addrs,
+ const u32 x)
+{
+ u32 addr;
+
+ if (addrs)
+ addr = addrs->dma_chan + (x * addrs->dma_chan_offset);
+ else
+ addr = DMA_CHAN_BASE_ADDR + (x * DMA_CHAN_BASE_OFFSET);
+
+ return addr;
+}
+
#define DMA_CHAN_REG_NUMBER 17
-#define DMA_CHAN_CONTROL(x) DMA_CHANX_BASE_ADDR(x)
-#define DMA_CHAN_TX_CONTROL(x) (DMA_CHANX_BASE_ADDR(x) + 0x4)
-#define DMA_CHAN_RX_CONTROL(x) (DMA_CHANX_BASE_ADDR(x) + 0x8)
-#define DMA_CHAN_TX_BASE_ADDR_HI(x) (DMA_CHANX_BASE_ADDR(x) + 0x10)
-#define DMA_CHAN_TX_BASE_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x14)
-#define DMA_CHAN_RX_BASE_ADDR_HI(x) (DMA_CHANX_BASE_ADDR(x) + 0x18)
-#define DMA_CHAN_RX_BASE_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x1c)
-#define DMA_CHAN_TX_END_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x20)
-#define DMA_CHAN_RX_END_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x28)
-#define DMA_CHAN_TX_RING_LEN(x) (DMA_CHANX_BASE_ADDR(x) + 0x2c)
-#define DMA_CHAN_RX_RING_LEN(x) (DMA_CHANX_BASE_ADDR(x) + 0x30)
-#define DMA_CHAN_INTR_ENA(x) (DMA_CHANX_BASE_ADDR(x) + 0x34)
-#define DMA_CHAN_RX_WATCHDOG(x) (DMA_CHANX_BASE_ADDR(x) + 0x38)
-#define DMA_CHAN_SLOT_CTRL_STATUS(x) (DMA_CHANX_BASE_ADDR(x) + 0x3c)
-#define DMA_CHAN_CUR_TX_DESC(x) (DMA_CHANX_BASE_ADDR(x) + 0x44)
-#define DMA_CHAN_CUR_RX_DESC(x) (DMA_CHANX_BASE_ADDR(x) + 0x4c)
-#define DMA_CHAN_CUR_TX_BUF_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x54)
-#define DMA_CHAN_CUR_RX_BUF_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x5c)
-#define DMA_CHAN_STATUS(x) (DMA_CHANX_BASE_ADDR(x) + 0x60)
+#define DMA_CHAN_CONTROL(addrs, x) dma_chanx_base_addr(addrs, x)
+#define DMA_CHAN_TX_CONTROL(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x4)
+#define DMA_CHAN_RX_CONTROL(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x8)
+#define DMA_CHAN_TX_BASE_ADDR_HI(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x10)
+#define DMA_CHAN_TX_BASE_ADDR(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x14)
+#define DMA_CHAN_RX_BASE_ADDR_HI(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x18)
+#define DMA_CHAN_RX_BASE_ADDR(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x1c)
+#define DMA_CHAN_TX_END_ADDR(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x20)
+#define DMA_CHAN_RX_END_ADDR(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x28)
+#define DMA_CHAN_TX_RING_LEN(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x2c)
+#define DMA_CHAN_RX_RING_LEN(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x30)
+#define DMA_CHAN_INTR_ENA(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x34)
+#define DMA_CHAN_RX_WATCHDOG(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x38)
+#define DMA_CHAN_SLOT_CTRL_STATUS(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x3c)
+#define DMA_CHAN_CUR_TX_DESC(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x44)
+#define DMA_CHAN_CUR_RX_DESC(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x4c)
+#define DMA_CHAN_CUR_TX_BUF_ADDR(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x54)
+#define DMA_CHAN_CUR_RX_BUF_ADDR(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x5c)
+#define DMA_CHAN_STATUS(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x60)
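Each register macro is now the (possibly overridden) per-channel base plus a fixed offset. With the default layout, for example:

/* DMA_CHAN_TX_CONTROL(NULL, 2)
 *   == dma_chanx_base_addr(NULL, 2) + 0x4
 *   == 0x1100 + 2 * 0x80 + 0x4
 *   == 0x1204
 */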
/* DMA Control X */
#define DMA_CONTROL_SPH BIT(24)
@@ -220,19 +232,31 @@
#define DMA_CHAN0_DBG_STAT_RPS_SHIFT 8
int dwmac4_dma_reset(void __iomem *ioaddr);
-void dwmac4_enable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx);
-void dwmac410_enable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx);
-void dwmac4_disable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx);
-void dwmac410_disable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx);
-void dwmac4_dma_start_tx(void __iomem *ioaddr, u32 chan);
-void dwmac4_dma_stop_tx(void __iomem *ioaddr, u32 chan);
-void dwmac4_dma_start_rx(void __iomem *ioaddr, u32 chan);
-void dwmac4_dma_stop_rx(void __iomem *ioaddr, u32 chan);
-int dwmac4_dma_interrupt(void __iomem *ioaddr,
+void dwmac4_enable_dma_irq(struct stmmac_priv *priv, void __iomem *ioaddr,
+ u32 chan, bool rx, bool tx);
+void dwmac410_enable_dma_irq(struct stmmac_priv *priv, void __iomem *ioaddr,
+ u32 chan, bool rx, bool tx);
+void dwmac4_disable_dma_irq(struct stmmac_priv *priv, void __iomem *ioaddr,
+ u32 chan, bool rx, bool tx);
+void dwmac410_disable_dma_irq(struct stmmac_priv *priv, void __iomem *ioaddr,
+ u32 chan, bool rx, bool tx);
+void dwmac4_dma_start_tx(struct stmmac_priv *priv, void __iomem *ioaddr,
+ u32 chan);
+void dwmac4_dma_stop_tx(struct stmmac_priv *priv, void __iomem *ioaddr,
+ u32 chan);
+void dwmac4_dma_start_rx(struct stmmac_priv *priv, void __iomem *ioaddr,
+ u32 chan);
+void dwmac4_dma_stop_rx(struct stmmac_priv *priv, void __iomem *ioaddr,
+ u32 chan);
+int dwmac4_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr,
struct stmmac_extra_stats *x, u32 chan, u32 dir);
-void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len, u32 chan);
-void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len, u32 chan);
-void dwmac4_set_rx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
-void dwmac4_set_tx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
+void dwmac4_set_rx_ring_len(struct stmmac_priv *priv, void __iomem *ioaddr,
+ u32 len, u32 chan);
+void dwmac4_set_tx_ring_len(struct stmmac_priv *priv, void __iomem *ioaddr,
+ u32 len, u32 chan);
+void dwmac4_set_rx_tail_ptr(struct stmmac_priv *priv, void __iomem *ioaddr,
+ u32 tail_ptr, u32 chan);
+void dwmac4_set_tx_tail_ptr(struct stmmac_priv *priv, void __iomem *ioaddr,
+ u32 tail_ptr, u32 chan);
#endif /* __DWMAC4_DMA_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
index d1c605777985..df41eac54058 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
@@ -11,6 +11,7 @@
#include "common.h"
#include "dwmac4_dma.h"
#include "dwmac4.h"
+#include "stmmac.h"
int dwmac4_dma_reset(void __iomem *ioaddr)
{
@@ -25,120 +26,151 @@ int dwmac4_dma_reset(void __iomem *ioaddr)
10000, 1000000);
}
-void dwmac4_set_rx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan)
+void dwmac4_set_rx_tail_ptr(struct stmmac_priv *priv, void __iomem *ioaddr,
+ u32 tail_ptr, u32 chan)
{
- writel(tail_ptr, ioaddr + DMA_CHAN_RX_END_ADDR(chan));
+ const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
+
+ writel(tail_ptr, ioaddr + DMA_CHAN_RX_END_ADDR(dwmac4_addrs, chan));
}
-void dwmac4_set_tx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan)
+void dwmac4_set_tx_tail_ptr(struct stmmac_priv *priv, void __iomem *ioaddr,
+ u32 tail_ptr, u32 chan)
{
- writel(tail_ptr, ioaddr + DMA_CHAN_TX_END_ADDR(chan));
+ const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
+
+ writel(tail_ptr, ioaddr + DMA_CHAN_TX_END_ADDR(dwmac4_addrs, chan));
}
-void dwmac4_dma_start_tx(void __iomem *ioaddr, u32 chan)
+void dwmac4_dma_start_tx(struct stmmac_priv *priv, void __iomem *ioaddr,
+ u32 chan)
{
- u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
+ const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
+ u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(dwmac4_addrs, chan));
value |= DMA_CONTROL_ST;
- writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan));
+ writel(value, ioaddr + DMA_CHAN_TX_CONTROL(dwmac4_addrs, chan));
value = readl(ioaddr + GMAC_CONFIG);
value |= GMAC_CONFIG_TE;
writel(value, ioaddr + GMAC_CONFIG);
}
-void dwmac4_dma_stop_tx(void __iomem *ioaddr, u32 chan)
+void dwmac4_dma_stop_tx(struct stmmac_priv *priv, void __iomem *ioaddr,
+ u32 chan)
{
- u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
+ const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
+
+ u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(dwmac4_addrs, chan));
value &= ~DMA_CONTROL_ST;
- writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan));
+ writel(value, ioaddr + DMA_CHAN_TX_CONTROL(dwmac4_addrs, chan));
}
-void dwmac4_dma_start_rx(void __iomem *ioaddr, u32 chan)
+void dwmac4_dma_start_rx(struct stmmac_priv *priv, void __iomem *ioaddr,
+ u32 chan)
{
- u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan));
+ const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
+
+ u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(dwmac4_addrs, chan));
value |= DMA_CONTROL_SR;
- writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan));
+ writel(value, ioaddr + DMA_CHAN_RX_CONTROL(dwmac4_addrs, chan));
value = readl(ioaddr + GMAC_CONFIG);
value |= GMAC_CONFIG_RE;
writel(value, ioaddr + GMAC_CONFIG);
}
-void dwmac4_dma_stop_rx(void __iomem *ioaddr, u32 chan)
+void dwmac4_dma_stop_rx(struct stmmac_priv *priv, void __iomem *ioaddr,
+ u32 chan)
{
- u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan));
+ const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
+ u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(dwmac4_addrs, chan));
value &= ~DMA_CONTROL_SR;
- writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan));
+ writel(value, ioaddr + DMA_CHAN_RX_CONTROL(dwmac4_addrs, chan));
}
-void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len, u32 chan)
+void dwmac4_set_tx_ring_len(struct stmmac_priv *priv, void __iomem *ioaddr,
+ u32 len, u32 chan)
{
- writel(len, ioaddr + DMA_CHAN_TX_RING_LEN(chan));
+ const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
+
+ writel(len, ioaddr + DMA_CHAN_TX_RING_LEN(dwmac4_addrs, chan));
}
-void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len, u32 chan)
+void dwmac4_set_rx_ring_len(struct stmmac_priv *priv, void __iomem *ioaddr,
+ u32 len, u32 chan)
{
- writel(len, ioaddr + DMA_CHAN_RX_RING_LEN(chan));
+ const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
+
+ writel(len, ioaddr + DMA_CHAN_RX_RING_LEN(dwmac4_addrs, chan));
}
-void dwmac4_enable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx)
+void dwmac4_enable_dma_irq(struct stmmac_priv *priv, void __iomem *ioaddr,
+ u32 chan, bool rx, bool tx)
{
- u32 value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan));
+ const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
+ u32 value = readl(ioaddr + DMA_CHAN_INTR_ENA(dwmac4_addrs, chan));
if (rx)
value |= DMA_CHAN_INTR_DEFAULT_RX;
if (tx)
value |= DMA_CHAN_INTR_DEFAULT_TX;
- writel(value, ioaddr + DMA_CHAN_INTR_ENA(chan));
+ writel(value, ioaddr + DMA_CHAN_INTR_ENA(dwmac4_addrs, chan));
}
-void dwmac410_enable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx)
+void dwmac410_enable_dma_irq(struct stmmac_priv *priv, void __iomem *ioaddr,
+ u32 chan, bool rx, bool tx)
{
- u32 value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan));
+ const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
+ u32 value = readl(ioaddr + DMA_CHAN_INTR_ENA(dwmac4_addrs, chan));
if (rx)
value |= DMA_CHAN_INTR_DEFAULT_RX_4_10;
if (tx)
value |= DMA_CHAN_INTR_DEFAULT_TX_4_10;
- writel(value, ioaddr + DMA_CHAN_INTR_ENA(chan));
+ writel(value, ioaddr + DMA_CHAN_INTR_ENA(dwmac4_addrs, chan));
}
-void dwmac4_disable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx)
+void dwmac4_disable_dma_irq(struct stmmac_priv *priv, void __iomem *ioaddr,
+ u32 chan, bool rx, bool tx)
{
- u32 value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan));
+ const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
+ u32 value = readl(ioaddr + DMA_CHAN_INTR_ENA(dwmac4_addrs, chan));
if (rx)
value &= ~DMA_CHAN_INTR_DEFAULT_RX;
if (tx)
value &= ~DMA_CHAN_INTR_DEFAULT_TX;
- writel(value, ioaddr + DMA_CHAN_INTR_ENA(chan));
+ writel(value, ioaddr + DMA_CHAN_INTR_ENA(dwmac4_addrs, chan));
}
-void dwmac410_disable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx)
+void dwmac410_disable_dma_irq(struct stmmac_priv *priv, void __iomem *ioaddr,
+ u32 chan, bool rx, bool tx)
{
- u32 value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan));
+ const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
+ u32 value = readl(ioaddr + DMA_CHAN_INTR_ENA(dwmac4_addrs, chan));
if (rx)
value &= ~DMA_CHAN_INTR_DEFAULT_RX_4_10;
if (tx)
value &= ~DMA_CHAN_INTR_DEFAULT_TX_4_10;
- writel(value, ioaddr + DMA_CHAN_INTR_ENA(chan));
+ writel(value, ioaddr + DMA_CHAN_INTR_ENA(dwmac4_addrs, chan));
}
-int dwmac4_dma_interrupt(void __iomem *ioaddr,
+int dwmac4_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr,
struct stmmac_extra_stats *x, u32 chan, u32 dir)
{
- u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(chan));
- u32 intr_en = readl(ioaddr + DMA_CHAN_INTR_ENA(chan));
+ const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
+ u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(dwmac4_addrs, chan));
+ u32 intr_en = readl(ioaddr + DMA_CHAN_INTR_ENA(dwmac4_addrs, chan));
int ret = 0;
if (dir == DMA_DIR_RX)
@@ -183,7 +215,8 @@ int dwmac4_dma_interrupt(void __iomem *ioaddr,
if (unlikely(intr_status & DMA_CHAN_STATUS_ERI))
x->rx_early_irq++;
- writel(intr_status & intr_en, ioaddr + DMA_CHAN_STATUS(chan));
+ writel(intr_status & intr_en,
+ ioaddr + DMA_CHAN_STATUS(dwmac4_addrs, chan));
return ret;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
index acd70b9a3173..72672391675f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
@@ -153,14 +153,20 @@
#define NUM_DWMAC4_DMA_REGS 27
void dwmac_enable_dma_transmission(void __iomem *ioaddr);
-void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx);
-void dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx);
-void dwmac_dma_start_tx(void __iomem *ioaddr, u32 chan);
-void dwmac_dma_stop_tx(void __iomem *ioaddr, u32 chan);
-void dwmac_dma_start_rx(void __iomem *ioaddr, u32 chan);
-void dwmac_dma_stop_rx(void __iomem *ioaddr, u32 chan);
-int dwmac_dma_interrupt(void __iomem *ioaddr, struct stmmac_extra_stats *x,
- u32 chan, u32 dir);
+void dwmac_enable_dma_irq(struct stmmac_priv *priv, void __iomem *ioaddr,
+ u32 chan, bool rx, bool tx);
+void dwmac_disable_dma_irq(struct stmmac_priv *priv, void __iomem *ioaddr,
+ u32 chan, bool rx, bool tx);
+void dwmac_dma_start_tx(struct stmmac_priv *priv, void __iomem *ioaddr,
+ u32 chan);
+void dwmac_dma_stop_tx(struct stmmac_priv *priv, void __iomem *ioaddr,
+ u32 chan);
+void dwmac_dma_start_rx(struct stmmac_priv *priv, void __iomem *ioaddr,
+ u32 chan);
+void dwmac_dma_stop_rx(struct stmmac_priv *priv, void __iomem *ioaddr,
+ u32 chan);
+int dwmac_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr,
+ struct stmmac_extra_stats *x, u32 chan, u32 dir);
int dwmac_dma_reset(void __iomem *ioaddr);
#endif /* __DWMAC_DMA_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
index 9b6138b11776..0b6f999a8305 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
@@ -32,7 +32,8 @@ void dwmac_enable_dma_transmission(void __iomem *ioaddr)
writel(1, ioaddr + DMA_XMT_POLL_DEMAND);
}
-void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx)
+void dwmac_enable_dma_irq(struct stmmac_priv *priv, void __iomem *ioaddr,
+ u32 chan, bool rx, bool tx)
{
u32 value = readl(ioaddr + DMA_INTR_ENA);
@@ -44,7 +45,8 @@ void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx)
writel(value, ioaddr + DMA_INTR_ENA);
}
-void dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx)
+void dwmac_disable_dma_irq(struct stmmac_priv *priv, void __iomem *ioaddr,
+ u32 chan, bool rx, bool tx)
{
u32 value = readl(ioaddr + DMA_INTR_ENA);
@@ -56,28 +58,30 @@ void dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx)
writel(value, ioaddr + DMA_INTR_ENA);
}
-void dwmac_dma_start_tx(void __iomem *ioaddr, u32 chan)
+void dwmac_dma_start_tx(struct stmmac_priv *priv, void __iomem *ioaddr,
+ u32 chan)
{
u32 value = readl(ioaddr + DMA_CONTROL);
value |= DMA_CONTROL_ST;
writel(value, ioaddr + DMA_CONTROL);
}
-void dwmac_dma_stop_tx(void __iomem *ioaddr, u32 chan)
+void dwmac_dma_stop_tx(struct stmmac_priv *priv, void __iomem *ioaddr, u32 chan)
{
u32 value = readl(ioaddr + DMA_CONTROL);
value &= ~DMA_CONTROL_ST;
writel(value, ioaddr + DMA_CONTROL);
}
-void dwmac_dma_start_rx(void __iomem *ioaddr, u32 chan)
+void dwmac_dma_start_rx(struct stmmac_priv *priv, void __iomem *ioaddr,
+ u32 chan)
{
u32 value = readl(ioaddr + DMA_CONTROL);
value |= DMA_CONTROL_SR;
writel(value, ioaddr + DMA_CONTROL);
}
-void dwmac_dma_stop_rx(void __iomem *ioaddr, u32 chan)
+void dwmac_dma_stop_rx(struct stmmac_priv *priv, void __iomem *ioaddr, u32 chan)
{
u32 value = readl(ioaddr + DMA_CONTROL);
value &= ~DMA_CONTROL_SR;
@@ -154,7 +158,7 @@ static void show_rx_process_state(unsigned int status)
}
#endif
-int dwmac_dma_interrupt(void __iomem *ioaddr,
+int dwmac_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr,
struct stmmac_extra_stats *x, u32 chan, u32 dir)
{
int ret = 0;
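The legacy (pre-GMAC4) helpers in this file take priv only so that both register layouts satisfy the same stmmac_dma_ops function-pointer types; their bodies never dereference it, since the old DMA block has a single fixed layout. A sketch of the shared ops slot this enables (the instance name is hypothetical):

/* Both generations now fit the same callback signatures. */
static const struct stmmac_dma_ops example_dma_ops = {
	.start_tx = dwmac_dma_start_tx,	/* legacy: priv unused */
	.stop_tx  = dwmac_dma_stop_tx,
	/* ... */
};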
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
index c6c4d7948fe5..a0c2ef8bb0ac 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -187,7 +187,8 @@ static void dwxgmac2_prog_mtl_tx_algorithms(struct mac_device_info *hw,
}
}
-static void dwxgmac2_set_mtl_tx_queue_weight(struct mac_device_info *hw,
+static void dwxgmac2_set_mtl_tx_queue_weight(struct stmmac_priv *priv,
+ struct mac_device_info *hw,
u32 weight, u32 queue)
{
void __iomem *ioaddr = hw->pcsr;
@@ -212,7 +213,8 @@ static void dwxgmac2_map_mtl_to_dma(struct mac_device_info *hw, u32 queue,
writel(value, ioaddr + reg);
}
-static void dwxgmac2_config_cbs(struct mac_device_info *hw,
+static void dwxgmac2_config_cbs(struct stmmac_priv *priv,
+ struct mac_device_info *hw,
u32 send_slope, u32 idle_slope,
u32 high_credit, u32 low_credit, u32 queue)
{
@@ -276,7 +278,8 @@ static int dwxgmac2_host_irq_status(struct mac_device_info *hw,
return ret;
}
-static int dwxgmac2_host_mtl_irq_status(struct mac_device_info *hw, u32 chan)
+static int dwxgmac2_host_mtl_irq_status(struct stmmac_priv *priv,
+ struct mac_device_info *hw, u32 chan)
{
void __iomem *ioaddr = hw->pcsr;
int ret = 0;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
index b1f0c3984a09..13c347ee8be9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
@@ -8,7 +8,8 @@
#include "common.h"
#include "dwxgmac2.h"
-static int dwxgmac2_get_tx_status(void *data, struct stmmac_extra_stats *x,
+static int dwxgmac2_get_tx_status(struct net_device_stats *stats,
+ struct stmmac_extra_stats *x,
struct dma_desc *p, void __iomem *ioaddr)
{
unsigned int tdes3 = le32_to_cpu(p->des3);
@@ -22,7 +23,8 @@ static int dwxgmac2_get_tx_status(void *data, struct stmmac_extra_stats *x,
return ret;
}
-static int dwxgmac2_get_rx_status(void *data, struct stmmac_extra_stats *x,
+static int dwxgmac2_get_rx_status(struct net_device_stats *stats,
+ struct stmmac_extra_stats *x,
struct dma_desc *p)
{
unsigned int rdes3 = le32_to_cpu(p->des3);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
index 5e98355f422b..dfd53264e036 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
@@ -33,7 +33,8 @@ static void dwxgmac2_dma_init(void __iomem *ioaddr,
writel(value, ioaddr + XGMAC_DMA_SYSBUS_MODE);
}
-static void dwxgmac2_dma_init_chan(void __iomem *ioaddr,
+static void dwxgmac2_dma_init_chan(struct stmmac_priv *priv,
+ void __iomem *ioaddr,
struct stmmac_dma_cfg *dma_cfg, u32 chan)
{
u32 value = readl(ioaddr + XGMAC_DMA_CH_CONTROL(chan));
@@ -45,7 +46,8 @@ static void dwxgmac2_dma_init_chan(void __iomem *ioaddr,
writel(XGMAC_DMA_INT_DEFAULT_EN, ioaddr + XGMAC_DMA_CH_INT_EN(chan));
}
-static void dwxgmac2_dma_init_rx_chan(void __iomem *ioaddr,
+static void dwxgmac2_dma_init_rx_chan(struct stmmac_priv *priv,
+ void __iomem *ioaddr,
struct stmmac_dma_cfg *dma_cfg,
dma_addr_t phy, u32 chan)
{
@@ -61,7 +63,8 @@ static void dwxgmac2_dma_init_rx_chan(void __iomem *ioaddr,
writel(lower_32_bits(phy), ioaddr + XGMAC_DMA_CH_RxDESC_LADDR(chan));
}
-static void dwxgmac2_dma_init_tx_chan(void __iomem *ioaddr,
+static void dwxgmac2_dma_init_tx_chan(struct stmmac_priv *priv,
+ void __iomem *ioaddr,
struct stmmac_dma_cfg *dma_cfg,
dma_addr_t phy, u32 chan)
{
@@ -131,7 +134,8 @@ static void dwxgmac2_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
writel(XGMAC_RDPS, ioaddr + XGMAC_RX_EDMA_CTRL);
}
-static void dwxgmac2_dma_dump_regs(void __iomem *ioaddr, u32 *reg_space)
+static void dwxgmac2_dma_dump_regs(struct stmmac_priv *priv,
+ void __iomem *ioaddr, u32 *reg_space)
{
int i;
@@ -139,8 +143,8 @@ static void dwxgmac2_dma_dump_regs(void __iomem *ioaddr, u32 *reg_space)
reg_space[i] = readl(ioaddr + i * 4);
}
-static void dwxgmac2_dma_rx_mode(void __iomem *ioaddr, int mode,
- u32 channel, int fifosz, u8 qmode)
+static void dwxgmac2_dma_rx_mode(struct stmmac_priv *priv, void __iomem *ioaddr,
+ int mode, u32 channel, int fifosz, u8 qmode)
{
u32 value = readl(ioaddr + XGMAC_MTL_RXQ_OPMODE(channel));
unsigned int rqs = fifosz / 256 - 1;
@@ -205,8 +209,8 @@ static void dwxgmac2_dma_rx_mode(void __iomem *ioaddr, int mode,
writel(value | XGMAC_RXOIE, ioaddr + XGMAC_MTL_QINTEN(channel));
}
-static void dwxgmac2_dma_tx_mode(void __iomem *ioaddr, int mode,
- u32 channel, int fifosz, u8 qmode)
+static void dwxgmac2_dma_tx_mode(struct stmmac_priv *priv, void __iomem *ioaddr,
+ int mode, u32 channel, int fifosz, u8 qmode)
{
u32 value = readl(ioaddr + XGMAC_MTL_TXQ_OPMODE(channel));
unsigned int tqs = fifosz / 256 - 1;
@@ -248,7 +252,8 @@ static void dwxgmac2_dma_tx_mode(void __iomem *ioaddr, int mode,
writel(value, ioaddr + XGMAC_MTL_TXQ_OPMODE(channel));
}
-static void dwxgmac2_enable_dma_irq(void __iomem *ioaddr, u32 chan,
+static void dwxgmac2_enable_dma_irq(struct stmmac_priv *priv,
+ void __iomem *ioaddr, u32 chan,
bool rx, bool tx)
{
u32 value = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
@@ -261,7 +266,8 @@ static void dwxgmac2_enable_dma_irq(void __iomem *ioaddr, u32 chan,
writel(value, ioaddr + XGMAC_DMA_CH_INT_EN(chan));
}
-static void dwxgmac2_disable_dma_irq(void __iomem *ioaddr, u32 chan,
+static void dwxgmac2_disable_dma_irq(struct stmmac_priv *priv,
+ void __iomem *ioaddr, u32 chan,
bool rx, bool tx)
{
u32 value = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
@@ -274,7 +280,8 @@ static void dwxgmac2_disable_dma_irq(void __iomem *ioaddr, u32 chan,
writel(value, ioaddr + XGMAC_DMA_CH_INT_EN(chan));
}
-static void dwxgmac2_dma_start_tx(void __iomem *ioaddr, u32 chan)
+static void dwxgmac2_dma_start_tx(struct stmmac_priv *priv,
+ void __iomem *ioaddr, u32 chan)
{
u32 value;
@@ -287,7 +294,8 @@ static void dwxgmac2_dma_start_tx(void __iomem *ioaddr, u32 chan)
writel(value, ioaddr + XGMAC_TX_CONFIG);
}
-static void dwxgmac2_dma_stop_tx(void __iomem *ioaddr, u32 chan)
+static void dwxgmac2_dma_stop_tx(struct stmmac_priv *priv, void __iomem *ioaddr,
+ u32 chan)
{
u32 value;
@@ -300,7 +308,8 @@ static void dwxgmac2_dma_stop_tx(void __iomem *ioaddr, u32 chan)
writel(value, ioaddr + XGMAC_TX_CONFIG);
}
-static void dwxgmac2_dma_start_rx(void __iomem *ioaddr, u32 chan)
+static void dwxgmac2_dma_start_rx(struct stmmac_priv *priv,
+ void __iomem *ioaddr, u32 chan)
{
u32 value;
@@ -313,7 +322,8 @@ static void dwxgmac2_dma_start_rx(void __iomem *ioaddr, u32 chan)
writel(value, ioaddr + XGMAC_RX_CONFIG);
}
-static void dwxgmac2_dma_stop_rx(void __iomem *ioaddr, u32 chan)
+static void dwxgmac2_dma_stop_rx(struct stmmac_priv *priv, void __iomem *ioaddr,
+ u32 chan)
{
u32 value;
@@ -322,7 +332,8 @@ static void dwxgmac2_dma_stop_rx(void __iomem *ioaddr, u32 chan)
writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
}
-static int dwxgmac2_dma_interrupt(void __iomem *ioaddr,
+static int dwxgmac2_dma_interrupt(struct stmmac_priv *priv,
+ void __iomem *ioaddr,
struct stmmac_extra_stats *x, u32 chan,
u32 dir)
{
@@ -449,32 +460,38 @@ static int dwxgmac2_get_hw_feature(void __iomem *ioaddr,
return 0;
}
-static void dwxgmac2_rx_watchdog(void __iomem *ioaddr, u32 riwt, u32 queue)
+static void dwxgmac2_rx_watchdog(struct stmmac_priv *priv, void __iomem *ioaddr,
+ u32 riwt, u32 queue)
{
writel(riwt & XGMAC_RWT, ioaddr + XGMAC_DMA_CH_Rx_WATCHDOG(queue));
}
-static void dwxgmac2_set_rx_ring_len(void __iomem *ioaddr, u32 len, u32 chan)
+static void dwxgmac2_set_rx_ring_len(struct stmmac_priv *priv,
+ void __iomem *ioaddr, u32 len, u32 chan)
{
writel(len, ioaddr + XGMAC_DMA_CH_RxDESC_RING_LEN(chan));
}
-static void dwxgmac2_set_tx_ring_len(void __iomem *ioaddr, u32 len, u32 chan)
+static void dwxgmac2_set_tx_ring_len(struct stmmac_priv *priv,
+ void __iomem *ioaddr, u32 len, u32 chan)
{
writel(len, ioaddr + XGMAC_DMA_CH_TxDESC_RING_LEN(chan));
}
-static void dwxgmac2_set_rx_tail_ptr(void __iomem *ioaddr, u32 ptr, u32 chan)
+static void dwxgmac2_set_rx_tail_ptr(struct stmmac_priv *priv,
+ void __iomem *ioaddr, u32 ptr, u32 chan)
{
writel(ptr, ioaddr + XGMAC_DMA_CH_RxDESC_TAIL_LPTR(chan));
}
-static void dwxgmac2_set_tx_tail_ptr(void __iomem *ioaddr, u32 ptr, u32 chan)
+static void dwxgmac2_set_tx_tail_ptr(struct stmmac_priv *priv,
+ void __iomem *ioaddr, u32 ptr, u32 chan)
{
writel(ptr, ioaddr + XGMAC_DMA_CH_TxDESC_TAIL_LPTR(chan));
}
-static void dwxgmac2_enable_tso(void __iomem *ioaddr, bool en, u32 chan)
+static void dwxgmac2_enable_tso(struct stmmac_priv *priv, void __iomem *ioaddr,
+ bool en, u32 chan)
{
u32 value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
@@ -486,7 +503,8 @@ static void dwxgmac2_enable_tso(void __iomem *ioaddr, bool en, u32 chan)
writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
}
-static void dwxgmac2_qmode(void __iomem *ioaddr, u32 channel, u8 qmode)
+static void dwxgmac2_qmode(struct stmmac_priv *priv, void __iomem *ioaddr,
+ u32 channel, u8 qmode)
{
u32 value = readl(ioaddr + XGMAC_MTL_TXQ_OPMODE(channel));
u32 flow = readl(ioaddr + XGMAC_RX_FLOW_CTRL);
@@ -503,7 +521,8 @@ static void dwxgmac2_qmode(void __iomem *ioaddr, u32 channel, u8 qmode)
writel(value, ioaddr + XGMAC_MTL_TXQ_OPMODE(channel));
}
-static void dwxgmac2_set_bfsize(void __iomem *ioaddr, int bfsize, u32 chan)
+static void dwxgmac2_set_bfsize(struct stmmac_priv *priv, void __iomem *ioaddr,
+ int bfsize, u32 chan)
{
u32 value;
@@ -513,7 +532,8 @@ static void dwxgmac2_set_bfsize(void __iomem *ioaddr, int bfsize, u32 chan)
writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
}
-static void dwxgmac2_enable_sph(void __iomem *ioaddr, bool en, u32 chan)
+static void dwxgmac2_enable_sph(struct stmmac_priv *priv, void __iomem *ioaddr,
+ bool en, u32 chan)
{
u32 value = readl(ioaddr + XGMAC_RX_CONFIG);
@@ -529,7 +549,8 @@ static void dwxgmac2_enable_sph(void __iomem *ioaddr, bool en, u32 chan)
writel(value, ioaddr + XGMAC_DMA_CH_CONTROL(chan));
}
-static int dwxgmac2_enable_tbs(void __iomem *ioaddr, bool en, u32 chan)
+static int dwxgmac2_enable_tbs(struct stmmac_priv *priv, void __iomem *ioaddr,
+ bool en, u32 chan)
{
u32 value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index 1bcbbd724fb5..a91d8f13a931 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -12,10 +12,10 @@
#include "common.h"
#include "descs_com.h"
-static int enh_desc_get_tx_status(void *data, struct stmmac_extra_stats *x,
+static int enh_desc_get_tx_status(struct net_device_stats *stats,
+ struct stmmac_extra_stats *x,
struct dma_desc *p, void __iomem *ioaddr)
{
- struct net_device_stats *stats = (struct net_device_stats *)data;
unsigned int tdes0 = le32_to_cpu(p->des0);
int ret = tx_done;
@@ -117,7 +117,8 @@ static int enh_desc_coe_rdes0(int ipc_err, int type, int payload_err)
return ret;
}
-static void enh_desc_get_ext_status(void *data, struct stmmac_extra_stats *x,
+static void enh_desc_get_ext_status(struct net_device_stats *stats,
+ struct stmmac_extra_stats *x,
struct dma_extended_desc *p)
{
unsigned int rdes0 = le32_to_cpu(p->basic.des0);
@@ -181,10 +182,10 @@ static void enh_desc_get_ext_status(void *data, struct stmmac_extra_stats *x,
}
}
-static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
+static int enh_desc_get_rx_status(struct net_device_stats *stats,
+ struct stmmac_extra_stats *x,
struct dma_desc *p)
{
- struct net_device_stats *stats = (struct net_device_stats *)data;
unsigned int rdes0 = le32_to_cpu(p->des0);
int ret = good_frame;
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index 1cc286b000b6..6ee7cf07cfd7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -26,6 +26,7 @@
})
struct stmmac_extra_stats;
+struct stmmac_priv;
struct stmmac_safety_stats;
struct dma_desc;
struct dma_extended_desc;
@@ -56,8 +57,9 @@ struct stmmac_desc_ops {
/* Last tx segment reports the transmit status */
int (*get_tx_ls)(struct dma_desc *p);
/* Return the transmit status looking at the TDES1 */
- int (*tx_status)(void *data, struct stmmac_extra_stats *x,
- struct dma_desc *p, void __iomem *ioaddr);
+ int (*tx_status)(struct net_device_stats *stats,
+ struct stmmac_extra_stats *x,
+ struct dma_desc *p, void __iomem *ioaddr);
/* Get the buffer size from the descriptor */
int (*get_tx_len)(struct dma_desc *p);
/* Handle extra events on specific interrupts hw dependent */
@@ -65,10 +67,12 @@ struct stmmac_desc_ops {
/* Get the receive frame size */
int (*get_rx_frame_len)(struct dma_desc *p, int rx_coe_type);
/* Return the reception status looking at the RDES1 */
- int (*rx_status)(void *data, struct stmmac_extra_stats *x,
- struct dma_desc *p);
- void (*rx_extended_status)(void *data, struct stmmac_extra_stats *x,
- struct dma_extended_desc *p);
+ int (*rx_status)(struct net_device_stats *stats,
+ struct stmmac_extra_stats *x,
+ struct dma_desc *p);
+ void (*rx_extended_status)(struct net_device_stats *stats,
+ struct stmmac_extra_stats *x,
+ struct dma_extended_desc *p);
/* Set tx timestamp enable bit */
void (*enable_tx_timestamp) (struct dma_desc *p);
/* get tx timestamp status */
@@ -168,108 +172,125 @@ struct stmmac_dma_ops {
int (*reset)(void __iomem *ioaddr);
void (*init)(void __iomem *ioaddr, struct stmmac_dma_cfg *dma_cfg,
int atds);
- void (*init_chan)(void __iomem *ioaddr,
+ void (*init_chan)(struct stmmac_priv *priv, void __iomem *ioaddr,
struct stmmac_dma_cfg *dma_cfg, u32 chan);
- void (*init_rx_chan)(void __iomem *ioaddr,
+ void (*init_rx_chan)(struct stmmac_priv *priv, void __iomem *ioaddr,
struct stmmac_dma_cfg *dma_cfg,
dma_addr_t phy, u32 chan);
- void (*init_tx_chan)(void __iomem *ioaddr,
+ void (*init_tx_chan)(struct stmmac_priv *priv, void __iomem *ioaddr,
struct stmmac_dma_cfg *dma_cfg,
dma_addr_t phy, u32 chan);
/* Configure the AXI Bus Mode Register */
void (*axi)(void __iomem *ioaddr, struct stmmac_axi *axi);
/* Dump DMA registers */
- void (*dump_regs)(void __iomem *ioaddr, u32 *reg_space);
- void (*dma_rx_mode)(void __iomem *ioaddr, int mode, u32 channel,
- int fifosz, u8 qmode);
- void (*dma_tx_mode)(void __iomem *ioaddr, int mode, u32 channel,
+ void (*dump_regs)(struct stmmac_priv *priv, void __iomem *ioaddr,
+ u32 *reg_space);
+ void (*dma_rx_mode)(struct stmmac_priv *priv, void __iomem *ioaddr,
+ int mode, u32 channel,
int fifosz, u8 qmode);
+ void (*dma_tx_mode)(struct stmmac_priv *priv, void __iomem *ioaddr,
+ int mode, u32 channel, int fifosz, u8 qmode);
/* To track extra statistics (if supported) */
- void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x,
- void __iomem *ioaddr);
+ void (*dma_diagnostic_fr)(struct net_device_stats *stats,
+ struct stmmac_extra_stats *x,
+ void __iomem *ioaddr);
void (*enable_dma_transmission) (void __iomem *ioaddr);
- void (*enable_dma_irq)(void __iomem *ioaddr, u32 chan,
- bool rx, bool tx);
- void (*disable_dma_irq)(void __iomem *ioaddr, u32 chan,
- bool rx, bool tx);
- void (*start_tx)(void __iomem *ioaddr, u32 chan);
- void (*stop_tx)(void __iomem *ioaddr, u32 chan);
- void (*start_rx)(void __iomem *ioaddr, u32 chan);
- void (*stop_rx)(void __iomem *ioaddr, u32 chan);
- int (*dma_interrupt) (void __iomem *ioaddr,
- struct stmmac_extra_stats *x, u32 chan, u32 dir);
+ void (*enable_dma_irq)(struct stmmac_priv *priv, void __iomem *ioaddr,
+ u32 chan, bool rx, bool tx);
+ void (*disable_dma_irq)(struct stmmac_priv *priv, void __iomem *ioaddr,
+ u32 chan, bool rx, bool tx);
+ void (*start_tx)(struct stmmac_priv *priv, void __iomem *ioaddr,
+ u32 chan);
+ void (*stop_tx)(struct stmmac_priv *priv, void __iomem *ioaddr,
+ u32 chan);
+ void (*start_rx)(struct stmmac_priv *priv, void __iomem *ioaddr,
+ u32 chan);
+ void (*stop_rx)(struct stmmac_priv *priv, void __iomem *ioaddr,
+ u32 chan);
+ int (*dma_interrupt)(struct stmmac_priv *priv, void __iomem *ioaddr,
+ struct stmmac_extra_stats *x, u32 chan, u32 dir);
/* If supported then get the optional core features */
int (*get_hw_feature)(void __iomem *ioaddr,
struct dma_features *dma_cap);
/* Program the HW RX Watchdog */
- void (*rx_watchdog)(void __iomem *ioaddr, u32 riwt, u32 queue);
- void (*set_tx_ring_len)(void __iomem *ioaddr, u32 len, u32 chan);
- void (*set_rx_ring_len)(void __iomem *ioaddr, u32 len, u32 chan);
- void (*set_rx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
- void (*set_tx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
- void (*enable_tso)(void __iomem *ioaddr, bool en, u32 chan);
- void (*qmode)(void __iomem *ioaddr, u32 channel, u8 qmode);
- void (*set_bfsize)(void __iomem *ioaddr, int bfsize, u32 chan);
- void (*enable_sph)(void __iomem *ioaddr, bool en, u32 chan);
- int (*enable_tbs)(void __iomem *ioaddr, bool en, u32 chan);
+ void (*rx_watchdog)(struct stmmac_priv *priv, void __iomem *ioaddr,
+ u32 riwt, u32 queue);
+ void (*set_tx_ring_len)(struct stmmac_priv *priv, void __iomem *ioaddr,
+ u32 len, u32 chan);
+ void (*set_rx_ring_len)(struct stmmac_priv *priv, void __iomem *ioaddr,
+ u32 len, u32 chan);
+ void (*set_rx_tail_ptr)(struct stmmac_priv *priv, void __iomem *ioaddr,
+ u32 tail_ptr, u32 chan);
+ void (*set_tx_tail_ptr)(struct stmmac_priv *priv, void __iomem *ioaddr,
+ u32 tail_ptr, u32 chan);
+ void (*enable_tso)(struct stmmac_priv *priv, void __iomem *ioaddr,
+ bool en, u32 chan);
+ void (*qmode)(struct stmmac_priv *priv, void __iomem *ioaddr,
+ u32 channel, u8 qmode);
+ void (*set_bfsize)(struct stmmac_priv *priv, void __iomem *ioaddr,
+ int bfsize, u32 chan);
+ void (*enable_sph)(struct stmmac_priv *priv, void __iomem *ioaddr,
+ bool en, u32 chan);
+ int (*enable_tbs)(struct stmmac_priv *priv, void __iomem *ioaddr,
+ bool en, u32 chan);
};
#define stmmac_dma_init(__priv, __args...) \
stmmac_do_void_callback(__priv, dma, init, __args)
#define stmmac_init_chan(__priv, __args...) \
- stmmac_do_void_callback(__priv, dma, init_chan, __args)
+ stmmac_do_void_callback(__priv, dma, init_chan, __priv, __args)
#define stmmac_init_rx_chan(__priv, __args...) \
- stmmac_do_void_callback(__priv, dma, init_rx_chan, __args)
+ stmmac_do_void_callback(__priv, dma, init_rx_chan, __priv, __args)
#define stmmac_init_tx_chan(__priv, __args...) \
- stmmac_do_void_callback(__priv, dma, init_tx_chan, __args)
+ stmmac_do_void_callback(__priv, dma, init_tx_chan, __priv, __args)
#define stmmac_axi(__priv, __args...) \
stmmac_do_void_callback(__priv, dma, axi, __args)
#define stmmac_dump_dma_regs(__priv, __args...) \
- stmmac_do_void_callback(__priv, dma, dump_regs, __args)
+ stmmac_do_void_callback(__priv, dma, dump_regs, __priv, __args)
#define stmmac_dma_rx_mode(__priv, __args...) \
- stmmac_do_void_callback(__priv, dma, dma_rx_mode, __args)
+ stmmac_do_void_callback(__priv, dma, dma_rx_mode, __priv, __args)
#define stmmac_dma_tx_mode(__priv, __args...) \
- stmmac_do_void_callback(__priv, dma, dma_tx_mode, __args)
+ stmmac_do_void_callback(__priv, dma, dma_tx_mode, __priv, __args)
#define stmmac_dma_diagnostic_fr(__priv, __args...) \
stmmac_do_void_callback(__priv, dma, dma_diagnostic_fr, __args)
#define stmmac_enable_dma_transmission(__priv, __args...) \
stmmac_do_void_callback(__priv, dma, enable_dma_transmission, __args)
#define stmmac_enable_dma_irq(__priv, __args...) \
- stmmac_do_void_callback(__priv, dma, enable_dma_irq, __args)
+ stmmac_do_void_callback(__priv, dma, enable_dma_irq, __priv, __args)
#define stmmac_disable_dma_irq(__priv, __args...) \
- stmmac_do_void_callback(__priv, dma, disable_dma_irq, __args)
+ stmmac_do_void_callback(__priv, dma, disable_dma_irq, __priv, __args)
#define stmmac_start_tx(__priv, __args...) \
- stmmac_do_void_callback(__priv, dma, start_tx, __args)
+ stmmac_do_void_callback(__priv, dma, start_tx, __priv, __args)
#define stmmac_stop_tx(__priv, __args...) \
- stmmac_do_void_callback(__priv, dma, stop_tx, __args)
+ stmmac_do_void_callback(__priv, dma, stop_tx, __priv, __args)
#define stmmac_start_rx(__priv, __args...) \
- stmmac_do_void_callback(__priv, dma, start_rx, __args)
+ stmmac_do_void_callback(__priv, dma, start_rx, __priv, __args)
#define stmmac_stop_rx(__priv, __args...) \
- stmmac_do_void_callback(__priv, dma, stop_rx, __args)
+ stmmac_do_void_callback(__priv, dma, stop_rx, __priv, __args)
#define stmmac_dma_interrupt_status(__priv, __args...) \
- stmmac_do_callback(__priv, dma, dma_interrupt, __args)
+ stmmac_do_callback(__priv, dma, dma_interrupt, __priv, __args)
#define stmmac_get_hw_feature(__priv, __args...) \
stmmac_do_callback(__priv, dma, get_hw_feature, __args)
#define stmmac_rx_watchdog(__priv, __args...) \
- stmmac_do_void_callback(__priv, dma, rx_watchdog, __args)
+ stmmac_do_void_callback(__priv, dma, rx_watchdog, __priv, __args)
#define stmmac_set_tx_ring_len(__priv, __args...) \
- stmmac_do_void_callback(__priv, dma, set_tx_ring_len, __args)
+ stmmac_do_void_callback(__priv, dma, set_tx_ring_len, __priv, __args)
#define stmmac_set_rx_ring_len(__priv, __args...) \
- stmmac_do_void_callback(__priv, dma, set_rx_ring_len, __args)
+ stmmac_do_void_callback(__priv, dma, set_rx_ring_len, __priv, __args)
#define stmmac_set_rx_tail_ptr(__priv, __args...) \
- stmmac_do_void_callback(__priv, dma, set_rx_tail_ptr, __args)
+ stmmac_do_void_callback(__priv, dma, set_rx_tail_ptr, __priv, __args)
#define stmmac_set_tx_tail_ptr(__priv, __args...) \
- stmmac_do_void_callback(__priv, dma, set_tx_tail_ptr, __args)
+ stmmac_do_void_callback(__priv, dma, set_tx_tail_ptr, __priv, __args)
#define stmmac_enable_tso(__priv, __args...) \
- stmmac_do_void_callback(__priv, dma, enable_tso, __args)
+ stmmac_do_void_callback(__priv, dma, enable_tso, __priv, __args)
#define stmmac_dma_qmode(__priv, __args...) \
- stmmac_do_void_callback(__priv, dma, qmode, __args)
+ stmmac_do_void_callback(__priv, dma, qmode, __priv, __args)
#define stmmac_set_dma_bfsize(__priv, __args...) \
- stmmac_do_void_callback(__priv, dma, set_bfsize, __args)
+ stmmac_do_void_callback(__priv, dma, set_bfsize, __priv, __args)
#define stmmac_enable_sph(__priv, __args...) \
- stmmac_do_void_callback(__priv, dma, enable_sph, __args)
+ stmmac_do_void_callback(__priv, dma, enable_sph, __priv, __args)
#define stmmac_enable_tbs(__priv, __args...) \
- stmmac_do_callback(__priv, dma, enable_tbs, __args)
+ stmmac_do_callback(__priv, dma, enable_tbs, __priv, __args)
struct mac_device_info;
struct net_device;
@@ -301,21 +322,23 @@ struct stmmac_ops {
/* Program TX Algorithms */
void (*prog_mtl_tx_algorithms)(struct mac_device_info *hw, u32 tx_alg);
/* Set MTL TX queues weight */
- void (*set_mtl_tx_queue_weight)(struct mac_device_info *hw,
+ void (*set_mtl_tx_queue_weight)(struct stmmac_priv *priv,
+ struct mac_device_info *hw,
u32 weight, u32 queue);
/* RX MTL queue to RX dma mapping */
void (*map_mtl_to_dma)(struct mac_device_info *hw, u32 queue, u32 chan);
/* Configure AV Algorithm */
- void (*config_cbs)(struct mac_device_info *hw, u32 send_slope,
- u32 idle_slope, u32 high_credit, u32 low_credit,
- u32 queue);
+ void (*config_cbs)(struct stmmac_priv *priv, struct mac_device_info *hw,
+ u32 send_slope, u32 idle_slope, u32 high_credit,
+ u32 low_credit, u32 queue);
/* Dump MAC registers */
void (*dump_regs)(struct mac_device_info *hw, u32 *reg_space);
/* Handle extra events on specific interrupts hw dependent */
int (*host_irq_status)(struct mac_device_info *hw,
struct stmmac_extra_stats *x);
/* Handle MTL interrupts */
- int (*host_mtl_irq_status)(struct mac_device_info *hw, u32 chan);
+ int (*host_mtl_irq_status)(struct stmmac_priv *priv,
+ struct mac_device_info *hw, u32 chan);
/* Multicast filter setting */
void (*set_filter)(struct mac_device_info *hw, struct net_device *dev);
/* Flow control setting */
@@ -335,8 +358,9 @@ struct stmmac_ops {
void (*set_eee_lpi_entry_timer)(struct mac_device_info *hw, int et);
void (*set_eee_timer)(struct mac_device_info *hw, int ls, int tw);
void (*set_eee_pls)(struct mac_device_info *hw, int link);
- void (*debug)(void __iomem *ioaddr, struct stmmac_extra_stats *x,
- u32 rx_queues, u32 tx_queues);
+ void (*debug)(struct stmmac_priv *priv, void __iomem *ioaddr,
+ struct stmmac_extra_stats *x, u32 rx_queues,
+ u32 tx_queues);
/* PCS calls */
void (*pcs_ctrl_ane)(void __iomem *ioaddr, bool ane, bool srgmi_ral,
bool loopback);
@@ -416,17 +440,17 @@ struct stmmac_ops {
#define stmmac_prog_mtl_tx_algorithms(__priv, __args...) \
stmmac_do_void_callback(__priv, mac, prog_mtl_tx_algorithms, __args)
#define stmmac_set_mtl_tx_queue_weight(__priv, __args...) \
- stmmac_do_void_callback(__priv, mac, set_mtl_tx_queue_weight, __args)
+ stmmac_do_void_callback(__priv, mac, set_mtl_tx_queue_weight, __priv, __args)
#define stmmac_map_mtl_to_dma(__priv, __args...) \
stmmac_do_void_callback(__priv, mac, map_mtl_to_dma, __args)
#define stmmac_config_cbs(__priv, __args...) \
- stmmac_do_void_callback(__priv, mac, config_cbs, __args)
+ stmmac_do_void_callback(__priv, mac, config_cbs, __priv, __args)
#define stmmac_dump_mac_regs(__priv, __args...) \
stmmac_do_void_callback(__priv, mac, dump_regs, __args)
#define stmmac_host_irq_status(__priv, __args...) \
stmmac_do_callback(__priv, mac, host_irq_status, __args)
#define stmmac_host_mtl_irq_status(__priv, __args...) \
- stmmac_do_callback(__priv, mac, host_mtl_irq_status, __args)
+ stmmac_do_callback(__priv, mac, host_mtl_irq_status, __priv, __args)
#define stmmac_set_filter(__priv, __args...) \
stmmac_do_void_callback(__priv, mac, set_filter, __args)
#define stmmac_flow_ctrl(__priv, __args...) \
@@ -448,11 +472,11 @@ struct stmmac_ops {
#define stmmac_set_eee_pls(__priv, __args...) \
stmmac_do_void_callback(__priv, mac, set_eee_pls, __args)
#define stmmac_mac_debug(__priv, __args...) \
- stmmac_do_void_callback(__priv, mac, debug, __args)
+ stmmac_do_void_callback(__priv, mac, debug, __priv, __args)
#define stmmac_pcs_ctrl_ane(__priv, __args...) \
stmmac_do_void_callback(__priv, mac, pcs_ctrl_ane, __args)
#define stmmac_pcs_rane(__priv, __args...) \
- stmmac_do_void_callback(__priv, mac, pcs_rane, __args)
+ stmmac_do_void_callback(__priv, mac, pcs_rane, __priv, __args)
#define stmmac_pcs_get_adv_lp(__priv, __args...) \
stmmac_do_void_callback(__priv, mac, pcs_get_adv_lp, __args)
#define stmmac_safety_feat_config(__priv, __args...) \
@@ -500,8 +524,6 @@ struct stmmac_ops {
#define stmmac_fpe_irq_status(__priv, __args...) \
stmmac_do_callback(__priv, mac, fpe_irq_status, __args)
-struct stmmac_priv;
-
/* PTP and HW Timer helpers */
struct stmmac_hwtimestamp {
void (*config_hw_tstamping) (void __iomem *ioaddr, u32 data);
@@ -533,16 +555,20 @@ struct stmmac_hwtimestamp {
#define stmmac_timestamp_interrupt(__priv, __args...) \
stmmac_do_void_callback(__priv, ptp, timestamp_interrupt, __args)
+struct stmmac_tx_queue;
+struct stmmac_rx_queue;
+
/* Helpers to manage the descriptors for chain and ring modes */
struct stmmac_mode_ops {
void (*init) (void *des, dma_addr_t phy_addr, unsigned int size,
unsigned int extend_desc);
unsigned int (*is_jumbo_frm) (int len, int ehn_desc);
- int (*jumbo_frm)(void *priv, struct sk_buff *skb, int csum);
+ int (*jumbo_frm)(struct stmmac_tx_queue *tx_q, struct sk_buff *skb,
+ int csum);
int (*set_16kib_bfsize)(int mtu);
void (*init_desc3)(struct dma_desc *p);
- void (*refill_desc3) (void *priv, struct dma_desc *p);
- void (*clean_desc3) (void *priv, struct dma_desc *p);
+ void (*refill_desc3)(struct stmmac_rx_queue *rx_q, struct dma_desc *p);
+ void (*clean_desc3)(struct stmmac_tx_queue *tx_q, struct dma_desc *p);
};
#define stmmac_mode_init(__priv, __args...) \
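The hunks above thread a struct stmmac_priv * through the dma and mac callbacks, so each wrapper macro now injects __priv as the callback's first argument. A minimal sketch of how such a guarded dispatch macro is typically defined (illustrative only; the real stmmac_do_void_callback definition sits in hwif.h outside these hunks):

#define stmmac_do_void_callback(__priv, __module, __cname, __arg0, __args...) \
({ \
	int __result = -EINVAL; \
	/* Dispatch only if the ops table and the method are populated */ \
	if ((__priv)->hw->__module && (__priv)->hw->__module->__cname) { \
		(__priv)->hw->__module->__cname((__arg0), ##__args); \
		__result = 0; \
	} \
	__result; \
})

With this shape, rewriting a wrapper such as stmmac_init_chan() to pass "__priv, __args" is enough to satisfy the new first parameter at every call site without touching the callers.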
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
index e3da4da242ee..350e6670a576 100644
--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
@@ -12,10 +12,10 @@
#include "common.h"
#include "descs_com.h"
-static int ndesc_get_tx_status(void *data, struct stmmac_extra_stats *x,
+static int ndesc_get_tx_status(struct net_device_stats *stats,
+ struct stmmac_extra_stats *x,
struct dma_desc *p, void __iomem *ioaddr)
{
- struct net_device_stats *stats = (struct net_device_stats *)data;
unsigned int tdes0 = le32_to_cpu(p->des0);
unsigned int tdes1 = le32_to_cpu(p->des1);
int ret = tx_done;
@@ -70,12 +70,12 @@ static int ndesc_get_tx_len(struct dma_desc *p)
* and, if required, updates the multicast statistics.
* In case of success, it returns good_frame because the GMAC device
* is supposed to be able to compute the csum in HW. */
-static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
+static int ndesc_get_rx_status(struct net_device_stats *stats,
+ struct stmmac_extra_stats *x,
struct dma_desc *p)
{
int ret = good_frame;
unsigned int rdes0 = le32_to_cpu(p->des0);
- struct net_device_stats *stats = (struct net_device_stats *)data;
if (unlikely(rdes0 & RDES0_OWN))
return dma_own;
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
index 2b5b17d8b8a0..d218412ca832 100644
--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
@@ -14,9 +14,9 @@
#include "stmmac.h"
-static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
+static int jumbo_frm(struct stmmac_tx_queue *tx_q, struct sk_buff *skb,
+ int csum)
{
- struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)p;
unsigned int nopaged_len = skb_headlen(skb);
struct stmmac_priv *priv = tx_q->priv_data;
unsigned int entry = tx_q->cur_tx;
@@ -101,9 +101,8 @@ static unsigned int is_jumbo_frm(int len, int enh_desc)
return ret;
}
-static void refill_desc3(void *priv_ptr, struct dma_desc *p)
+static void refill_desc3(struct stmmac_rx_queue *rx_q, struct dma_desc *p)
{
- struct stmmac_rx_queue *rx_q = priv_ptr;
struct stmmac_priv *priv = rx_q->priv_data;
/* Fill DES3 in case of RING mode */
@@ -117,9 +116,8 @@ static void init_desc3(struct dma_desc *p)
p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB);
}
-static void clean_desc3(void *priv_ptr, struct dma_desc *p)
+static void clean_desc3(struct stmmac_tx_queue *tx_q, struct dma_desc *p)
{
- struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)priv_ptr;
struct stmmac_priv *priv = tx_q->priv_data;
unsigned int entry = tx_q->dirty_tx;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 3d15e1e92e18..07ea5ab0a60b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -92,6 +92,13 @@ struct stmmac_rx_buffer {
dma_addr_t sec_addr;
};
+struct stmmac_xdp_buff {
+ struct xdp_buff xdp;
+ struct stmmac_priv *priv;
+ struct dma_desc *desc;
+ struct dma_desc *ndesc;
+};
+
struct stmmac_rx_queue {
u32 rx_count_frames;
u32 queue_index;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 35c8dd92d369..2ae73ab842d4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -393,19 +393,10 @@ stmmac_ethtool_set_link_ksettings(struct net_device *dev,
if (priv->hw->pcs & STMMAC_PCS_RGMII ||
priv->hw->pcs & STMMAC_PCS_SGMII) {
- u32 mask = ADVERTISED_Autoneg | ADVERTISED_Pause;
-
/* Only support ANE */
if (cmd->base.autoneg != AUTONEG_ENABLE)
return -EINVAL;
- mask &= (ADVERTISED_1000baseT_Half |
- ADVERTISED_1000baseT_Full |
- ADVERTISED_100baseT_Half |
- ADVERTISED_100baseT_Full |
- ADVERTISED_10baseT_Half |
- ADVERTISED_10baseT_Full);
-
mutex_lock(&priv->lock);
stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
mutex_unlock(&priv->lock);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 17310ade88dd..f116e4ae293b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1134,20 +1134,26 @@ static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
static int stmmac_init_phy(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
+ struct fwnode_handle *phy_fwnode;
struct fwnode_handle *fwnode;
int ret;
+ if (!phylink_expects_phy(priv->phylink))
+ return 0;
+
fwnode = of_fwnode_handle(priv->plat->phylink_node);
if (!fwnode)
fwnode = dev_fwnode(priv->device);
if (fwnode)
- ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
+ phy_fwnode = fwnode_get_phy_node(fwnode);
+ else
+ phy_fwnode = NULL;
/* Some DT bindings do not set-up the PHY handle. Let's try to
* manually parse it
*/
- if (!fwnode || ret) {
+ if (!phy_fwnode || IS_ERR(phy_fwnode)) {
int addr = priv->plat->phy_addr;
struct phy_device *phydev;
@@ -1163,6 +1169,9 @@ static int stmmac_init_phy(struct net_device *dev)
}
ret = phylink_connect_phy(priv->phylink, phydev);
+ } else {
+ fwnode_handle_put(phy_fwnode);
+ ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
}
if (!priv->plat->pmt) {
@@ -1605,6 +1614,12 @@ static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
int i;
+ /* struct stmmac_xdp_buff uses the cb field (maximum size of 24 bytes)
+ * in struct xdp_buff_xsk to stash driver-specific information. Thus,
+ * use this macro to make sure there are no size violations.
+ */
+ XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);
+
for (i = 0; i < dma_conf->dma_rx_size; i++) {
struct stmmac_rx_buffer *buf;
dma_addr_t dma_addr;
@@ -4989,6 +5004,16 @@ static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
return ret;
}
+static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
+{
+ /* In the XDP zero-copy data path, the xdp field in struct xdp_buff_xsk
+ * represents the incoming packet, whereas the cb field in the same
+ * structure stores driver-specific info. Thus, struct stmmac_xdp_buff
+ * is laid on top of the xdp and cb fields of struct xdp_buff_xsk.
+ */
+ return (struct stmmac_xdp_buff *)xdp;
+}
+
static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
{
struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
@@ -5018,6 +5043,7 @@ static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
}
while (count < limit) {
struct stmmac_rx_buffer *buf;
+ struct stmmac_xdp_buff *ctx;
unsigned int buf1_len = 0;
struct dma_desc *np, *p;
int entry;
@@ -5103,6 +5129,11 @@ read_again:
goto read_again;
}
+ ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
+ ctx->priv = priv;
+ ctx->desc = p;
+ ctx->ndesc = np;
+
/* XDP ZC Frame only support primary buffers for now */
buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
len += buf1_len;
@@ -5181,7 +5212,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
enum dma_data_direction dma_dir;
unsigned int desc_size;
struct sk_buff *skb = NULL;
- struct xdp_buff xdp;
+ struct stmmac_xdp_buff ctx;
int xdp_status = 0;
int buf_sz;
@@ -5302,17 +5333,22 @@ read_again:
dma_sync_single_for_cpu(priv->device, buf->addr,
buf1_len, dma_dir);
- xdp_init_buff(&xdp, buf_sz, &rx_q->xdp_rxq);
- xdp_prepare_buff(&xdp, page_address(buf->page),
- buf->page_offset, buf1_len, false);
+ xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq);
+ xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
+ buf->page_offset, buf1_len, true);
- pre_len = xdp.data_end - xdp.data_hard_start -
+ pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
buf->page_offset;
- skb = stmmac_xdp_run_prog(priv, &xdp);
+
+ ctx.priv = priv;
+ ctx.desc = p;
+ ctx.ndesc = np;
+
+ skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
/* Due xdp_adjust_tail: DMA sync for_device
* cover max len CPU touch
*/
- sync_len = xdp.data_end - xdp.data_hard_start -
+ sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
buf->page_offset;
sync_len = max(sync_len, pre_len);
@@ -5322,7 +5358,7 @@ read_again:
if (xdp_res & STMMAC_XDP_CONSUMED) {
page_pool_put_page(rx_q->page_pool,
- virt_to_head_page(xdp.data),
+ virt_to_head_page(ctx.xdp.data),
sync_len, true);
buf->page = NULL;
priv->dev->stats.rx_dropped++;
@@ -5350,7 +5386,7 @@ read_again:
if (!skb) {
/* XDP program may expand or reduce tail */
- buf1_len = xdp.data_end - xdp.data;
+ buf1_len = ctx.xdp.data_end - ctx.xdp.data;
skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
if (!skb) {
@@ -5360,7 +5396,7 @@ read_again:
}
/* XDP program may adjust header */
- skb_copy_to_linear_data(skb, xdp.data, buf1_len);
+ skb_copy_to_linear_data(skb, ctx.xdp.data, buf1_len);
skb_put(skb, buf1_len);
/* Data payload copied into SKB, page ready for recycle */
@@ -6341,6 +6377,10 @@ static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid
bool is_double = false;
int ret;
+ ret = pm_runtime_resume_and_get(priv->device);
+ if (ret < 0)
+ return ret;
+
if (be16_to_cpu(proto) == ETH_P_8021AD)
is_double = true;
@@ -6348,16 +6388,18 @@ static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid
ret = stmmac_vlan_update(priv, is_double);
if (ret) {
clear_bit(vid, priv->active_vlans);
- return ret;
+ goto err_pm_put;
}
if (priv->hw->num_vlan) {
ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
if (ret)
- return ret;
+ goto err_pm_put;
}
+err_pm_put:
+ pm_runtime_put(priv->device);
- return 0;
+ return ret;
}
static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
@@ -6622,6 +6664,8 @@ int stmmac_xdp_open(struct net_device *dev)
goto init_error;
}
+ stmmac_reset_queues_param(priv);
+
/* DMA CSR Channel configuration */
for (chan = 0; chan < dma_csr_ch; chan++) {
stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
@@ -6948,7 +6992,7 @@ static void stmmac_napi_del(struct net_device *dev)
int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
{
struct stmmac_priv *priv = netdev_priv(dev);
- int ret = 0;
+ int ret = 0, i;
if (netif_running(dev))
stmmac_release(dev);
@@ -6957,6 +7001,10 @@ int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
priv->plat->rx_queues_to_use = rx_cnt;
priv->plat->tx_queues_to_use = tx_cnt;
+ if (!netif_is_rxfh_configured(dev))
+ for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
+ priv->rss.table[i] = ethtool_rxfh_indir_default(i,
+ rx_cnt);
stmmac_napi_add(dev);
@@ -7045,6 +7093,37 @@ void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
}
}
+static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
+{
+ const struct stmmac_xdp_buff *ctx = (void *)_ctx;
+ struct dma_desc *desc_contains_ts = ctx->desc;
+ struct stmmac_priv *priv = ctx->priv;
+ struct dma_desc *ndesc = ctx->ndesc;
+ struct dma_desc *desc = ctx->desc;
+ u64 ns = 0;
+
+ if (!priv->hwts_rx_en)
+ return -ENODATA;
+
+ /* For GMAC4, the valid timestamp is held in the CTX next descriptor. */
+ if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
+ desc_contains_ts = ndesc;
+
+ /* Check if timestamp is available */
+ if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
+ stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
+ ns -= priv->plat->cdc_error_adj;
+ *timestamp = ns_to_ktime(ns);
+ return 0;
+ }
+
+ return -ENODATA;
+}
+
+static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
+ .xmo_rx_timestamp = stmmac_xdp_rx_timestamp,
+};
+
/**
* stmmac_dvr_probe
* @device: device pointer
@@ -7152,6 +7231,8 @@ int stmmac_dvr_probe(struct device *device,
ndev->netdev_ops = &stmmac_netdev_ops;
+ ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
+
ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXCSUM;
ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
@@ -7238,6 +7319,10 @@ int stmmac_dvr_probe(struct device *device,
if (priv->dma_cap.rssen && priv->plat->rss_en)
ndev->features |= NETIF_F_RXHASH;
+ ndev->vlan_features |= ndev->features;
+ /* TSO doesn't work on VLANs yet */
+ ndev->vlan_features &= ~NETIF_F_TSO;
+
/* MTU range: 46 - hw-specific max */
ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
if (priv->plat->has_xgmac)
@@ -7260,6 +7345,8 @@ int stmmac_dvr_probe(struct device *device,
if (flow_ctrl)
priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
+ ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+
/* Setup channels NAPI */
stmmac_napi_add(ndev);
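The stmmac_xdp_buff plumbing above relies on a layout guarantee that is easy to miss in the diff; a sketch of the assumed structure (the authoritative definitions live upstream in include/net/xsk_buff_pool.h and include/net/xdp_sock_drv.h):

	/*
	 * struct xdp_buff_xsk {
	 *	struct xdp_buff xdp;	// stmmac_xdp_buff.xdp overlays this
	 *	u8 cb[24];		// priv/desc/ndesc are stashed in here
	 *	...
	 * };
	 *
	 * XSK_CHECK_PRIV_TYPE(t) is roughly
	 *	BUILD_BUG_ON(sizeof(t) > offsetofend(struct xdp_buff_xsk, cb))
	 * i.e. a compile-time assertion that the driver context cannot run
	 * past the end of cb[], which is what makes the plain cast in
	 * xsk_buff_to_stmmac_ctx() safe.
	 */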
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index 21aaa2730ac8..6807c4c1a0a2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -281,9 +281,8 @@ static int stmmac_mdio_read_c22(struct mii_bus *bus, int phyaddr, int phyreg)
value |= (phyreg << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask;
value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
& priv->hw->mii.clk_csr_mask;
- if (priv->plat->has_gmac4) {
+ if (priv->plat->has_gmac4)
value |= MII_GMAC4_READ;
- }
data = stmmac_mdio_read(priv, data, value);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 067a40fe0a23..eb0b2898daa3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -519,7 +519,8 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
if (of_device_is_compatible(np, "snps,dwmac-4.00") ||
of_device_is_compatible(np, "snps,dwmac-4.10a") ||
of_device_is_compatible(np, "snps,dwmac-4.20a") ||
- of_device_is_compatible(np, "snps,dwmac-5.10a")) {
+ of_device_is_compatible(np, "snps,dwmac-5.10a") ||
+ of_device_is_compatible(np, "snps,dwmac-5.20")) {
plat->has_gmac4 = 1;
plat->has_gmac = 0;
plat->pmt = 1;
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index ab8b09a9ef61..7a2e76776297 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -4522,7 +4522,7 @@ static int niu_alloc_channels(struct niu *np)
err = niu_rbr_fill(np, rp, GFP_KERNEL);
if (err)
- return err;
+ goto out_err;
}
tx_rings = kcalloc(num_tx_rings, sizeof(struct tx_ring_info),
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index ec85aef35bf9..b93613cd1994 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -14,9 +14,6 @@
* argument : macaddr=0x00,0x10,0x20,0x30,0x40,0x50
*/
-#include <asm/byteorder.h>
-#include <asm/dma.h>
-#include <asm/irq.h>
#include <linux/bitops.h>
#include <linux/crc32.h>
#include <linux/delay.h>
@@ -45,6 +42,10 @@
#include <linux/types.h>
#include <linux/uaccess.h>
+#include <asm/byteorder.h>
+#include <asm/dma.h>
+#include <asm/irq.h>
+
#ifdef CONFIG_SPARC
#include <asm/auxio.h>
#include <asm/idprom.h>
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index d17757ecbf42..11cbcd9e2c72 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -1506,9 +1506,21 @@ static void am65_cpsw_nuss_mac_config(struct phylink_config *config, unsigned in
struct am65_cpsw_common *common = port->common;
if (common->pdata.extra_modes & BIT(state->interface)) {
- if (state->interface == PHY_INTERFACE_MODE_SGMII)
+ if (state->interface == PHY_INTERFACE_MODE_SGMII) {
writel(ADVERTISE_SGMII,
port->sgmii_base + AM65_CPSW_SGMII_MR_ADV_ABILITY_REG);
+ cpsw_sl_ctl_set(port->slave.mac_sl, CPSW_SL_CTL_EXT_EN);
+ } else {
+ cpsw_sl_ctl_clr(port->slave.mac_sl, CPSW_SL_CTL_EXT_EN);
+ }
+
+ if (state->interface == PHY_INTERFACE_MODE_USXGMII) {
+ cpsw_sl_ctl_set(port->slave.mac_sl,
+ CPSW_SL_CTL_XGIG | CPSW_SL_CTL_XGMII_EN);
+ } else {
+ cpsw_sl_ctl_clr(port->slave.mac_sl,
+ CPSW_SL_CTL_XGIG | CPSW_SL_CTL_XGMII_EN);
+ }
writel(AM65_CPSW_SGMII_CONTROL_MR_AN_ENABLE,
port->sgmii_base + AM65_CPSW_SGMII_CONTROL_REG);
@@ -1523,6 +1535,7 @@ static void am65_cpsw_nuss_mac_link_down(struct phylink_config *config, unsigned
struct am65_cpsw_port *port = container_of(slave, struct am65_cpsw_port, slave);
struct am65_cpsw_common *common = port->common;
struct net_device *ndev = port->ndev;
+ u32 mac_control;
int tmo;
/* disable forwarding */
@@ -1534,7 +1547,14 @@ static void am65_cpsw_nuss_mac_link_down(struct phylink_config *config, unsigned
dev_dbg(common->dev, "down msc_sl %08x tmo %d\n",
cpsw_sl_reg_read(port->slave.mac_sl, CPSW_SL_MACSTATUS), tmo);
- cpsw_sl_ctl_reset(port->slave.mac_sl);
+ /* All the bits that am65_cpsw_nuss_mac_link_up() can possibly set */
+ mac_control = CPSW_SL_CTL_GMII_EN | CPSW_SL_CTL_GIG | CPSW_SL_CTL_IFCTL_A |
+ CPSW_SL_CTL_FULLDUPLEX | CPSW_SL_CTL_RX_FLOW_EN | CPSW_SL_CTL_TX_FLOW_EN;
+ /* If interface mode is RGMII, CPSW_SL_CTL_EXT_EN might have been set for 10 Mbps */
+ if (phy_interface_mode_is_rgmii(interface))
+ mac_control |= CPSW_SL_CTL_EXT_EN;
+ /* Only clear those bits that can be set by am65_cpsw_nuss_mac_link_up() */
+ cpsw_sl_ctl_clr(port->slave.mac_sl, mac_control);
am65_cpsw_qos_link_down(ndev);
netif_tx_stop_all_queues(ndev);
@@ -1551,10 +1571,12 @@ static void am65_cpsw_nuss_mac_link_up(struct phylink_config *config, struct phy
u32 mac_control = CPSW_SL_CTL_GMII_EN;
struct net_device *ndev = port->ndev;
+ /* Bring the port out of idle state */
+ cpsw_sl_ctl_clr(port->slave.mac_sl, CPSW_SL_CTL_CMD_IDLE);
+
if (speed == SPEED_1000)
mac_control |= CPSW_SL_CTL_GIG;
- if (interface == PHY_INTERFACE_MODE_SGMII)
- mac_control |= CPSW_SL_CTL_EXT_EN;
+ /* TODO: Verify whether in-band is necessary for 10 Mbps RGMII */
if (speed == SPEED_10 && phy_interface_mode_is_rgmii(interface))
/* Can be used with in band mode only */
mac_control |= CPSW_SL_CTL_EXT_EN;
@@ -2157,7 +2179,8 @@ am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
/* Configuring Phylink */
port->slave.phylink_config.dev = &port->ndev->dev;
port->slave.phylink_config.type = PHYLINK_NETDEV;
- port->slave.phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD;
+ port->slave.phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 |
+ MAC_1000FD | MAC_5000FD;
port->slave.phylink_config.mac_managed_pm = true; /* MAC does PM */
switch (port->slave.phy_if) {
@@ -2175,6 +2198,7 @@ am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
case PHY_INTERFACE_MODE_QSGMII:
case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_USXGMII:
if (common->pdata.extra_modes & BIT(port->slave.phy_if)) {
__set_bit(port->slave.phy_if,
port->slave.phylink_config.supported_interfaces);
@@ -2796,12 +2820,20 @@ static const struct am65_cpsw_pdata j721e_cpswxg_pdata = {
.extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII) | BIT(PHY_INTERFACE_MODE_SGMII),
};
+static const struct am65_cpsw_pdata j784s4_cpswxg_pdata = {
+ .quirks = 0,
+ .ale_dev_id = "am64-cpswxg",
+ .fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
+ .extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII) | BIT(PHY_INTERFACE_MODE_USXGMII),
+};
+
static const struct of_device_id am65_cpsw_nuss_of_mtable[] = {
{ .compatible = "ti,am654-cpsw-nuss", .data = &am65x_sr1_0},
{ .compatible = "ti,j721e-cpsw-nuss", .data = &j721e_pdata},
{ .compatible = "ti,am642-cpsw-nuss", .data = &am64x_cpswxg_pdata},
{ .compatible = "ti,j7200-cpswxg-nuss", .data = &j7200_cpswxg_pdata},
{ .compatible = "ti,j721e-cpswxg-nuss", .data = &j721e_cpswxg_pdata},
+ { .compatible = "ti,j784s4-cpswxg-nuss", .data = &j784s4_cpswxg_pdata},
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, am65_cpsw_nuss_of_mtable);
@@ -2957,7 +2989,8 @@ err_free_phylink:
am65_cpsw_nuss_phylink_cleanup(common);
am65_cpts_release(common->cpts);
err_of_clear:
- of_platform_device_destroy(common->mdio_dev, NULL);
+ if (common->mdio_dev)
+ of_platform_device_destroy(common->mdio_dev, NULL);
err_pm_clear:
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
@@ -2987,7 +3020,8 @@ static int am65_cpsw_nuss_remove(struct platform_device *pdev)
am65_cpts_release(common->cpts);
am65_cpsw_disable_serdes_phy(common);
- of_platform_device_destroy(common->mdio_dev, NULL);
+ if (common->mdio_dev)
+ of_platform_device_destroy(common->mdio_dev, NULL);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 37f0b62ec5d6..f9cd566d1c9b 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -27,7 +27,7 @@
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
-#include <linux/of_device.h>
+#include <linux/of_platform.h>
#include <linux/if_vlan.h>
#include <linux/kmemleak.h>
#include <linux/sys_soc.h>
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index 35128dd45ffc..c61e4e44a78f 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -7,6 +7,7 @@
#include <linux/io.h>
#include <linux/clk.h>
+#include <linux/platform_device.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/irqreturn.h>
@@ -23,7 +24,7 @@
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
-#include <linux/of_device.h>
+#include <linux/of_platform.h>
#include <linux/if_vlan.h>
#include <linux/kmemleak.h>
#include <linux/sys_soc.h>
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
index eb89a274083e..1e8d8b7b0c62 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
@@ -1798,10 +1798,13 @@ static int wx_setup_rx_resources(struct wx_ring *rx_ring)
ret = wx_alloc_page_pool(rx_ring);
if (ret < 0) {
dev_err(rx_ring->dev, "Page pool creation failed: %d\n", ret);
- goto err;
+ goto err_desc;
}
return 0;
+
+err_desc:
+ dma_free_coherent(dev, rx_ring->size, rx_ring->desc, rx_ring->dma);
err:
kvfree(rx_ring->rx_buffer_info);
rx_ring->rx_buffer_info = NULL;
diff --git a/drivers/net/fddi/skfp/rmt.c b/drivers/net/fddi/skfp/rmt.c
index 37a89675dbeb..314623650a84 100644
--- a/drivers/net/fddi/skfp/rmt.c
+++ b/drivers/net/fddi/skfp/rmt.c
@@ -234,9 +234,9 @@ static void rmt_fsm(struct s_smc *smc, int cmd)
if (smc->r.rm_join) {
smc->r.sm_ma_avail = TRUE ;
if (smc->mib.m[MAC0].fddiMACMA_UnitdataEnable)
- smc->mib.m[MAC0].fddiMACMA_UnitdataAvailable = TRUE ;
- else
- smc->mib.m[MAC0].fddiMACMA_UnitdataAvailable = FALSE ;
+ smc->mib.m[MAC0].fddiMACMA_UnitdataAvailable = TRUE;
+ else
+ smc->mib.m[MAC0].fddiMACMA_UnitdataAvailable = FALSE;
}
DB_RMTN(1, "RMT : RING UP");
RS_CLEAR(smc,RS_NORINGOP) ;
diff --git a/drivers/net/hamradio/Kconfig b/drivers/net/hamradio/Kconfig
index a9c44f08199d..a94c7bd5db2e 100644
--- a/drivers/net/hamradio/Kconfig
+++ b/drivers/net/hamradio/Kconfig
@@ -47,7 +47,7 @@ config BPQETHER
config SCC
tristate "Z8530 SCC driver"
- depends on ISA && AX25 && ISA_DMA_API
+ depends on ISA && AX25
help
These cards are used to connect your Linux box to an amateur radio
in order to communicate with other computers. If you want to use
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 25616247d7a5..3427993f94f7 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -1021,8 +1021,12 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
* the SecTAG, so we have to deduce which port to deliver to.
*/
if (macsec_is_offloaded(macsec) && netif_running(ndev)) {
- if (md_dst && md_dst->type == METADATA_MACSEC &&
- (!find_rx_sc(&macsec->secy, md_dst->u.macsec_info.sci)))
+ struct macsec_rx_sc *rx_sc = NULL;
+
+ if (md_dst && md_dst->type == METADATA_MACSEC)
+ rx_sc = find_rx_sc(&macsec->secy, md_dst->u.macsec_info.sci);
+
+ if (md_dst && md_dst->type == METADATA_MACSEC && !rx_sc)
continue;
if (ether_addr_equal_64bits(hdr->h_dest,
@@ -1047,7 +1051,13 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
nskb->pkt_type = PACKET_MULTICAST;
__netif_rx(nskb);
+ } else if (rx_sc || ndev->flags & IFF_PROMISC) {
+ skb->dev = ndev;
+ skb->pkt_type = PACKET_HOST;
+ ret = RX_HANDLER_ANOTHER;
+ goto out;
}
+
continue;
}
diff --git a/drivers/net/mdio/Kconfig b/drivers/net/mdio/Kconfig
index 90309980686e..9ff2e6f22f3f 100644
--- a/drivers/net/mdio/Kconfig
+++ b/drivers/net/mdio/Kconfig
@@ -65,6 +65,7 @@ config MDIO_ASPEED
tristate "ASPEED MDIO bus controller"
depends on ARCH_ASPEED || COMPILE_TEST
depends on OF_MDIO && HAS_IOMEM
+ depends on MDIO_DEVRES
help
This module provides a driver for the independent MDIO bus
controllers found in the ASPEED AST2600 SoC. This is a driver for the
@@ -170,6 +171,7 @@ config MDIO_IPQ4019
tristate "Qualcomm IPQ4019 MDIO interface support"
depends on HAS_IOMEM && OF_MDIO
depends on COMMON_CLK
+ depends on MDIO_DEVRES
help
This driver supports the MDIO interface found in Qualcomm
IPQ40xx, IPQ60xx, IPQ807x and IPQ50xx series Soc-s.
@@ -178,6 +180,7 @@ config MDIO_IPQ8064
tristate "Qualcomm IPQ8064 MDIO interface support"
depends on HAS_IOMEM && OF_MDIO
depends on MFD_SYSCON
+ depends on MDIO_DEVRES
help
This driver supports the MDIO interface found in the network
interface units of the IPQ8064 SoC
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 6b9525def973..513675ae4dd2 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -44,6 +44,14 @@ config LED_TRIGGER_PHY
<Speed in megabits>Mbps OR <Speed in gigabits>Gbps OR link
for any speed known to the PHY.
+config PHYLIB_LEDS
+ bool "Support probing LEDs from device tree"
+ depends on LEDS_CLASS=y || LEDS_CLASS=PHYLIB
+ depends on OF
+ default y
+ help
+ When LED class support is enabled, phylib can automatically
+ probe LED settings from the device tree.
config FIXED_PHY
tristate "MDIO Bus/PHY emulation with fixed speed/link PHYs"
diff --git a/drivers/net/phy/aquantia_hwmon.c b/drivers/net/phy/aquantia_hwmon.c
index 19c4c280a6cd..0da451e46f69 100644
--- a/drivers/net/phy/aquantia_hwmon.c
+++ b/drivers/net/phy/aquantia_hwmon.c
@@ -210,7 +210,7 @@ static const struct hwmon_channel_info aqr_hwmon_temp = {
.config = aqr_hwmon_temp_config,
};
-static const struct hwmon_channel_info *aqr_hwmon_info[] = {
+static const struct hwmon_channel_info * const aqr_hwmon_info[] = {
&aqr_hwmon_chip,
&aqr_hwmon_temp,
NULL,
diff --git a/drivers/net/phy/bcm54140.c b/drivers/net/phy/bcm54140.c
index d8f3024860dc..d43076592f81 100644
--- a/drivers/net/phy/bcm54140.c
+++ b/drivers/net/phy/bcm54140.c
@@ -364,7 +364,7 @@ static int bcm54140_hwmon_write(struct device *dev,
}
}
-static const struct hwmon_channel_info *bcm54140_hwmon_info[] = {
+static const struct hwmon_channel_info * const bcm54140_hwmon_info[] = {
HWMON_CHANNEL_INFO(temp,
HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX |
HWMON_T_ALARM),
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 63a3644d86c9..cd5d0ed9c5e5 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -144,11 +144,15 @@
/* WOL Event Interrupt Enable */
#define MII_88E1318S_PHY_CSIER_WOL_EIE BIT(7)
-/* LED Timer Control Register */
-#define MII_88E1318S_PHY_LED_TCR 0x12
-#define MII_88E1318S_PHY_LED_TCR_FORCE_INT BIT(15)
-#define MII_88E1318S_PHY_LED_TCR_INTn_ENABLE BIT(7)
-#define MII_88E1318S_PHY_LED_TCR_INT_ACTIVE_LOW BIT(11)
+#define MII_88E1318S_PHY_LED_FUNC 0x10
+#define MII_88E1318S_PHY_LED_FUNC_OFF (0x8)
+#define MII_88E1318S_PHY_LED_FUNC_ON (0x9)
+#define MII_88E1318S_PHY_LED_FUNC_HI_Z (0xa)
+#define MII_88E1318S_PHY_LED_FUNC_BLINK (0xb)
+#define MII_88E1318S_PHY_LED_TCR 0x12
+#define MII_88E1318S_PHY_LED_TCR_FORCE_INT BIT(15)
+#define MII_88E1318S_PHY_LED_TCR_INTn_ENABLE BIT(7)
+#define MII_88E1318S_PHY_LED_TCR_INT_ACTIVE_LOW BIT(11)
/* Magic Packet MAC address registers */
#define MII_88E1318S_PHY_MAGIC_PACKET_WORD2 0x17
@@ -2735,7 +2739,7 @@ static const struct hwmon_channel_info marvell_hwmon_temp = {
.config = marvell_hwmon_temp_config,
};
-static const struct hwmon_channel_info *marvell_hwmon_info[] = {
+static const struct hwmon_channel_info * const marvell_hwmon_info[] = {
&marvell_hwmon_chip,
&marvell_hwmon_temp,
NULL
@@ -2832,6 +2836,63 @@ static int marvell_hwmon_probe(struct phy_device *phydev)
}
#endif
+static int m88e1318_led_brightness_set(struct phy_device *phydev,
+ u8 index, enum led_brightness value)
+{
+ int reg;
+
+ reg = phy_read_paged(phydev, MII_MARVELL_LED_PAGE,
+ MII_88E1318S_PHY_LED_FUNC);
+ if (reg < 0)
+ return reg;
+
+ switch (index) {
+ case 0:
+ case 1:
+ case 2:
+ reg &= ~(0xf << (4 * index));
+ if (value == LED_OFF)
+ reg |= MII_88E1318S_PHY_LED_FUNC_OFF << (4 * index);
+ else
+ reg |= MII_88E1318S_PHY_LED_FUNC_ON << (4 * index);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return phy_write_paged(phydev, MII_MARVELL_LED_PAGE,
+ MII_88E1318S_PHY_LED_FUNC, reg);
+}
+
+static int m88e1318_led_blink_set(struct phy_device *phydev, u8 index,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ int reg;
+
+ reg = phy_read_paged(phydev, MII_MARVELL_LED_PAGE,
+ MII_88E1318S_PHY_LED_FUNC);
+ if (reg < 0)
+ return reg;
+
+ switch (index) {
+ case 0:
+ case 1:
+ case 2:
+ reg &= ~(0xf << (4 * index));
+ reg |= MII_88E1318S_PHY_LED_FUNC_BLINK << (4 * index);
+ /* Reset default is 84ms */
+ *delay_on = 84 / 2;
+ *delay_off = 84 / 2;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return phy_write_paged(phydev, MII_MARVELL_LED_PAGE,
+ MII_88E1318S_PHY_LED_FUNC, reg);
+}
+
static int marvell_probe(struct phy_device *phydev)
{
struct marvell_priv *priv;
@@ -3081,6 +3142,8 @@ static struct phy_driver marvell_drivers[] = {
.get_sset_count = marvell_get_sset_count,
.get_strings = marvell_get_strings,
.get_stats = marvell_get_stats,
+ .led_brightness_set = m88e1318_led_brightness_set,
+ .led_blink_set = m88e1318_led_blink_set,
},
{
.phy_id = MARVELL_PHY_ID_88E1145,
@@ -3187,6 +3250,8 @@ static struct phy_driver marvell_drivers[] = {
.cable_test_start = marvell_vct7_cable_test_start,
.cable_test_tdr_start = marvell_vct5_cable_test_tdr_start,
.cable_test_get_status = marvell_vct7_cable_test_get_status,
+ .led_brightness_set = m88e1318_led_brightness_set,
+ .led_blink_set = m88e1318_led_blink_set,
},
{
.phy_id = MARVELL_PHY_ID_88E1540,
@@ -3213,6 +3278,8 @@ static struct phy_driver marvell_drivers[] = {
.cable_test_start = marvell_vct7_cable_test_start,
.cable_test_tdr_start = marvell_vct5_cable_test_tdr_start,
.cable_test_get_status = marvell_vct7_cable_test_get_status,
+ .led_brightness_set = m88e1318_led_brightness_set,
+ .led_blink_set = m88e1318_led_blink_set,
},
{
.phy_id = MARVELL_PHY_ID_88E1545,
@@ -3239,6 +3306,8 @@ static struct phy_driver marvell_drivers[] = {
.cable_test_start = marvell_vct7_cable_test_start,
.cable_test_tdr_start = marvell_vct5_cable_test_tdr_start,
.cable_test_get_status = marvell_vct7_cable_test_get_status,
+ .led_brightness_set = m88e1318_led_brightness_set,
+ .led_blink_set = m88e1318_led_blink_set,
},
{
.phy_id = MARVELL_PHY_ID_88E3016,
@@ -3380,6 +3449,8 @@ static struct phy_driver marvell_drivers[] = {
.get_stats = marvell_get_stats,
.get_tunable = m88e1540_get_tunable,
.set_tunable = m88e1540_set_tunable,
+ .led_brightness_set = m88e1318_led_brightness_set,
+ .led_blink_set = m88e1318_led_blink_set,
},
};
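The masks in m88e1318_led_brightness_set() and m88e1318_led_blink_set() imply the following register layout; an annotated restatement, with field positions inferred from the "0xf << (4 * index)" arithmetic above:

	/*
	 * MII_88E1318S_PHY_LED_FUNC packs one 4-bit function code per LED,
	 * LED n occupying bits [4n+3:4n]:
	 *
	 *	reg &= ~(0xf << (4 * index));		// clear LED n's field
	 *	reg |= MII_88E1318S_PHY_LED_FUNC_ON << (4 * index);  // 0x9 = on
	 *
	 * Only OFF (0x8), ON (0x9) and BLINK (0xb) are driven here; the 84 ms
	 * period reported back through delay_on/delay_off (42 ms each half)
	 * is the hardware reset default noted in the code.
	 */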
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index 383a9c9f36e5..55d9d7acc32e 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -243,7 +243,7 @@ static const struct hwmon_channel_info mv3310_hwmon_temp = {
.config = mv3310_hwmon_temp_config,
};
-static const struct hwmon_channel_info *mv3310_hwmon_info[] = {
+static const struct hwmon_channel_info * const mv3310_hwmon_info[] = {
&mv3310_hwmon_chip,
&mv3310_hwmon_temp,
NULL,
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 2184b1e859ae..3f81bb8dac44 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -10,13 +10,13 @@
* Copyright (c) 2014 Johan Hovold <johan@kernel.org>
*
* Support : Micrel Phys:
- * Giga phys: ksz9021, ksz9031, ksz9131
+ * Giga phys: ksz9021, ksz9031, ksz9131, lan8841, lan8814
* 100/10 Phys : ksz8001, ksz8721, ksz8737, ksz8041
* ksz8021, ksz8031, ksz8051,
* ksz8081, ksz8091,
* ksz8061,
* Switch : ksz8873, ksz886x
- * ksz9477
+ * ksz9477, lan8804
*/
#include <linux/bitfield.h>
@@ -3761,7 +3761,7 @@ static int lan8841_ptp_update_target(struct kszphy_ptp_priv *ptp_priv,
const struct timespec64 *ts)
{
return lan8841_ptp_set_target(ptp_priv, LAN8841_EVENT_A,
- ts->tv_sec + LAN8841_BUFFER_TIME, ts->tv_nsec);
+ ts->tv_sec + LAN8841_BUFFER_TIME, 0);
}
#define LAN8841_PTP_LTC_TARGET_RELOAD_SEC_HI(event) ((event) == LAN8841_EVENT_A ? 282 : 292)
diff --git a/drivers/net/phy/mxl-gpy.c b/drivers/net/phy/mxl-gpy.c
index 8e6bb97b5f85..6301a9abfb95 100644
--- a/drivers/net/phy/mxl-gpy.c
+++ b/drivers/net/phy/mxl-gpy.c
@@ -182,7 +182,7 @@ static umode_t gpy_hwmon_is_visible(const void *data,
return 0444;
}
-static const struct hwmon_channel_info *gpy_hwmon_info[] = {
+static const struct hwmon_channel_info * const gpy_hwmon_info[] = {
HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT),
NULL
};
diff --git a/drivers/net/phy/nxp-c45-tja11xx.c b/drivers/net/phy/nxp-c45-tja11xx.c
index 5813b07242ce..029875a59ff8 100644
--- a/drivers/net/phy/nxp-c45-tja11xx.c
+++ b/drivers/net/phy/nxp-c45-tja11xx.c
@@ -191,7 +191,7 @@
#define MAX_ID_PS 2260U
#define DEFAULT_ID_PS 2000U
-#define PPM_TO_SUBNS_INC(ppb) div_u64(GENMASK(31, 0) * (ppb) * \
+#define PPM_TO_SUBNS_INC(ppb) div_u64(GENMASK_ULL(31, 0) * (ppb) * \
PTP_CLK_PERIOD_100BT1, NSEC_PER_SEC)
#define NXP_C45_SKB_CB(skb) ((struct nxp_c45_skb_cb *)(skb)->cb)
@@ -1337,6 +1337,17 @@ no_ptp_support:
return ret;
}
+static void nxp_c45_remove(struct phy_device *phydev)
+{
+ struct nxp_c45_phy *priv = phydev->priv;
+
+ if (priv->ptp_clock)
+ ptp_clock_unregister(priv->ptp_clock);
+
+ skb_queue_purge(&priv->tx_queue);
+ skb_queue_purge(&priv->rx_queue);
+}
+
static struct phy_driver nxp_c45_driver[] = {
{
PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103),
@@ -1359,6 +1370,7 @@ static struct phy_driver nxp_c45_driver[] = {
.set_loopback = genphy_c45_loopback,
.get_sqi = nxp_c45_get_sqi,
.get_sqi_max = nxp_c45_get_sqi_max,
+ .remove = nxp_c45_remove,
},
};
diff --git a/drivers/net/phy/nxp-tja11xx.c b/drivers/net/phy/nxp-tja11xx.c
index ec91e671f8aa..b13e15310feb 100644
--- a/drivers/net/phy/nxp-tja11xx.c
+++ b/drivers/net/phy/nxp-tja11xx.c
@@ -477,7 +477,7 @@ static umode_t tja11xx_hwmon_is_visible(const void *data,
return 0;
}
-static const struct hwmon_channel_info *tja11xx_hwmon_info[] = {
+static const struct hwmon_channel_info * const tja11xx_hwmon_info[] = {
HWMON_CHANNEL_INFO(in, HWMON_I_LCRIT_ALARM),
HWMON_CHANNEL_INFO(temp, HWMON_T_CRIT_ALARM),
NULL
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 917ba84105fc..d373446ab5ac 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -19,10 +19,12 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/list.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/mm.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/phy_led_triggers.h>
@@ -674,6 +676,7 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, u32 phy_id,
device_initialize(&mdiodev->dev);
dev->state = PHY_DOWN;
+ INIT_LIST_HEAD(&dev->leds);
mutex_init(&dev->lock);
INIT_DELAYED_WORK(&dev->state_queue, phy_state_machine);
@@ -2988,6 +2991,101 @@ static bool phy_drv_supports_irq(struct phy_driver *phydrv)
return phydrv->config_intr && phydrv->handle_interrupt;
}
+static int phy_led_set_brightness(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct phy_led *phyled = to_phy_led(led_cdev);
+ struct phy_device *phydev = phyled->phydev;
+ int err;
+
+ mutex_lock(&phydev->lock);
+ err = phydev->drv->led_brightness_set(phydev, phyled->index, value);
+ mutex_unlock(&phydev->lock);
+
+ return err;
+}
+
+static int phy_led_blink_set(struct led_classdev *led_cdev,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ struct phy_led *phyled = to_phy_led(led_cdev);
+ struct phy_device *phydev = phyled->phydev;
+ int err;
+
+ mutex_lock(&phydev->lock);
+ err = phydev->drv->led_blink_set(phydev, phyled->index,
+ delay_on, delay_off);
+ mutex_unlock(&phydev->lock);
+
+ return err;
+}
+
+static int of_phy_led(struct phy_device *phydev,
+ struct device_node *led)
+{
+ struct device *dev = &phydev->mdio.dev;
+ struct led_init_data init_data = {};
+ struct led_classdev *cdev;
+ struct phy_led *phyled;
+ int err;
+
+ phyled = devm_kzalloc(dev, sizeof(*phyled), GFP_KERNEL);
+ if (!phyled)
+ return -ENOMEM;
+
+ cdev = &phyled->led_cdev;
+ phyled->phydev = phydev;
+
+ err = of_property_read_u8(led, "reg", &phyled->index);
+ if (err)
+ return err;
+
+ if (phydev->drv->led_brightness_set)
+ cdev->brightness_set_blocking = phy_led_set_brightness;
+ if (phydev->drv->led_blink_set)
+ cdev->blink_set = phy_led_blink_set;
+ cdev->max_brightness = 1;
+ init_data.devicename = dev_name(&phydev->mdio.dev);
+ init_data.fwnode = of_fwnode_handle(led);
+ init_data.devname_mandatory = true;
+
+ err = devm_led_classdev_register_ext(dev, cdev, &init_data);
+ if (err)
+ return err;
+
+ list_add(&phyled->list, &phydev->leds);
+
+ return 0;
+}
+
+static int of_phy_leds(struct phy_device *phydev)
+{
+ struct device_node *node = phydev->mdio.dev.of_node;
+ struct device_node *leds, *led;
+ int err;
+
+ if (!IS_ENABLED(CONFIG_OF_MDIO))
+ return 0;
+
+ if (!node)
+ return 0;
+
+ leds = of_get_child_by_name(node, "leds");
+ if (!leds)
+ return 0;
+
+ for_each_available_child_of_node(leds, led) {
+ err = of_phy_led(phydev, led);
+ if (err) {
+ of_node_put(led);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
/**
* fwnode_mdio_find_device - Given a fwnode, find the mdio_device
* @fwnode: pointer to the mdio_device's fwnode
@@ -3183,6 +3281,12 @@ static int phy_probe(struct device *dev)
/* Set the state to READY by default */
phydev->state = PHY_READY;
+ /* Get the LEDs from the device tree, and instantiate standard
+ * LEDs for them.
+ */
+ if (IS_ENABLED(CONFIG_PHYLIB_LEDS))
+ err = of_phy_leds(phydev);
+
out:
/* Re-assert the reset signal on error */
if (err)
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index f7da96f0c75b..a4111f1be375 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -1583,6 +1583,25 @@ void phylink_destroy(struct phylink *pl)
}
EXPORT_SYMBOL_GPL(phylink_destroy);
+/**
+ * phylink_expects_phy() - Determine if phylink expects a phy to be attached
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ *
+ * When using fixed-link mode, or in-band mode with 1000base-X or 2500base-X,
+ * no PHY is needed.
+ *
+ * Returns true if phylink will be expecting a PHY.
+ */
+bool phylink_expects_phy(struct phylink *pl)
+{
+ if (pl->cfg_link_an_mode == MLO_AN_FIXED ||
+ (pl->cfg_link_an_mode == MLO_AN_INBAND &&
+ phy_interface_mode_is_8023z(pl->link_config.interface)))
+ return false;
+ return true;
+}
+EXPORT_SYMBOL_GPL(phylink_expects_phy);
+
static void phylink_phy_change(struct phy_device *phydev, bool up)
{
struct phylink *pl = phydev->phylink;
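A usage sketch for the new helper, mirroring the stmmac_init_phy() hunk earlier in this patch set (example_priv and example_init_phy are hypothetical names):

static int example_init_phy(struct net_device *ndev)
{
	struct example_priv *priv = netdev_priv(ndev);	/* hypothetical */

	/* Fixed-link or in-band 802.3z: phylink drives the link itself,
	 * so there is no PHY to attach.
	 */
	if (!phylink_expects_phy(priv->phylink))
		return 0;

	return phylink_fwnode_phy_connect(priv->phylink,
					  dev_fwnode(ndev->dev.parent), 0);
}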
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index f0fcb06fbe82..89636dc71e48 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -210,6 +210,12 @@ static const enum gpiod_flags gpio_flags[] = {
#define SFP_PHY_ADDR 22
#define SFP_PHY_ADDR_ROLLBALL 17
+/* SFP_EEPROM_BLOCK_SIZE is the size of the chunks the EEPROM is read in
+ * at a time. Some SFP modules and some Linux I2C drivers do not like
+ * reads longer than 16 bytes.
+ */
+#define SFP_EEPROM_BLOCK_SIZE 16
+
struct sff_data {
unsigned int gpios;
bool (*module_supported)(const struct sfp_eeprom_id *id);
@@ -406,6 +412,10 @@ static const struct sfp_quirk sfp_quirks[] = {
SFP_QUIRK_F("HALNy", "HL-GSFP", sfp_fixup_halny_gsfp),
+ // HG MXPD-483II-F 2.5G supports 2500Base-X, but incorrectly reports
+ // 2600MBd in their EEPROM
+ SFP_QUIRK_M("HG GENUINE", "MXPD-483II", sfp_quirk_2500basex),
+
// Huawei MA5671A can operate at 2500base-X, but report 1.2GBd NRZ in
// their EEPROM
SFP_QUIRK("HUAWEI", "MA5671A", sfp_quirk_2500basex,
@@ -1378,7 +1388,7 @@ static const struct hwmon_ops sfp_hwmon_ops = {
.read_string = sfp_hwmon_read_string,
};
-static const struct hwmon_channel_info *sfp_hwmon_info[] = {
+static const struct hwmon_channel_info * const sfp_hwmon_info[] = {
HWMON_CHANNEL_INFO(chip,
HWMON_C_REGISTER_TZ),
HWMON_CHANNEL_INFO(in,
@@ -1947,11 +1957,7 @@ static int sfp_sm_mod_probe(struct sfp *sfp, bool report)
u8 check;
int ret;
- /* Some SFP modules and also some Linux I2C drivers do not like reads
- * longer than 16 bytes, so read the EEPROM in chunks of 16 bytes at
- * a time.
- */
- sfp->i2c_block_size = 16;
+ sfp->i2c_block_size = SFP_EEPROM_BLOCK_SIZE;
ret = sfp_read(sfp, false, 0, &id.base, sizeof(id.base));
if (ret < 0) {
@@ -2509,6 +2515,9 @@ static int sfp_module_eeprom(struct sfp *sfp, struct ethtool_eeprom *ee,
unsigned int first, last, len;
int ret;
+ if (!(sfp->state & SFP_F_PRESENT))
+ return -ENODEV;
+
if (ee->len == 0)
return -EINVAL;
@@ -2541,6 +2550,9 @@ static int sfp_module_eeprom_by_page(struct sfp *sfp,
const struct ethtool_module_eeprom *page,
struct netlink_ext_ack *extack)
{
+ if (!(sfp->state & SFP_F_PRESENT))
+ return -ENODEV;
+
if (page->bank) {
NL_SET_ERR_MSG(extack, "Banks not supported");
return -EOPNOTSUPP;
@@ -2645,6 +2657,7 @@ static struct sfp *sfp_alloc(struct device *dev)
return ERR_PTR(-ENOMEM);
sfp->dev = dev;
+ sfp->i2c_block_size = SFP_EEPROM_BLOCK_SIZE;
mutex_init(&sfp->sm_mutex);
mutex_init(&sfp->st_mutex);
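A sketch of the chunking SFP_EEPROM_BLOCK_SIZE implies (hypothetical loop; the real driver threads i2c_block_size through its existing sfp_read() plumbing rather than looping like this):

static int example_read_module_eeprom(struct sfp *sfp, u8 addr, void *buf,
				      size_t len)
{
	size_t off, chunk;
	int ret;

	for (off = 0; off < len; off += chunk) {
		/* Never ask the module or I2C master for more than 16 bytes */
		chunk = min_t(size_t, len - off, SFP_EEPROM_BLOCK_SIZE);
		ret = sfp_read(sfp, false, addr + off, buf + off, chunk);
		if (ret < 0)
			return ret;
	}
	return 0;
}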
diff --git a/drivers/net/thunderbolt/main.c b/drivers/net/thunderbolt/main.c
index 26ef3706445e..0c1e8970ee58 100644
--- a/drivers/net/thunderbolt/main.c
+++ b/drivers/net/thunderbolt/main.c
@@ -148,7 +148,7 @@ struct tbnet_ring {
/**
* struct tbnet - ThunderboltIP network driver private data
* @svc: XDomain service the driver is bound to
- * @xd: XDomain the service blongs to
+ * @xd: XDomain the service belongs to
* @handler: ThunderboltIP configuration protocol handler
* @dev: Networking device
* @napi: NAPI structure for Rx polling
@@ -764,7 +764,7 @@ static bool tbnet_check_frame(struct tbnet *net, const struct tbnet_frame *tf,
*/
if (net->skb && net->rx_hdr.frame_count) {
/* Check the frame count fits the count field */
- if (frame_count != net->rx_hdr.frame_count) {
+ if (frame_count != le32_to_cpu(net->rx_hdr.frame_count)) {
net->stats.rx_length_errors++;
return false;
}
@@ -772,8 +772,8 @@ static bool tbnet_check_frame(struct tbnet *net, const struct tbnet_frame *tf,
/* Check the frame identifiers are incremented correctly,
* and id is matching.
*/
- if (frame_index != net->rx_hdr.frame_index + 1 ||
- frame_id != net->rx_hdr.frame_id) {
+ if (frame_index != le16_to_cpu(net->rx_hdr.frame_index) + 1 ||
+ frame_id != le16_to_cpu(net->rx_hdr.frame_id)) {
net->stats.rx_missed_errors++;
return false;
}
@@ -873,11 +873,12 @@ static int tbnet_poll(struct napi_struct *napi, int budget)
TBNET_RX_PAGE_SIZE - hdr_size);
}
- net->rx_hdr.frame_size = frame_size;
- net->rx_hdr.frame_count = le32_to_cpu(hdr->frame_count);
- net->rx_hdr.frame_index = le16_to_cpu(hdr->frame_index);
- net->rx_hdr.frame_id = le16_to_cpu(hdr->frame_id);
- last = net->rx_hdr.frame_index == net->rx_hdr.frame_count - 1;
+ net->rx_hdr.frame_size = hdr->frame_size;
+ net->rx_hdr.frame_count = hdr->frame_count;
+ net->rx_hdr.frame_index = hdr->frame_index;
+ net->rx_hdr.frame_id = hdr->frame_id;
+ last = le16_to_cpu(net->rx_hdr.frame_index) ==
+ le32_to_cpu(net->rx_hdr.frame_count) - 1;
rx_packets++;
net->stats.rx_bytes += frame_size;
@@ -990,8 +991,10 @@ static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb,
{
struct thunderbolt_ip_frame_header *hdr = page_address(frames[0]->page);
struct device *dma_dev = tb_ring_dma_device(net->tx_ring.ring);
- __wsum wsum = htonl(skb->len - skb_transport_offset(skb));
unsigned int i, len, offset = skb_transport_offset(skb);
+ /* Remove payload length from checksum */
+ u32 paylen = skb->len - skb_transport_offset(skb);
+ __wsum wsum = (__force __wsum)htonl(paylen);
__be16 protocol = skb->protocol;
void *data = skb->data;
void *dest = hdr + 1;
@@ -1027,7 +1030,7 @@ static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb,
/* Data points on the beginning of packet.
* Check is the checksum absolute place in the packet.
* ipcso will update IP checksum.
- * tucso will update TCP/UPD checksum.
+ * tucso will update TCP/UDP checksum.
*/
if (protocol == htons(ETH_P_IP)) {
__sum16 *ipcso = dest + ((void *)&(ip_hdr(skb)->check) - data);
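A one-line restatement of the endianness discipline the rx_hdr hunks above establish:

	/*
	 * net->rx_hdr now caches the on-wire little-endian header verbatim,
	 * and every consumer, i.e. the frame_count/frame_index/frame_id
	 * checks in tbnet_check_frame() and the last-frame test in
	 * tbnet_poll(), converts with le16_to_cpu()/le32_to_cpu() at the
	 * point of use, instead of mixing converted and raw fields.
	 */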
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index decb5ba56a25..0fc4b959edc1 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -1943,7 +1943,7 @@ static struct rx_agg *alloc_rx_agg(struct r8152 *tp, gfp_t mflags)
if (!rx_agg)
return NULL;
- rx_agg->page = alloc_pages(mflags | __GFP_COMP, order);
+ rx_agg->page = alloc_pages(mflags | __GFP_COMP | __GFP_NOWARN, order);
if (!rx_agg->page)
goto free_rx;
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index c1178915496d..4b3c6647edc6 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -1262,11 +1262,12 @@ static void veth_set_xdp_features(struct net_device *dev)
peer = rtnl_dereference(priv->peer);
if (peer && peer->real_num_tx_queues <= dev->real_num_rx_queues) {
+ struct veth_priv *priv_peer = netdev_priv(peer);
xdp_features_t val = NETDEV_XDP_ACT_BASIC |
NETDEV_XDP_ACT_REDIRECT |
NETDEV_XDP_ACT_RX_SG;
- if (priv->_xdp_prog || veth_gro_requested(dev))
+ if (priv_peer->_xdp_prog || veth_gro_requested(peer))
val |= NETDEV_XDP_ACT_NDO_XMIT |
NETDEV_XDP_ACT_NDO_XMIT_SG;
xdp_set_features_flag(dev, val);
@@ -1504,19 +1505,23 @@ static int veth_set_features(struct net_device *dev,
{
netdev_features_t changed = features ^ dev->features;
struct veth_priv *priv = netdev_priv(dev);
+ struct net_device *peer;
int err;
if (!(changed & NETIF_F_GRO) || !(dev->flags & IFF_UP) || priv->_xdp_prog)
return 0;
+ peer = rtnl_dereference(priv->peer);
if (features & NETIF_F_GRO) {
err = veth_napi_enable(dev);
if (err)
return err;
- xdp_features_set_redirect_target(dev, true);
+ if (peer)
+ xdp_features_set_redirect_target(peer, true);
} else {
- xdp_features_clear_redirect_target(dev);
+ if (peer)
+ xdp_features_clear_redirect_target(peer);
veth_napi_del(dev);
}
return 0;
@@ -1598,13 +1603,13 @@ static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
peer->max_mtu = max_mtu;
}
- xdp_features_set_redirect_target(dev, true);
+ xdp_features_set_redirect_target(peer, true);
}
if (old_prog) {
if (!prog) {
- if (!veth_gro_requested(dev))
- xdp_features_clear_redirect_target(dev);
+ if (peer && !veth_gro_requested(dev))
+ xdp_features_clear_redirect_target(peer);
if (dev->flags & IFF_UP)
veth_disable_xdp(dev);
@@ -1648,14 +1653,18 @@ static int veth_xdp_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
return 0;
}
-static int veth_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash)
+static int veth_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash,
+ enum xdp_rss_hash_type *rss_type)
{
struct veth_xdp_buff *_ctx = (void *)ctx;
+ struct sk_buff *skb = _ctx->skb;
- if (!_ctx->skb)
+ if (!skb)
return -ENODATA;
- *hash = skb_get_hash(_ctx->skb);
+ *hash = skb_get_hash(skb);
+ *rss_type = skb->l4_hash ? XDP_RSS_TYPE_L4_ANY : XDP_RSS_TYPE_NONE;
+
return 0;
}
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index e2560b6f7980..8d8038538fc4 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -815,8 +815,13 @@ static struct page *xdp_linearize_page(struct receive_queue *rq,
int page_off,
unsigned int *len)
{
- struct page *page = alloc_page(GFP_ATOMIC);
+ int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ struct page *page;
+
+ if (page_off + *len + tailroom > PAGE_SIZE)
+ return NULL;
+ page = alloc_page(GFP_ATOMIC);
if (!page)
return NULL;
@@ -824,7 +829,6 @@ static struct page *xdp_linearize_page(struct receive_queue *rq,
page_off += *len;
while (--*num_buf) {
- int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
unsigned int buflen;
void *buf;
int off;
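The reordered tailroom computation above exists to feed an overflow guard; restated as a comment, with the bounds taken from the hunk itself:

	/*
	 * Before xdp_linearize_page() allocates its page, it now checks
	 *	page_off + *len + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
	 * against PAGE_SIZE, so the headroom, the first buffer and the
	 * shared-info tailroom are all known to fit before anything is
	 * copied into the new page.
	 */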
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index da488cbb0542..f2b76ee866a4 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1504,7 +1504,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
goto rcd_done;
}
- if (rxDataRingUsed) {
+ if (rxDataRingUsed && adapter->rxdataring_enabled) {
size_t sz;
BUG_ON(rcd->len > rq->data_ring.desc_size);
diff --git a/drivers/net/wireless/ath/ath11k/mhi.c b/drivers/net/wireless/ath/ath11k/mhi.c
index 86995e8dc913..a62ee05c5409 100644
--- a/drivers/net/wireless/ath/ath11k/mhi.c
+++ b/drivers/net/wireless/ath/ath11k/mhi.c
@@ -16,7 +16,7 @@
#include "pci.h"
#include "pcic.h"
-#define MHI_TIMEOUT_DEFAULT_MS 90000
+#define MHI_TIMEOUT_DEFAULT_MS 20000
#define RDDM_DUMP_SIZE 0x420000
static struct mhi_channel_config ath11k_mhi_channels_qca6390[] = {
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index f5dc3bb11b64..ff710b0b5071 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -1001,15 +1001,34 @@ static const struct sdio_device_id brcmf_sdmmc_ids[] = {
MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
-static void brcmf_sdiod_acpi_set_power_manageable(struct device *dev,
- int val)
+static void brcmf_sdiod_acpi_save_power_manageable(struct brcmf_sdio_dev *sdiodev)
{
#if IS_ENABLED(CONFIG_ACPI)
struct acpi_device *adev;
- adev = ACPI_COMPANION(dev);
+ adev = ACPI_COMPANION(&sdiodev->func1->dev);
if (adev)
- adev->flags.power_manageable = 0;
+ sdiodev->func1_power_manageable = adev->flags.power_manageable;
+
+ adev = ACPI_COMPANION(&sdiodev->func2->dev);
+ if (adev)
+ sdiodev->func2_power_manageable = adev->flags.power_manageable;
+#endif
+}
+
+static void brcmf_sdiod_acpi_set_power_manageable(struct brcmf_sdio_dev *sdiodev,
+ int enable)
+{
+#if IS_ENABLED(CONFIG_ACPI)
+ struct acpi_device *adev;
+
+ adev = ACPI_COMPANION(&sdiodev->func1->dev);
+ if (adev)
+ adev->flags.power_manageable = enable ? sdiodev->func1_power_manageable : 0;
+
+ adev = ACPI_COMPANION(&sdiodev->func2->dev);
+ if (adev)
+ adev->flags.power_manageable = enable ? sdiodev->func2_power_manageable : 0;
#endif
}
@@ -1019,7 +1038,6 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
int err;
struct brcmf_sdio_dev *sdiodev;
struct brcmf_bus *bus_if;
- struct device *dev;
brcmf_dbg(SDIO, "Enter\n");
brcmf_dbg(SDIO, "Class=%x\n", func->class);
@@ -1027,14 +1045,9 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
brcmf_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device);
brcmf_dbg(SDIO, "Function#: %d\n", func->num);
- dev = &func->dev;
-
/* Set MMC_QUIRK_LENIENT_FN0 for this card */
func->card->quirks |= MMC_QUIRK_LENIENT_FN0;
- /* prohibit ACPI power management for this device */
- brcmf_sdiod_acpi_set_power_manageable(dev, 0);
-
/* Consume func num 1 but don't do anything with it. */
if (func->num == 1)
return 0;
@@ -1066,6 +1079,7 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
dev_set_drvdata(&sdiodev->func1->dev, bus_if);
sdiodev->dev = &sdiodev->func1->dev;
+ brcmf_sdiod_acpi_save_power_manageable(sdiodev);
brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_DOWN);
brcmf_dbg(SDIO, "F2 found, calling brcmf_sdiod_probe...\n");
@@ -1131,6 +1145,8 @@ void brcmf_sdio_wowl_config(struct device *dev, bool enabled)
if (sdiodev->settings->bus.sdio.oob_irq_supported ||
pm_caps & MMC_PM_WAKE_SDIO_IRQ) {
+ /* Stop ACPI from turning off the device when wowl is enabled */
+ brcmf_sdiod_acpi_set_power_manageable(sdiodev, !enabled);
sdiodev->wowl_enabled = enabled;
brcmf_dbg(SDIO, "Configuring WOWL, enabled=%d\n", enabled);
return;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
index b76d34d36bde..0d18ed15b403 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
@@ -188,6 +188,8 @@ struct brcmf_sdio_dev {
char nvram_name[BRCMF_FW_NAME_LEN];
char clm_name[BRCMF_FW_NAME_LEN];
bool wowl_enabled;
+ bool func1_power_manageable;
+ bool func2_power_manageable;
enum brcmf_sdiod_state state;
struct brcmf_sdiod_freezer *freezer;
const struct firmware *clm_fw;
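
The shape of the change: the ACPI-provided power_manageable flags are recorded once at probe and then gated on the WoWLAN state, instead of being cleared unconditionally. A compressed userspace model of that save/restore (field names assumed):

    #include <stdbool.h>

    struct sdio_func_state {
            bool saved_power_manageable;   /* captured at probe time */
            bool power_manageable;         /* what ACPI PM currently sees */
    };

    static void set_wowl(struct sdio_func_state *f, bool wowl_enabled)
    {
            /* while wowl is armed, keep ACPI from powering the card off */
            f->power_manageable = wowl_enabled ? false
                                               : f->saved_power_manageable;
    }

    int main(void)
    {
            struct sdio_func_state f = { true, true };

            set_wowl(&f, true);    /* arm wowl: f.power_manageable == false */
            set_wowl(&f, false);   /* disarm:   restored to true */
            return f.power_manageable ? 0 : 1;
    }
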
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
index ca50feb0b3a9..1b1358c6bb46 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
@@ -512,15 +512,15 @@ mt7603_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
return -EOPNOTSUPP;
- if (cmd == SET_KEY) {
- key->hw_key_idx = wcid->idx;
- wcid->hw_key_idx = idx;
- } else {
+ if (cmd != SET_KEY) {
if (idx == wcid->hw_key_idx)
wcid->hw_key_idx = -1;
- key = NULL;
+ return 0;
}
+
+ key->hw_key_idx = wcid->idx;
+ wcid->hw_key_idx = idx;
mt76_wcid_key_setup(&dev->mt76, wcid, key);
return mt7603_wtbl_set_key(dev, wcid->idx, key);
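
The same restructuring recurs in several mt76 drivers below (mt76x02, mt7615, mt7915, mt7921, mt7996): key removal is handled first and returns early, so the install path no longer threads a key = NULL sentinel through the setup helpers. A self-contained sketch of the resulting control flow:

    enum set_key_cmd { SET_KEY, DISABLE_KEY };

    struct wcid { int idx; int hw_key_idx; };
    struct key  { int hw_key_idx; };

    static int program_hw_key(struct wcid *wcid, struct key *key)
    {
            (void)wcid; (void)key;
            return 0;   /* stand-in for the wtbl/MCU programming call */
    }

    static int set_key_sketch(enum set_key_cmd cmd, int idx,
                              struct wcid *wcid, struct key *key)
    {
            if (cmd != SET_KEY) {            /* removal path exits early */
                    if (idx == wcid->hw_key_idx)
                            wcid->hw_key_idx = -1;
                    return 0;
            }

            key->hw_key_idx = wcid->idx;     /* install path, no NULL sentinel */
            wcid->hw_key_idx = idx;
            return program_hw_key(wcid, key);
    }
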
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
index d90378a30d15..da1d17b73a25 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
@@ -1188,8 +1188,7 @@ EXPORT_SYMBOL_GPL(mt7615_mac_enable_rtscts);
static int
mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
struct ieee80211_key_conf *key,
- enum mt76_cipher_type cipher, u16 cipher_mask,
- enum set_key_cmd cmd)
+ enum mt76_cipher_type cipher, u16 cipher_mask)
{
u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx) + 30 * 4;
u8 data[32] = {};
@@ -1198,27 +1197,18 @@ mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
return -EINVAL;
mt76_rr_copy(dev, addr, data, sizeof(data));
- if (cmd == SET_KEY) {
- if (cipher == MT_CIPHER_TKIP) {
- /* Rx/Tx MIC keys are swapped */
- memcpy(data, key->key, 16);
- memcpy(data + 16, key->key + 24, 8);
- memcpy(data + 24, key->key + 16, 8);
- } else {
- if (cipher_mask == BIT(cipher))
- memcpy(data, key->key, key->keylen);
- else if (cipher != MT_CIPHER_BIP_CMAC_128)
- memcpy(data, key->key, 16);
- if (cipher == MT_CIPHER_BIP_CMAC_128)
- memcpy(data + 16, key->key, 16);
- }
+ if (cipher == MT_CIPHER_TKIP) {
+ /* Rx/Tx MIC keys are swapped */
+ memcpy(data, key->key, 16);
+ memcpy(data + 16, key->key + 24, 8);
+ memcpy(data + 24, key->key + 16, 8);
} else {
+ if (cipher_mask == BIT(cipher))
+ memcpy(data, key->key, key->keylen);
+ else if (cipher != MT_CIPHER_BIP_CMAC_128)
+ memcpy(data, key->key, 16);
if (cipher == MT_CIPHER_BIP_CMAC_128)
- memset(data + 16, 0, 16);
- else if (cipher_mask)
- memset(data, 0, 16);
- if (!cipher_mask)
- memset(data, 0, sizeof(data));
+ memcpy(data + 16, key->key, 16);
}
mt76_wr_copy(dev, addr, data, sizeof(data));
@@ -1229,7 +1219,7 @@ mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
static int
mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid,
enum mt76_cipher_type cipher, u16 cipher_mask,
- int keyidx, enum set_key_cmd cmd)
+ int keyidx)
{
u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx), w0, w1;
@@ -1248,9 +1238,7 @@ mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid,
else
w0 &= ~MT_WTBL_W0_RX_IK_VALID;
- if (cmd == SET_KEY &&
- (cipher != MT_CIPHER_BIP_CMAC_128 ||
- cipher_mask == BIT(cipher))) {
+ if (cipher != MT_CIPHER_BIP_CMAC_128 || cipher_mask == BIT(cipher)) {
w0 &= ~MT_WTBL_W0_KEY_IDX;
w0 |= FIELD_PREP(MT_WTBL_W0_KEY_IDX, keyidx);
}
@@ -1267,19 +1255,10 @@ mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid,
static void
mt7615_mac_wtbl_update_cipher(struct mt7615_dev *dev, struct mt76_wcid *wcid,
- enum mt76_cipher_type cipher, u16 cipher_mask,
- enum set_key_cmd cmd)
+ enum mt76_cipher_type cipher, u16 cipher_mask)
{
u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx);
- if (!cipher_mask) {
- mt76_clear(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE);
- return;
- }
-
- if (cmd != SET_KEY)
- return;
-
if (cipher == MT_CIPHER_BIP_CMAC_128 &&
cipher_mask & ~BIT(MT_CIPHER_BIP_CMAC_128))
return;
@@ -1290,8 +1269,7 @@ mt7615_mac_wtbl_update_cipher(struct mt7615_dev *dev, struct mt76_wcid *wcid,
int __mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
struct mt76_wcid *wcid,
- struct ieee80211_key_conf *key,
- enum set_key_cmd cmd)
+ struct ieee80211_key_conf *key)
{
enum mt76_cipher_type cipher;
u16 cipher_mask = wcid->cipher;
@@ -1301,19 +1279,14 @@ int __mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
if (cipher == MT_CIPHER_NONE)
return -EOPNOTSUPP;
- if (cmd == SET_KEY)
- cipher_mask |= BIT(cipher);
- else
- cipher_mask &= ~BIT(cipher);
-
- mt7615_mac_wtbl_update_cipher(dev, wcid, cipher, cipher_mask, cmd);
- err = mt7615_mac_wtbl_update_key(dev, wcid, key, cipher, cipher_mask,
- cmd);
+ cipher_mask |= BIT(cipher);
+ mt7615_mac_wtbl_update_cipher(dev, wcid, cipher, cipher_mask);
+ err = mt7615_mac_wtbl_update_key(dev, wcid, key, cipher, cipher_mask);
if (err < 0)
return err;
err = mt7615_mac_wtbl_update_pk(dev, wcid, cipher, cipher_mask,
- key->keyidx, cmd);
+ key->keyidx);
if (err < 0)
return err;
@@ -1324,13 +1297,12 @@ int __mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
int mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
struct mt76_wcid *wcid,
- struct ieee80211_key_conf *key,
- enum set_key_cmd cmd)
+ struct ieee80211_key_conf *key)
{
int err;
spin_lock_bh(&dev->mt76.lock);
- err = __mt7615_mac_wtbl_set_key(dev, wcid, key, cmd);
+ err = __mt7615_mac_wtbl_set_key(dev, wcid, key);
spin_unlock_bh(&dev->mt76.lock);
return err;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
index ab4c1b4478aa..dadb13f2ca09 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
@@ -391,18 +391,17 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
if (cmd == SET_KEY)
*wcid_keyidx = idx;
- else if (idx == *wcid_keyidx)
- *wcid_keyidx = -1;
- else
+ else {
+ if (idx == *wcid_keyidx)
+ *wcid_keyidx = -1;
goto out;
+ }
- mt76_wcid_key_setup(&dev->mt76, wcid,
- cmd == SET_KEY ? key : NULL);
-
+ mt76_wcid_key_setup(&dev->mt76, wcid, key);
if (mt76_is_mmio(&dev->mt76))
- err = mt7615_mac_wtbl_set_key(dev, wcid, key, cmd);
+ err = mt7615_mac_wtbl_set_key(dev, wcid, key);
else
- err = __mt7615_mac_wtbl_set_key(dev, wcid, key, cmd);
+ err = __mt7615_mac_wtbl_set_key(dev, wcid, key);
out:
mt7615_mutex_release(dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
index 0381c53bc96a..582d1b5b7cb3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
@@ -482,11 +482,9 @@ int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
void mt7615_mac_set_timing(struct mt7615_phy *phy);
int __mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
struct mt76_wcid *wcid,
- struct ieee80211_key_conf *key,
- enum set_key_cmd cmd);
+ struct ieee80211_key_conf *key);
int mt7615_mac_wtbl_set_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
- struct ieee80211_key_conf *key,
- enum set_key_cmd cmd);
+ struct ieee80211_key_conf *key);
void mt7615_mac_reset_work(struct work_struct *work);
u32 mt7615_mac_get_sta_tid_sn(struct mt7615_dev *dev, int wcid, u8 tid);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
index 7451a63206a5..dcbb5c605dfe 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
@@ -454,20 +454,20 @@ int mt76x02_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
msta = sta ? (struct mt76x02_sta *)sta->drv_priv : NULL;
wcid = msta ? &msta->wcid : &mvif->group_wcid;
- if (cmd == SET_KEY) {
- key->hw_key_idx = wcid->idx;
- wcid->hw_key_idx = idx;
- if (key->flags & IEEE80211_KEY_FLAG_RX_MGMT) {
- key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
- wcid->sw_iv = true;
- }
- } else {
+ if (cmd != SET_KEY) {
if (idx == wcid->hw_key_idx) {
wcid->hw_key_idx = -1;
wcid->sw_iv = false;
}
- key = NULL;
+ return 0;
+ }
+
+ key->hw_key_idx = wcid->idx;
+ wcid->hw_key_idx = idx;
+ if (key->flags & IEEE80211_KEY_FLAG_RX_MGMT) {
+ key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
+ wcid->sw_iv = true;
}
mt76_wcid_key_setup(&dev->mt76, wcid, key);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
index 2ada2806de66..1b361199c061 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
@@ -409,16 +409,15 @@ static int mt7915_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
mt7915_mcu_add_bss_info(phy, vif, true);
}
- if (cmd == SET_KEY)
+ if (cmd == SET_KEY) {
*wcid_keyidx = idx;
- else if (idx == *wcid_keyidx)
- *wcid_keyidx = -1;
- else
+ } else {
+ if (idx == *wcid_keyidx)
+ *wcid_keyidx = -1;
goto out;
+ }
- mt76_wcid_key_setup(&dev->mt76, wcid,
- cmd == SET_KEY ? key : NULL);
-
+ mt76_wcid_key_setup(&dev->mt76, wcid, key);
err = mt76_connac_mcu_add_key(&dev->mt76, vif, &msta->bip,
key, MCU_EXT_CMD(STA_REC_UPDATE),
&msta->wcid, cmd);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/init.c b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
index e929f6eb65ce..bf1da9fddfab 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
@@ -176,12 +176,12 @@ mt7921_mac_init_band(struct mt7921_dev *dev, u8 band)
static u8
mt7921_get_offload_capability(struct device *dev, const char *fw_wm)
{
- struct mt7921_fw_features *features = NULL;
const struct mt76_connac2_fw_trailer *hdr;
struct mt7921_realease_info *rel_info;
const struct firmware *fw;
int ret, i, offset = 0;
const u8 *data, *end;
+ u8 offload_caps = 0;
ret = request_firmware(&fw, fw_wm, dev);
if (ret)
@@ -213,7 +213,10 @@ mt7921_get_offload_capability(struct device *dev, const char *fw_wm)
data += sizeof(*rel_info);
if (rel_info->tag == MT7921_FW_TAG_FEATURE) {
+ struct mt7921_fw_features *features;
+
features = (struct mt7921_fw_features *)data;
+ offload_caps = features->data;
break;
}
@@ -223,7 +226,7 @@ mt7921_get_offload_capability(struct device *dev, const char *fw_wm)
out:
release_firmware(fw);
- return features ? features->data : 0;
+ return offload_caps;
}
struct ieee80211_ops *
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
index 0c9a472bc81a..3b6adb29cbef 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
@@ -548,16 +548,15 @@ static int mt7921_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
mt7921_mutex_acquire(dev);
- if (cmd == SET_KEY)
+ if (cmd == SET_KEY) {
*wcid_keyidx = idx;
- else if (idx == *wcid_keyidx)
- *wcid_keyidx = -1;
- else
+ } else {
+ if (idx == *wcid_keyidx)
+ *wcid_keyidx = -1;
goto out;
+ }
- mt76_wcid_key_setup(&dev->mt76, wcid,
- cmd == SET_KEY ? key : NULL);
-
+ mt76_wcid_key_setup(&dev->mt76, wcid, key);
err = mt76_connac_mcu_add_key(&dev->mt76, vif, &msta->bip,
key, MCU_UNI_CMD(STA_REC_UPDATE),
&msta->wcid, cmd);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
index 1c727870bbdb..ddb1fa4ee01d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
@@ -20,7 +20,7 @@ static const struct pci_device_id mt7921_pci_device_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x0608),
.driver_data = (kernel_ulong_t)MT7921_FIRMWARE_WM },
{ PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x0616),
- .driver_data = (kernel_ulong_t)MT7921_FIRMWARE_WM },
+ .driver_data = (kernel_ulong_t)MT7922_FIRMWARE_WM },
{ },
};
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/main.c b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
index 0975774fe244..f306e9c50ea3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
@@ -356,16 +356,15 @@ static int mt7996_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
mt7996_mcu_add_bss_info(phy, vif, true);
}
- if (cmd == SET_KEY)
+ if (cmd == SET_KEY) {
*wcid_keyidx = idx;
- else if (idx == *wcid_keyidx)
- *wcid_keyidx = -1;
- else
+ } else {
+ if (idx == *wcid_keyidx)
+ *wcid_keyidx = -1;
goto out;
+ }
- mt76_wcid_key_setup(&dev->mt76, wcid,
- cmd == SET_KEY ? key : NULL);
-
+ mt76_wcid_key_setup(&dev->mt76, wcid, key);
err = mt7996_mcu_add_key(&dev->mt76, vif, &msta->bip,
key, MCU_WMWA_UNI_CMD(STA_REC_UPDATE),
&msta->wcid, cmd);
diff --git a/drivers/net/wwan/iosm/iosm_ipc_pcie.c b/drivers/net/wwan/iosm/iosm_ipc_pcie.c
index 5bf5a93937c9..04517bd3325a 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_pcie.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_pcie.c
@@ -295,7 +295,7 @@ static int ipc_pcie_probe(struct pci_dev *pci,
ret = dma_set_mask(ipc_pcie->dev, DMA_BIT_MASK(64));
if (ret) {
dev_err(ipc_pcie->dev, "Could not set PCI DMA mask: %d", ret);
- return ret;
+ goto set_mask_fail;
}
ipc_pcie_config_aspm(ipc_pcie);
@@ -323,6 +323,7 @@ static int ipc_pcie_probe(struct pci_dev *pci,
imem_init_fail:
ipc_pcie_resources_release(ipc_pcie);
resources_req_fail:
+set_mask_fail:
pci_disable_device(pci);
pci_enable_fail:
kfree(ipc_pcie);
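
The fix is the standard kernel unwind idiom: every failure after pci_enable_device() must exit through the shared label chain rather than returning directly, or the enabled device leaks. A minimal standalone model of the corrected flow:

    #include <stdio.h>

    static int  enable_device(void)  { return 0; }
    static int  set_dma_mask(void)   { return -5; }   /* simulate failure */
    static void disable_device(void) { puts("device disabled"); }

    static int probe(void)
    {
            int ret = enable_device();
            if (ret)
                    return ret;               /* nothing to unwind yet */

            ret = set_dma_mask();
            if (ret)
                    goto set_mask_fail;       /* was "return ret;" -- leaked */
            return 0;

    set_mask_fail:
            disable_device();                 /* unwinds enable_device() */
            return ret;
    }

    int main(void) { return probe() ? 1 : 0; }
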
diff --git a/drivers/net/wwan/rpmsg_wwan_ctrl.c b/drivers/net/wwan/rpmsg_wwan_ctrl.c
index 06f4b02f1552..86b60aadfa11 100644
--- a/drivers/net/wwan/rpmsg_wwan_ctrl.c
+++ b/drivers/net/wwan/rpmsg_wwan_ctrl.c
@@ -149,6 +149,7 @@ static const struct rpmsg_device_id rpmsg_wwan_ctrl_id_table[] = {
/* RPMSG channels for Qualcomm SoCs with integrated modem */
{ .name = "DATA5_CNTL", .driver_data = WWAN_PORT_QMI },
{ .name = "DATA4", .driver_data = WWAN_PORT_AT },
+ { .name = "DATA1", .driver_data = WWAN_PORT_AT },
{},
};
MODULE_DEVICE_TABLE(rpmsg, rpmsg_wwan_ctrl_id_table);
diff --git a/drivers/net/wwan/t7xx/Makefile b/drivers/net/wwan/t7xx/Makefile
index 268ff9e87e5b..2652cd00504e 100644
--- a/drivers/net/wwan/t7xx/Makefile
+++ b/drivers/net/wwan/t7xx/Makefile
@@ -1,7 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
-ccflags-y += -Werror
-
obj-${CONFIG_MTK_T7XX} := mtk_t7xx.o
mtk_t7xx-y:= t7xx_pci.o \
t7xx_pcie_mac.o \
diff --git a/drivers/net/wwan/wwan_core.c b/drivers/net/wwan/wwan_core.c
index 2e1c01cf00a9..aa54fa6d5f90 100644
--- a/drivers/net/wwan/wwan_core.c
+++ b/drivers/net/wwan/wwan_core.c
@@ -492,6 +492,7 @@ struct wwan_port *wwan_create_port(struct device *parent,
if (err)
goto error_put_device;
+ dev_info(&wwandev->dev, "port %s attached\n", dev_name(&port->dev));
return port;
error_put_device:
@@ -517,6 +518,8 @@ void wwan_remove_port(struct wwan_port *port)
skb_queue_purge(&port->rxq);
dev_set_drvdata(&port->dev, NULL);
+
+ dev_info(&wwandev->dev, "port %s disconnected\n", dev_name(&port->dev));
device_unregister(&port->dev);
/* Release related wwan device */
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 53ef028596c6..d6a9bac91a4c 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1674,6 +1674,9 @@ static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
struct request_queue *queue = disk->queue;
u32 size = queue_logical_block_size(queue);
+ if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(ns, UINT_MAX))
+ ctrl->max_discard_sectors = nvme_lba_to_sect(ns, ctrl->dmrsl);
+
if (ctrl->max_discard_sectors == 0) {
blk_queue_max_discard_sectors(queue, 0);
return;
@@ -1688,9 +1691,6 @@ static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
if (queue->limits.max_discard_sectors)
return;
- if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(ns, UINT_MAX))
- ctrl->max_discard_sectors = nvme_lba_to_sect(ns, ctrl->dmrsl);
-
blk_queue_max_discard_sectors(queue, ctrl->max_discard_sectors);
blk_queue_max_discard_segments(queue, ctrl->max_discard_segments);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index b615906263f3..cd7873de3121 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -3441,6 +3441,9 @@ static const struct pci_device_id nvme_id_table[] = {
{ PCI_DEVICE(0x1d97, 0x1d97), /* Lexar NM620 */
.driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1d97, 0x2269), /* Lexar NM760 */
+ .driver_data = NVME_QUIRK_BOGUS_NID |
+ NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+ { PCI_DEVICE(0x10ec, 0x5763), /* TEAMGROUP T-FORCE CARDEA ZERO Z330 SSD */
.driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061),
.driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 42c0598c31f2..49c9e7bc9116 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -1620,22 +1620,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid)
if (ret)
goto err_init_connect;
- queue->rd_enabled = true;
set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags);
- nvme_tcp_init_recv_ctx(queue);
-
- write_lock_bh(&queue->sock->sk->sk_callback_lock);
- queue->sock->sk->sk_user_data = queue;
- queue->state_change = queue->sock->sk->sk_state_change;
- queue->data_ready = queue->sock->sk->sk_data_ready;
- queue->write_space = queue->sock->sk->sk_write_space;
- queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
- queue->sock->sk->sk_state_change = nvme_tcp_state_change;
- queue->sock->sk->sk_write_space = nvme_tcp_write_space;
-#ifdef CONFIG_NET_RX_BUSY_POLL
- queue->sock->sk->sk_ll_usec = 1;
-#endif
- write_unlock_bh(&queue->sock->sk->sk_callback_lock);
return 0;
@@ -1655,7 +1640,7 @@ err_destroy_mutex:
return ret;
}
-static void nvme_tcp_restore_sock_calls(struct nvme_tcp_queue *queue)
+static void nvme_tcp_restore_sock_ops(struct nvme_tcp_queue *queue)
{
struct socket *sock = queue->sock;
@@ -1670,7 +1655,7 @@ static void nvme_tcp_restore_sock_calls(struct nvme_tcp_queue *queue)
static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
{
kernel_sock_shutdown(queue->sock, SHUT_RDWR);
- nvme_tcp_restore_sock_calls(queue);
+ nvme_tcp_restore_sock_ops(queue);
cancel_work_sync(&queue->io_work);
}
@@ -1688,21 +1673,42 @@ static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
mutex_unlock(&queue->queue_lock);
}
+static void nvme_tcp_setup_sock_ops(struct nvme_tcp_queue *queue)
+{
+ write_lock_bh(&queue->sock->sk->sk_callback_lock);
+ queue->sock->sk->sk_user_data = queue;
+ queue->state_change = queue->sock->sk->sk_state_change;
+ queue->data_ready = queue->sock->sk->sk_data_ready;
+ queue->write_space = queue->sock->sk->sk_write_space;
+ queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
+ queue->sock->sk->sk_state_change = nvme_tcp_state_change;
+ queue->sock->sk->sk_write_space = nvme_tcp_write_space;
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ queue->sock->sk->sk_ll_usec = 1;
+#endif
+ write_unlock_bh(&queue->sock->sk->sk_callback_lock);
+}
+
static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
{
struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
+ struct nvme_tcp_queue *queue = &ctrl->queues[idx];
int ret;
+ queue->rd_enabled = true;
+ nvme_tcp_init_recv_ctx(queue);
+ nvme_tcp_setup_sock_ops(queue);
+
if (idx)
ret = nvmf_connect_io_queue(nctrl, idx);
else
ret = nvmf_connect_admin_queue(nctrl);
if (!ret) {
- set_bit(NVME_TCP_Q_LIVE, &ctrl->queues[idx].flags);
+ set_bit(NVME_TCP_Q_LIVE, &queue->flags);
} else {
- if (test_bit(NVME_TCP_Q_ALLOCATED, &ctrl->queues[idx].flags))
- __nvme_tcp_stop_queue(&ctrl->queues[idx]);
+ if (test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
+ __nvme_tcp_stop_queue(queue);
dev_err(nctrl->device,
"failed to connect queue: %d ret=%d\n", idx, ret);
}
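
After this change the socket callback wiring lives entirely inside the start/stop pair, so a queue that was allocated but never started has nothing to undo at teardown. A toy model of that symmetry (not the driver's actual types):

    #include <stdbool.h>
    #include <stdio.h>

    struct queue { bool ops_installed; };

    static void setup_sock_ops(struct queue *q)   { q->ops_installed = true;  }
    static void restore_sock_ops(struct queue *q) { q->ops_installed = false; }

    /* start_queue() is now the only place that rewires the callbacks */
    static int start_queue(struct queue *q, int (*connect)(struct queue *))
    {
            setup_sock_ops(q);
            int ret = connect(q);
            if (ret)
                    restore_sock_ops(q);   /* mirrors __nvme_tcp_stop_queue() */
            return ret;
    }

    static int ok(struct queue *q) { (void)q; return 0; }

    int main(void)
    {
            struct queue q = { false };

            printf("%d %d\n", start_queue(&q, ok), q.ops_installed); /* 0 1 */
            return 0;
    }
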
diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c
index 07d93753b12f..e311d406b170 100644
--- a/drivers/of/dynamic.c
+++ b/drivers/of/dynamic.c
@@ -226,6 +226,7 @@ static void __of_attach_node(struct device_node *np)
np->sibling = np->parent->child;
np->parent->child = np;
of_node_clear_flag(np, OF_DETACHED);
+ np->fwnode.flags |= FWNODE_FLAG_NOT_DEVICE;
}
/**
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index b2bd2e783445..78ae84187449 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -737,6 +737,11 @@ static int of_platform_notify(struct notifier_block *nb,
if (of_node_check_flag(rd->dn, OF_POPULATED))
return NOTIFY_OK;
+ /*
+ * Clear the flag before adding the device so that fw_devlink
+ * doesn't skip adding consumers to this device.
+ */
+ rd->dn->fwnode.flags &= ~FWNODE_FLAG_NOT_DEVICE;
/* pdev_parent may be NULL when no bus platform device */
pdev_parent = of_find_device_by_node(rd->dn->parent);
pdev = of_platform_device_create(rd->dn, NULL,
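
Taken together with the __of_attach_node() hunk above, the flag traces a simple lifecycle: set while the node has no struct device, cleared just before one is created so fw_devlink will link consumers to it. A standalone sketch (the bit value is assumed for illustration):

    #include <stdio.h>

    #define FWNODE_FLAG_NOT_DEVICE (1u << 1)   /* assumed bit value */

    int main(void)
    {
            unsigned int flags = 0;

            flags |= FWNODE_FLAG_NOT_DEVICE;    /* attach: no device yet,
                                                   fw_devlink must not wait */
            flags &= ~FWNODE_FLAG_NOT_DEVICE;   /* notify: device about to be
                                                   created, allow consumers */
            printf("%u\n", flags & FWNODE_FLAG_NOT_DEVICE);   /* 0 */
            return 0;
    }
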
diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
index 53a16b8b6ac2..8e33e6e59e68 100644
--- a/drivers/pci/controller/dwc/pcie-designware.c
+++ b/drivers/pci/controller/dwc/pcie-designware.c
@@ -1001,11 +1001,6 @@ void dw_pcie_setup(struct dw_pcie *pci)
dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
}
- val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
- val &= ~PORT_LINK_FAST_LINK_MODE;
- val |= PORT_LINK_DLL_LINK_EN;
- dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);
-
if (dw_pcie_cap_is(pci, CDM_CHECK)) {
val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
val |= PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS |
@@ -1013,6 +1008,11 @@ void dw_pcie_setup(struct dw_pcie *pci)
dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val);
}
+ val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
+ val &= ~PORT_LINK_FAST_LINK_MODE;
+ val |= PORT_LINK_DLL_LINK_EN;
+ dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);
+
if (!pci->num_lanes) {
dev_dbg(pci->dev, "Using h/w default number of lanes\n");
return;
diff --git a/drivers/pci/doe.c b/drivers/pci/doe.c
index 66d9ab288646..e5e9b287b976 100644
--- a/drivers/pci/doe.c
+++ b/drivers/pci/doe.c
@@ -128,7 +128,7 @@ static int pci_doe_send_req(struct pci_doe_mb *doe_mb,
return -EIO;
/* Length is 2 DW of header + length of payload in DW */
- length = 2 + task->request_pl_sz / sizeof(u32);
+ length = 2 + task->request_pl_sz / sizeof(__le32);
if (length > PCI_DOE_MAX_LENGTH)
return -EIO;
if (length == PCI_DOE_MAX_LENGTH)
@@ -141,9 +141,9 @@ static int pci_doe_send_req(struct pci_doe_mb *doe_mb,
pci_write_config_dword(pdev, offset + PCI_DOE_WRITE,
FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_2_LENGTH,
length));
- for (i = 0; i < task->request_pl_sz / sizeof(u32); i++)
+ for (i = 0; i < task->request_pl_sz / sizeof(__le32); i++)
pci_write_config_dword(pdev, offset + PCI_DOE_WRITE,
- task->request_pl[i]);
+ le32_to_cpu(task->request_pl[i]));
pci_doe_write_ctrl(doe_mb, PCI_DOE_CTRL_GO);
@@ -195,11 +195,11 @@ static int pci_doe_recv_resp(struct pci_doe_mb *doe_mb, struct pci_doe_task *tas
/* First 2 dwords have already been read */
length -= 2;
- payload_length = min(length, task->response_pl_sz / sizeof(u32));
+ payload_length = min(length, task->response_pl_sz / sizeof(__le32));
/* Read the rest of the response payload */
for (i = 0; i < payload_length; i++) {
- pci_read_config_dword(pdev, offset + PCI_DOE_READ,
- &task->response_pl[i]);
+ pci_read_config_dword(pdev, offset + PCI_DOE_READ, &val);
+ task->response_pl[i] = cpu_to_le32(val);
/* Prior to the last ack, ensure Data Object Ready */
if (i == (payload_length - 1) && !pci_doe_data_obj_ready(doe_mb))
return -EIO;
@@ -217,13 +217,14 @@ static int pci_doe_recv_resp(struct pci_doe_mb *doe_mb, struct pci_doe_task *tas
if (FIELD_GET(PCI_DOE_STATUS_ERROR, val))
return -EIO;
- return min(length, task->response_pl_sz / sizeof(u32)) * sizeof(u32);
+ return min(length, task->response_pl_sz / sizeof(__le32)) * sizeof(__le32);
}
static void signal_task_complete(struct pci_doe_task *task, int rv)
{
task->rv = rv;
task->complete(task);
+ destroy_work_on_stack(&task->work);
}
static void signal_task_abort(struct pci_doe_task *task, int rv)
@@ -317,14 +318,16 @@ static int pci_doe_discovery(struct pci_doe_mb *doe_mb, u8 *index, u16 *vid,
{
u32 request_pl = FIELD_PREP(PCI_DOE_DATA_OBJECT_DISC_REQ_3_INDEX,
*index);
+ __le32 request_pl_le = cpu_to_le32(request_pl);
+ __le32 response_pl_le;
u32 response_pl;
DECLARE_COMPLETION_ONSTACK(c);
struct pci_doe_task task = {
.prot.vid = PCI_VENDOR_ID_PCI_SIG,
.prot.type = PCI_DOE_PROTOCOL_DISCOVERY,
- .request_pl = &request_pl,
+ .request_pl = &request_pl_le,
.request_pl_sz = sizeof(request_pl),
- .response_pl = &response_pl,
+ .response_pl = &response_pl_le,
.response_pl_sz = sizeof(response_pl),
.complete = pci_doe_task_complete,
.private = &c,
@@ -340,6 +343,7 @@ static int pci_doe_discovery(struct pci_doe_mb *doe_mb, u8 *index, u16 *vid,
if (task.rv != sizeof(response_pl))
return -EIO;
+ response_pl = le32_to_cpu(response_pl_le);
*vid = FIELD_GET(PCI_DOE_DATA_OBJECT_DISC_RSP_3_VID, response_pl);
*protocol = FIELD_GET(PCI_DOE_DATA_OBJECT_DISC_RSP_3_PROTOCOL,
response_pl);
@@ -520,6 +524,8 @@ EXPORT_SYMBOL_GPL(pci_doe_supports_prot);
* task->complete will be called when the state machine is done processing this
* task.
*
+ * @task must be allocated on the stack.
+ *
* Excess data will be discarded.
*
* RETURNS: 0 when task has been successfully queued, -ERRNO on error
@@ -533,15 +539,15 @@ int pci_doe_submit_task(struct pci_doe_mb *doe_mb, struct pci_doe_task *task)
* DOE requests must be a whole number of DW and the response needs to
* be big enough for at least 1 DW
*/
- if (task->request_pl_sz % sizeof(u32) ||
- task->response_pl_sz < sizeof(u32))
+ if (task->request_pl_sz % sizeof(__le32) ||
+ task->response_pl_sz < sizeof(__le32))
return -EINVAL;
if (test_bit(PCI_DOE_FLAG_DEAD, &doe_mb->flags))
return -EIO;
task->doe_mb = doe_mb;
- INIT_WORK(&task->work, doe_statemachine_work);
+ INIT_WORK_ONSTACK(&task->work, doe_statemachine_work);
queue_work(doe_mb->work_queue, &task->work);
return 0;
}
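
The conversion makes the DOE payload explicitly little-endian on the wire. A userspace model of the round-trip, standing in for the kernel's cpu_to_le32()/le32_to_cpu():

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t cpu_to_le32(uint32_t v)
    {
    #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
            return __builtin_bswap32(v);    /* swap on big-endian hosts */
    #else
            return v;                       /* no-op on little-endian hosts */
    #endif
    }
    #define le32_to_cpu cpu_to_le32         /* the swap is its own inverse */

    int main(void)
    {
            uint32_t request_pl = 0x00010203;          /* CPU byte order */
            uint32_t wire = cpu_to_le32(request_pl);   /* what config space sees */

            printf("0x%08x\n", le32_to_cpu(wire));     /* 0x00010203 again */
            return 0;
    }
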
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index 0145aef1b930..22d39e12b236 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -157,8 +157,6 @@ void pci_remove_root_bus(struct pci_bus *bus)
list_for_each_entry_safe(child, tmp,
&bus->devices, bus_list)
pci_remove_bus_device(child);
- pci_remove_bus(bus);
- host_bridge->bus = NULL;
#ifdef CONFIG_PCI_DOMAINS_GENERIC
/* Release domain_nr if it was dynamically allocated */
@@ -166,6 +164,9 @@ void pci_remove_root_bus(struct pci_bus *bus)
pci_bus_release_domain_nr(bus, host_bridge->dev.parent);
#endif
+ pci_remove_bus(bus);
+ host_bridge->bus = NULL;
+
/* remove the host bridge */
device_del(&host_bridge->dev);
}
diff --git a/drivers/perf/amlogic/meson_g12_ddr_pmu.c b/drivers/perf/amlogic/meson_g12_ddr_pmu.c
index a78fdb15e26c..8b643888d503 100644
--- a/drivers/perf/amlogic/meson_g12_ddr_pmu.c
+++ b/drivers/perf/amlogic/meson_g12_ddr_pmu.c
@@ -21,23 +21,23 @@
#define DMC_QOS_IRQ BIT(30)
/* DMC bandwidth monitor register address offset */
-#define DMC_MON_G12_CTRL0 (0x20 << 2)
-#define DMC_MON_G12_CTRL1 (0x21 << 2)
-#define DMC_MON_G12_CTRL2 (0x22 << 2)
-#define DMC_MON_G12_CTRL3 (0x23 << 2)
-#define DMC_MON_G12_CTRL4 (0x24 << 2)
-#define DMC_MON_G12_CTRL5 (0x25 << 2)
-#define DMC_MON_G12_CTRL6 (0x26 << 2)
-#define DMC_MON_G12_CTRL7 (0x27 << 2)
-#define DMC_MON_G12_CTRL8 (0x28 << 2)
-
-#define DMC_MON_G12_ALL_REQ_CNT (0x29 << 2)
-#define DMC_MON_G12_ALL_GRANT_CNT (0x2a << 2)
-#define DMC_MON_G12_ONE_GRANT_CNT (0x2b << 2)
-#define DMC_MON_G12_SEC_GRANT_CNT (0x2c << 2)
-#define DMC_MON_G12_THD_GRANT_CNT (0x2d << 2)
-#define DMC_MON_G12_FOR_GRANT_CNT (0x2e << 2)
-#define DMC_MON_G12_TIMER (0x2f << 2)
+#define DMC_MON_G12_CTRL0 (0x0 << 2)
+#define DMC_MON_G12_CTRL1 (0x1 << 2)
+#define DMC_MON_G12_CTRL2 (0x2 << 2)
+#define DMC_MON_G12_CTRL3 (0x3 << 2)
+#define DMC_MON_G12_CTRL4 (0x4 << 2)
+#define DMC_MON_G12_CTRL5 (0x5 << 2)
+#define DMC_MON_G12_CTRL6 (0x6 << 2)
+#define DMC_MON_G12_CTRL7 (0x7 << 2)
+#define DMC_MON_G12_CTRL8 (0x8 << 2)
+
+#define DMC_MON_G12_ALL_REQ_CNT (0x9 << 2)
+#define DMC_MON_G12_ALL_GRANT_CNT (0xa << 2)
+#define DMC_MON_G12_ONE_GRANT_CNT (0xb << 2)
+#define DMC_MON_G12_SEC_GRANT_CNT (0xc << 2)
+#define DMC_MON_G12_THD_GRANT_CNT (0xd << 2)
+#define DMC_MON_G12_FOR_GRANT_CNT (0xe << 2)
+#define DMC_MON_G12_TIMER (0xf << 2)
/* Each bit represents an axi line */
PMU_FORMAT_ATTR(event, "config:0-7");
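
The offsets are word indices scaled by 4 (the << 2); the fix drops a duplicated 0x20 block offset that the ioremapped base already accounts for. A quick worked computation with an illustrative base address:

    #include <stdio.h>

    int main(void)
    {
            unsigned long base = 0xff638000;    /* illustrative DMC monitor base */

            printf("old CTRL0 at 0x%lx\n", base + (0x20 << 2)); /* base + 0x80 */
            printf("new CTRL0 at 0x%lx\n", base + (0x0  << 2)); /* base + 0x00 */
            return 0;
    }
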
diff --git a/drivers/pinctrl/mediatek/Kconfig b/drivers/pinctrl/mediatek/Kconfig
index f20c28334bcb..a71874fed3d6 100644
--- a/drivers/pinctrl/mediatek/Kconfig
+++ b/drivers/pinctrl/mediatek/Kconfig
@@ -45,35 +45,35 @@ config PINCTRL_MTK_PARIS
# For ARMv7 SoCs
config PINCTRL_MT2701
- bool "Mediatek MT2701 pin control"
+ bool "MediaTek MT2701 pin control"
depends on MACH_MT7623 || MACH_MT2701 || COMPILE_TEST
depends on OF
default MACH_MT2701
select PINCTRL_MTK
config PINCTRL_MT7623
- bool "Mediatek MT7623 pin control with generic binding"
+ bool "MediaTek MT7623 pin control with generic binding"
depends on MACH_MT7623 || COMPILE_TEST
depends on OF
default MACH_MT7623
select PINCTRL_MTK_MOORE
config PINCTRL_MT7629
- bool "Mediatek MT7629 pin control"
+ bool "MediaTek MT7629 pin control"
depends on MACH_MT7629 || COMPILE_TEST
depends on OF
default MACH_MT7629
select PINCTRL_MTK_MOORE
config PINCTRL_MT8135
- bool "Mediatek MT8135 pin control"
+ bool "MediaTek MT8135 pin control"
depends on MACH_MT8135 || COMPILE_TEST
depends on OF
default MACH_MT8135
select PINCTRL_MTK
config PINCTRL_MT8127
- bool "Mediatek MT8127 pin control"
+ bool "MediaTek MT8127 pin control"
depends on MACH_MT8127 || COMPILE_TEST
depends on OF
default MACH_MT8127
@@ -88,33 +88,33 @@ config PINCTRL_MT2712
select PINCTRL_MTK
config PINCTRL_MT6765
- tristate "Mediatek MT6765 pin control"
+ tristate "MediaTek MT6765 pin control"
depends on OF
depends on ARM64 || COMPILE_TEST
default ARM64 && ARCH_MEDIATEK
select PINCTRL_MTK_PARIS
config PINCTRL_MT6779
- tristate "Mediatek MT6779 pin control"
+ tristate "MediaTek MT6779 pin control"
depends on OF
depends on ARM64 || COMPILE_TEST
default ARM64 && ARCH_MEDIATEK
select PINCTRL_MTK_PARIS
help
Say yes here to support pin controller and gpio driver
- on Mediatek MT6779 SoC.
+ on MediaTek MT6779 SoC.
In MTK platform, we support virtual gpio and use it to
map specific eint which doesn't have real gpio pin.
config PINCTRL_MT6795
- bool "Mediatek MT6795 pin control"
+ bool "MediaTek MT6795 pin control"
depends on OF
depends on ARM64 || COMPILE_TEST
default ARM64 && ARCH_MEDIATEK
select PINCTRL_MTK_PARIS
config PINCTRL_MT6797
- bool "Mediatek MT6797 pin control"
+ bool "MediaTek MT6797 pin control"
depends on OF
depends on ARM64 || COMPILE_TEST
default ARM64 && ARCH_MEDIATEK
@@ -128,40 +128,42 @@ config PINCTRL_MT7622
select PINCTRL_MTK_MOORE
config PINCTRL_MT7981
- bool "Mediatek MT7981 pin control"
+ bool "MediaTek MT7981 pin control"
depends on OF
+ depends on ARM64 || COMPILE_TEST
+ default ARM64 && ARCH_MEDIATEK
select PINCTRL_MTK_MOORE
config PINCTRL_MT7986
- bool "Mediatek MT7986 pin control"
+ bool "MediaTek MT7986 pin control"
depends on OF
depends on ARM64 || COMPILE_TEST
default ARM64 && ARCH_MEDIATEK
select PINCTRL_MTK_MOORE
config PINCTRL_MT8167
- bool "Mediatek MT8167 pin control"
+ bool "MediaTek MT8167 pin control"
depends on OF
depends on ARM64 || COMPILE_TEST
default ARM64 && ARCH_MEDIATEK
select PINCTRL_MTK
config PINCTRL_MT8173
- bool "Mediatek MT8173 pin control"
+ bool "MediaTek MT8173 pin control"
depends on OF
depends on ARM64 || COMPILE_TEST
default ARM64 && ARCH_MEDIATEK
select PINCTRL_MTK
config PINCTRL_MT8183
- bool "Mediatek MT8183 pin control"
+ bool "MediaTek MT8183 pin control"
depends on OF
depends on ARM64 || COMPILE_TEST
default ARM64 && ARCH_MEDIATEK
select PINCTRL_MTK_PARIS
config PINCTRL_MT8186
- bool "Mediatek MT8186 pin control"
+ bool "MediaTek MT8186 pin control"
depends on OF
depends on ARM64 || COMPILE_TEST
default ARM64 && ARCH_MEDIATEK
@@ -180,28 +182,28 @@ config PINCTRL_MT8188
map specific eint which doesn't have real gpio pin.
config PINCTRL_MT8192
- bool "Mediatek MT8192 pin control"
+ bool "MediaTek MT8192 pin control"
depends on OF
depends on ARM64 || COMPILE_TEST
default ARM64 && ARCH_MEDIATEK
select PINCTRL_MTK_PARIS
config PINCTRL_MT8195
- bool "Mediatek MT8195 pin control"
+ bool "MediaTek MT8195 pin control"
depends on OF
depends on ARM64 || COMPILE_TEST
default ARM64 && ARCH_MEDIATEK
select PINCTRL_MTK_PARIS
config PINCTRL_MT8365
- bool "Mediatek MT8365 pin control"
+ bool "MediaTek MT8365 pin control"
depends on OF
depends on ARM64 || COMPILE_TEST
default ARM64 && ARCH_MEDIATEK
select PINCTRL_MTK
config PINCTRL_MT8516
- bool "Mediatek MT8516 pin control"
+ bool "MediaTek MT8516 pin control"
depends on OF
depends on ARM64 || COMPILE_TEST
default ARM64 && ARCH_MEDIATEK
@@ -209,7 +211,7 @@ config PINCTRL_MT8516
# For PMIC
config PINCTRL_MT6397
- bool "Mediatek MT6397 pin control"
+ bool "MediaTek MT6397 pin control"
depends on MFD_MT6397 || COMPILE_TEST
depends on OF
default MFD_MT6397
diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c
index 373eed8bc4be..c775d239444a 100644
--- a/drivers/pinctrl/pinctrl-at91-pio4.c
+++ b/drivers/pinctrl/pinctrl-at91-pio4.c
@@ -1206,7 +1206,6 @@ static int atmel_pinctrl_probe(struct platform_device *pdev)
dev_err(dev, "can't add the irq domain\n");
return -ENODEV;
}
- atmel_pioctrl->irq_domain->name = "atmel gpio";
for (i = 0; i < atmel_pioctrl->npins; i++) {
int irq = irq_create_mapping(atmel_pioctrl->irq_domain, i);
diff --git a/drivers/pinctrl/pinctrl-ocelot.c b/drivers/pinctrl/pinctrl-ocelot.c
index 29e4a6282a64..1dcbd0937ef5 100644
--- a/drivers/pinctrl/pinctrl-ocelot.c
+++ b/drivers/pinctrl/pinctrl-ocelot.c
@@ -1204,7 +1204,7 @@ static int ocelot_pinmux_set_mux(struct pinctrl_dev *pctldev,
regmap_update_bits(info->map, REG_ALT(0, info, pin->pin),
BIT(p), f << p);
regmap_update_bits(info->map, REG_ALT(1, info, pin->pin),
- BIT(p), f << (p - 1));
+ BIT(p), (f >> 1) << p);
return 0;
}
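
Reading the hunk, the two expressions agree for p >= 1, since bit p of f << (p - 1) is bit 1 of f, but the old form shifts by -1 when p is 0, which is undefined behaviour; the new form states the intent directly: bit 1 of the function number lands at bit p. A quick check:

    #include <stdio.h>

    int main(void)
    {
            unsigned int f = 2;                   /* alternate function 0b10 */

            for (unsigned int p = 1; p < 4; p++)  /* p >= 1: both forms agree */
                    printf("p=%u old=%#x new=%#x\n", p,
                           (f << (p - 1)) & (1u << p), (f >> 1) << p);

            /* p == 0: f << (p - 1) shifts by -1 -- undefined behaviour;
             * the fixed expression still yields bit 1 of f at bit 0. */
            printf("p=0 new=%#x\n", (f >> 1) << 0);
            return 0;
    }
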
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
index cb33a23ab0c1..04ace4c7bd58 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
@@ -1330,7 +1330,7 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl, struct fwnode
if (fwnode_property_read_u32(fwnode, "st,bank-ioport", &bank_ioport_nr))
bank_ioport_nr = bank_nr;
- bank->gpio_chip.base = bank_nr * STM32_GPIO_PINS_PER_BANK;
+ bank->gpio_chip.base = -1;
bank->gpio_chip.ngpio = npins;
bank->gpio_chip.fwnode = fwnode;
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index cb15acdf14a3..e2c9a68d12df 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -464,7 +464,8 @@ static const struct dmi_system_id asus_quirks[] = {
.ident = "ASUS ROG FLOW X13",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "GV301Q"),
+ /* Match GV301** */
+ DMI_MATCH(DMI_PRODUCT_NAME, "GV301"),
},
.driver_data = &quirk_asus_tablet_mode,
},
diff --git a/drivers/platform/x86/gigabyte-wmi.c b/drivers/platform/x86/gigabyte-wmi.c
index 4dd39ab6ecfa..2a426040f749 100644
--- a/drivers/platform/x86/gigabyte-wmi.c
+++ b/drivers/platform/x86/gigabyte-wmi.c
@@ -151,6 +151,7 @@ static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = {
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550I AORUS PRO AX"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M AORUS PRO-P"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M DS3H"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B650 AORUS ELITE AX"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B660 GAMING X DDR4"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B660I AORUS PRO DDR4"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("Z390 I AORUS PRO WIFI-CF"),
@@ -160,6 +161,7 @@ static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = {
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 GAMING X"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 I AORUS PRO WIFI"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 UD"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570S AORUS ELITE"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("Z690M AORUS ELITE AX DDR4"),
{ }
};
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index 0eb5bfdd823a..959ec3c5f376 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -1170,7 +1170,6 @@ static const struct key_entry ideapad_keymap[] = {
{ KE_KEY, 65, { KEY_PROG4 } },
{ KE_KEY, 66, { KEY_TOUCHPAD_OFF } },
{ KE_KEY, 67, { KEY_TOUCHPAD_ON } },
- { KE_KEY, 68, { KEY_TOUCHPAD_TOGGLE } },
{ KE_KEY, 128, { KEY_ESC } },
/*
@@ -1526,18 +1525,16 @@ static void ideapad_sync_touchpad_state(struct ideapad_private *priv, bool send_
if (priv->features.ctrl_ps2_aux_port)
i8042_command(&param, value ? I8042_CMD_AUX_ENABLE : I8042_CMD_AUX_DISABLE);
- if (send_events) {
- /*
- * On older models the EC controls the touchpad and toggles it
- * on/off itself, in this case we report KEY_TOUCHPAD_ON/_OFF.
- * If the EC did not toggle, report KEY_TOUCHPAD_TOGGLE.
- */
- if (value != priv->r_touchpad_val) {
- ideapad_input_report(priv, value ? 67 : 66);
- sysfs_notify(&priv->platform_device->dev.kobj, NULL, "touchpad");
- } else {
- ideapad_input_report(priv, 68);
- }
+ /*
+ * On older models the EC controls the touchpad and toggles it on/off
+ * itself, in this case we report KEY_TOUCHPAD_ON/_OFF. Some models do
+ * an acpi-notify with VPC bit 5 set on resume, so this function gets
+ * called with send_events=true on every resume. Therefore if the EC did
+ * not toggle, do nothing to avoid sending spurious KEY_TOUCHPAD_TOGGLE.
+ */
+ if (send_events && value != priv->r_touchpad_val) {
+ ideapad_input_report(priv, value ? 67 : 66);
+ sysfs_notify(&priv->platform_device->dev.kobj, NULL, "touchpad");
}
priv->r_touchpad_val = value;
diff --git a/drivers/platform/x86/intel/pmc/core.c b/drivers/platform/x86/intel/pmc/core.c
index 3a15d32d7644..b9591969e0fa 100644
--- a/drivers/platform/x86/intel/pmc/core.c
+++ b/drivers/platform/x86/intel/pmc/core.c
@@ -66,7 +66,18 @@ static inline void pmc_core_reg_write(struct pmc_dev *pmcdev, int reg_offset,
static inline u64 pmc_core_adjust_slp_s0_step(struct pmc_dev *pmcdev, u32 value)
{
- return (u64)value * pmcdev->map->slp_s0_res_counter_step;
+ /*
+ * ADL PCH does not have the SLP_S0 counter, so the LPM residency
+ * counters are used as a workaround; they use a 30.5 usec tick. All
+ * other client platforms have the legacy SLP_S0 residency counter,
+ * which uses a 122 usec tick.
+ */
+ const int lpm_adj_x2 = pmcdev->map->lpm_res_counter_step_x2;
+
+ if (pmcdev->map == &adl_reg_map)
+ return (u64)value * GET_X2_COUNTER((u64)lpm_adj_x2);
+ else
+ return (u64)value * pmcdev->map->slp_s0_res_counter_step;
}
static int set_etr3(struct pmc_dev *pmcdev)
diff --git a/drivers/platform/x86/think-lmi.c b/drivers/platform/x86/think-lmi.c
index c816646eb661..78dc82bda4dd 100644
--- a/drivers/platform/x86/think-lmi.c
+++ b/drivers/platform/x86/think-lmi.c
@@ -920,7 +920,7 @@ static ssize_t display_name_show(struct kobject *kobj, struct kobj_attribute *at
static ssize_t current_value_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
struct tlmi_attr_setting *setting = to_tlmi_attr_setting(kobj);
- char *item, *value;
+ char *item, *value, *p;
int ret;
ret = tlmi_setting(setting->index, &item, LENOVO_BIOS_SETTING_GUID);
@@ -930,10 +930,15 @@ static ssize_t current_value_show(struct kobject *kobj, struct kobj_attribute *a
/* validate and split from `item,value` -> `value` */
value = strpbrk(item, ",");
if (!value || value == item || !strlen(value + 1))
- return -EINVAL;
-
- ret = sysfs_emit(buf, "%s\n", value + 1);
+ ret = -EINVAL;
+ else {
+ /* On Workstations remove the Options part after the value */
+ p = strchrnul(value, ';');
+ *p = '\0';
+ ret = sysfs_emit(buf, "%s\n", value + 1);
+ }
kfree(item);
+
return ret;
}
@@ -1457,10 +1462,10 @@ static int tlmi_analyze(void)
* name string.
* Try and pull that out if it's available.
*/
- char *item, *optstart, *optend;
+ char *optitem, *optstart, *optend;
- if (!tlmi_setting(setting->index, &item, LENOVO_BIOS_SETTING_GUID)) {
- optstart = strstr(item, "[Optional:");
+ if (!tlmi_setting(setting->index, &optitem, LENOVO_BIOS_SETTING_GUID)) {
+ optstart = strstr(optitem, "[Optional:");
if (optstart) {
optstart += strlen("[Optional:");
optend = strstr(optstart, "]");
@@ -1469,6 +1474,7 @@ static int tlmi_analyze(void)
kstrndup(optstart, optend - optstart,
GFP_KERNEL);
}
+ kfree(optitem);
}
}
/*
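
A userspace model of the new parsing: Workstation-class BIOSes apparently return `Item,Value;[Optional:...]`, so the value is truncated at the first ';' before being emitted (the sample string is invented):

    #define _GNU_SOURCE   /* for strchrnul() */
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char item[] = "WakeOnLAN,Enable;[Optional:Enable,Disable]";
            char *value = strpbrk(item, ",");   /* -> ",Enable;[Optional..." */

            if (value && value != item && strlen(value + 1)) {
                    *strchrnul(value, ';') = '\0';   /* drop the Options part */
                    printf("%s\n", value + 1);       /* prints "Enable" */
            }
            return 0;
    }
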
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 32c10457399e..7191ff2625b1 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -4479,6 +4479,14 @@ static const struct dmi_system_id fwbug_list[] __initconst = {
}
},
{
+ .ident = "T14s Gen1 AMD",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "20UJ"),
+ }
+ },
+ {
.ident = "P14s Gen1 AMD",
.driver_data = &quirk_s2idle_bug,
.matches = {
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index e01147f66e15..474725714a05 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -115,7 +115,14 @@ static int pwm_device_request(struct pwm_device *pwm, const char *label)
}
if (pwm->chip->ops->get_state) {
- struct pwm_state state;
+ /*
+ * Zero-initialize state because most drivers are unaware of
+ * .usage_power. The other members of state are supposed to be
+ * set by lowlevel drivers. We still initialize the whole
+ * structure for simplicity even though this might paper over
+ * faulty implementations of .get_state().
+ */
+ struct pwm_state state = { 0, };
err = pwm->chip->ops->get_state(pwm->chip, pwm, &state);
trace_pwm_get(pwm, &state, err);
@@ -448,7 +455,7 @@ static void pwm_apply_state_debug(struct pwm_device *pwm,
{
struct pwm_state *last = &pwm->last;
struct pwm_chip *chip = pwm->chip;
- struct pwm_state s1, s2;
+ struct pwm_state s1 = { 0 }, s2 = { 0 };
int err;
if (!IS_ENABLED(CONFIG_PWM_DEBUG))
@@ -530,6 +537,7 @@ static void pwm_apply_state_debug(struct pwm_device *pwm,
return;
}
+ *last = (struct pwm_state){ 0 };
err = chip->ops->get_state(chip, pwm, last);
trace_pwm_get(pwm, last, err);
if (err)
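
The zero-initializations above guard against .get_state() implementations that predate newer pwm_state members and therefore never write them. A standalone model of the hazard being closed:

    #include <stdbool.h>
    #include <stdio.h>

    struct pwm_state { unsigned period, duty; bool enabled, usage_power; };

    static void legacy_get_state(struct pwm_state *s)
    {
            s->period  = 1000000;   /* fills the members it knows about... */
            s->duty    = 500000;
            s->enabled = true;      /* ...but never touches usage_power */
    }

    int main(void)
    {
            struct pwm_state state = { 0 };   /* the fix: no indeterminate reads */

            legacy_get_state(&state);
            printf("usage_power=%d\n", state.usage_power);   /* reliably 0 */
            return 0;
    }
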
diff --git a/drivers/pwm/pwm-cros-ec.c b/drivers/pwm/pwm-cros-ec.c
index 86df6702cb83..ad18b0ebe3f1 100644
--- a/drivers/pwm/pwm-cros-ec.c
+++ b/drivers/pwm/pwm-cros-ec.c
@@ -198,6 +198,7 @@ static int cros_ec_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
state->enabled = (ret > 0);
state->period = EC_PWM_MAX_DUTY;
+ state->polarity = PWM_POLARITY_NORMAL;
/*
* Note that "disabled" and "duty cycle == 0" are treated the same. If
diff --git a/drivers/pwm/pwm-hibvt.c b/drivers/pwm/pwm-hibvt.c
index 12c05c155cab..1b9274c5ad87 100644
--- a/drivers/pwm/pwm-hibvt.c
+++ b/drivers/pwm/pwm-hibvt.c
@@ -146,6 +146,7 @@ static int hibvt_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
value = readl(base + PWM_CTRL_ADDR(pwm->hwpwm));
state->enabled = (PWM_ENABLE_MASK & value);
+ state->polarity = (PWM_POLARITY_MASK & value) ? PWM_POLARITY_INVERSED : PWM_POLARITY_NORMAL;
return 0;
}
diff --git a/drivers/pwm/pwm-iqs620a.c b/drivers/pwm/pwm-iqs620a.c
index 8362b4870c66..47b3141135f3 100644
--- a/drivers/pwm/pwm-iqs620a.c
+++ b/drivers/pwm/pwm-iqs620a.c
@@ -126,6 +126,7 @@ static int iqs620_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
mutex_unlock(&iqs620_pwm->lock);
state->period = IQS620_PWM_PERIOD_NS;
+ state->polarity = PWM_POLARITY_NORMAL;
return 0;
}
diff --git a/drivers/pwm/pwm-meson.c b/drivers/pwm/pwm-meson.c
index 16d79ca5d8f5..5cd7b90872c6 100644
--- a/drivers/pwm/pwm-meson.c
+++ b/drivers/pwm/pwm-meson.c
@@ -162,6 +162,12 @@ static int meson_pwm_calc(struct meson_pwm *meson, struct pwm_device *pwm,
duty = state->duty_cycle;
period = state->period;
+ /*
+ * Note this is wrong. The result is an output wave that isn't really
+ * inverted and so is wrongly identified by .get_state as normal.
+ * Fixing this needs some care however as some machines might rely on
+ * this.
+ */
if (state->polarity == PWM_POLARITY_INVERSED)
duty = period - duty;
@@ -358,6 +364,8 @@ static int meson_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
state->duty_cycle = 0;
}
+ state->polarity = PWM_POLARITY_NORMAL;
+
return 0;
}
diff --git a/drivers/pwm/pwm-sprd.c b/drivers/pwm/pwm-sprd.c
index d866ce345f97..bde579a338c2 100644
--- a/drivers/pwm/pwm-sprd.c
+++ b/drivers/pwm/pwm-sprd.c
@@ -109,6 +109,7 @@ static int sprd_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
duty = val & SPRD_PWM_DUTY_MSK;
tmp = (prescale + 1) * NSEC_PER_SEC * duty;
state->duty_cycle = DIV_ROUND_CLOSEST_ULL(tmp, chn->clk_rate);
+ state->polarity = PWM_POLARITY_NORMAL;
/* Disable PWM clocks if the PWM channel is not in enable state. */
if (!state->enabled)
diff --git a/drivers/regulator/fan53555.c b/drivers/regulator/fan53555.c
index 529963a7e4f5..41537c45f036 100644
--- a/drivers/regulator/fan53555.c
+++ b/drivers/regulator/fan53555.c
@@ -8,18 +8,19 @@
// Copyright (c) 2012 Marvell Technology Ltd.
// Yunfan Zhang <yfzhang@marvell.com>
+#include <linux/bits.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/param.h>
-#include <linux/err.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
#include <linux/regulator/driver.h>
+#include <linux/regulator/fan53555.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/of_regulator.h>
-#include <linux/of_device.h>
-#include <linux/i2c.h>
#include <linux/slab.h>
-#include <linux/regmap.h>
-#include <linux/regulator/fan53555.h>
/* Voltage setting */
#define FAN53555_VSEL0 0x00
@@ -60,7 +61,7 @@
#define TCS_VSEL1_MODE (1 << 6)
#define TCS_SLEW_SHIFT 3
-#define TCS_SLEW_MASK (0x3 < 3)
+#define TCS_SLEW_MASK GENMASK(4, 3)
enum fan53555_vendor {
FAN53526_VENDOR_FAIRCHILD = 0,
diff --git a/drivers/regulator/fixed.c b/drivers/regulator/fixed.c
index 2a9867abba20..e6724a229d23 100644
--- a/drivers/regulator/fixed.c
+++ b/drivers/regulator/fixed.c
@@ -215,7 +215,7 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev)
drvdata->enable_clock = devm_clk_get(dev, NULL);
if (IS_ERR(drvdata->enable_clock)) {
dev_err(dev, "Can't get enable-clock from devicetree\n");
- return -ENOENT;
+ return PTR_ERR(drvdata->enable_clock);
}
} else if (drvtype && drvtype->has_performance_state) {
drvdata->desc.ops = &fixed_voltage_domain_ops;
diff --git a/drivers/regulator/sm5703-regulator.c b/drivers/regulator/sm5703-regulator.c
index 05ad28fc4da8..229df7170792 100644
--- a/drivers/regulator/sm5703-regulator.c
+++ b/drivers/regulator/sm5703-regulator.c
@@ -42,6 +42,7 @@ static const int sm5703_buck_voltagemap[] = {
.type = REGULATOR_VOLTAGE, \
.id = SM5703_USBLDO ## _id, \
.ops = &sm5703_regulator_ops_fixed, \
+ .n_voltages = 1, \
.fixed_uV = SM5703_USBLDO_MICROVOLT, \
.enable_reg = SM5703_REG_USBLDO12, \
.enable_mask = SM5703_REG_EN_USBLDO ##_id, \
@@ -56,6 +57,7 @@ static const int sm5703_buck_voltagemap[] = {
.type = REGULATOR_VOLTAGE, \
.id = SM5703_VBUS, \
.ops = &sm5703_regulator_ops_fixed, \
+ .n_voltages = 1, \
.fixed_uV = SM5703_VBUS_MICROVOLT, \
.enable_reg = SM5703_REG_CNTL, \
.enable_mask = SM5703_OPERATION_MODE_MASK, \
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index c76f82fb8b63..15f452908926 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -771,13 +771,12 @@ static int iscsi_sw_tcp_conn_set_param(struct iscsi_cls_conn *cls_conn,
iscsi_set_param(cls_conn, param, buf, buflen);
break;
case ISCSI_PARAM_DATADGST_EN:
- iscsi_set_param(cls_conn, param, buf, buflen);
-
mutex_lock(&tcp_sw_conn->sock_lock);
if (!tcp_sw_conn->sock) {
mutex_unlock(&tcp_sw_conn->sock_lock);
return -ENOTCONN;
}
+ iscsi_set_param(cls_conn, param, buf, buflen);
tcp_sw_conn->sendpage = conn->datadgst_en ?
sock_no_sendpage : tcp_sw_conn->sock->ops->sendpage;
mutex_unlock(&tcp_sw_conn->sock_lock);
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 3ceece988338..c895189375e2 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -3298,7 +3298,7 @@ fw_crash_buffer_show(struct device *cdev,
spin_lock_irqsave(&instance->crashdump_lock, flags);
buff_offset = instance->fw_crash_buffer_offset;
- if (!instance->crash_dump_buf &&
+ if (!instance->crash_dump_buf ||
!((instance->fw_crash_state == AVAILABLE) ||
(instance->fw_crash_state == COPYING))) {
dev_err(&instance->pdev->dev,
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 84c9a55a5794..8a83f3fc2b86 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -4771,7 +4771,7 @@ int megasas_task_abort_fusion(struct scsi_cmnd *scmd)
devhandle = megasas_get_tm_devhandle(scmd->device);
if (devhandle == (u16)ULONG_MAX) {
- ret = SUCCESS;
+ ret = FAILED;
sdev_printk(KERN_INFO, scmd->device,
"task abort issued for invalid devhandle\n");
mutex_unlock(&instance->reset_mutex);
@@ -4841,7 +4841,7 @@ int megasas_reset_target_fusion(struct scsi_cmnd *scmd)
devhandle = megasas_get_tm_devhandle(scmd->device);
if (devhandle == (u16)ULONG_MAX) {
- ret = SUCCESS;
+ ret = FAILED;
sdev_printk(KERN_INFO, scmd->device,
"target reset issued for invalid devhandle\n");
mutex_unlock(&instance->reset_mutex);
diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c
index a565817aa56d..d109a4ceb72b 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_fw.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c
@@ -2526,7 +2526,7 @@ static void mpi3mr_watchdog_work(struct work_struct *work)
mrioc->unrecoverable = 1;
goto schedule_work;
case MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS:
- return;
+ goto schedule_work;
case MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET:
reset_reason = MPI3MR_RESET_FROM_CIACTIV_FAULT;
break;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 2ee9ea57554d..14ae0a9c5d3d 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -6616,11 +6616,6 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
else if (rc == -EAGAIN)
goto try_32bit_dma;
total_sz += sense_sz;
- ioc_info(ioc,
- "sense pool(0x%p)- dma(0x%llx): depth(%d),"
- "element_size(%d), pool_size(%d kB)\n",
- ioc->sense, (unsigned long long)ioc->sense_dma, ioc->scsiio_depth,
- SCSI_SENSE_BUFFERSIZE, sz / 1024);
/* reply pool, 4 byte align */
sz = ioc->reply_free_queue_depth * ioc->reply_sz;
rc = _base_allocate_reply_pool(ioc, sz);
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index bee1b8a82020..d0cdbfe771a9 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -3617,6 +3617,7 @@ skip_dpc:
probe_failed:
qla_enode_stop(base_vha);
qla_edb_stop(base_vha);
+ vfree(base_vha->scan.l);
if (base_vha->gnl.l) {
dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size,
base_vha->gnl.l, base_vha->gnl.ldma);
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 5cce1ba70fc6..09ef0b31dfc0 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -314,11 +314,18 @@ static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer,
if (result)
return -EIO;
- /* Sanity check that we got the page back that we asked for */
+ /*
+ * Sanity check that we got the page back that we asked for and that
+ * the page size is not 0.
+ */
if (buffer[1] != page)
return -EIO;
- return get_unaligned_be16(&buffer[2]) + 4;
+ result = get_unaligned_be16(&buffer[2]);
+ if (!result)
+ return -EIO;
+
+ return result + 4;
}
static int scsi_get_vpd_size(struct scsi_device *sdev, u8 page)
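
For reference, the VPD header carries a big-endian 16-bit payload length at bytes 2-3, and the total size is that plus the 4-byte header; a zero length now fails instead of yielding a bogus 4. A standalone model:

    #include <stdint.h>
    #include <stdio.h>

    static int vpd_size(const uint8_t *buf, uint8_t page)
    {
            if (buf[1] != page)
                    return -1;                  /* wrong page echoed back */

            int len = (buf[2] << 8) | buf[3];   /* get_unaligned_be16() */
            if (!len)
                    return -1;                  /* new: reject a zero length */
            return len + 4;                     /* header + payload */
    }

    int main(void)
    {
            uint8_t good[4] = { 0x00, 0x80, 0x00, 0x18 };
            uint8_t bad[4]  = { 0x00, 0x80, 0x00, 0x00 };

            printf("%d %d\n", vpd_size(good, 0x80), vpd_size(bad, 0x80)); /* 28 -1 */
            return 0;
    }
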
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index b11a9162e73a..b54f2c6c08c3 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -509,9 +509,6 @@ static int ses_enclosure_find_by_addr(struct enclosure_device *edev,
int i;
struct ses_component *scomp;
- if (!edev->component[0].scratch)
- return 0;
-
for (i = 0; i < edev->components; i++) {
scomp = edev->component[i].scratch;
if (scomp->addr != efd->addr)
@@ -602,8 +599,10 @@ static void ses_enclosure_data_process(struct enclosure_device *edev,
components++,
type_ptr[0],
name);
- else
+ else if (components < edev->components)
ecomp = &edev->component[components++];
+ else
+ ecomp = ERR_PTR(-EINVAL);
if (!IS_ERR(ecomp)) {
if (addl_desc_ptr) {
@@ -734,11 +733,6 @@ static int ses_intf_add(struct device *cdev,
components += type_ptr[1];
}
- if (components == 0) {
- sdev_printk(KERN_WARNING, sdev, "enclosure has no enumerated components\n");
- goto err_free;
- }
-
ses_dev->page1 = buf;
ses_dev->page1_len = len;
buf = NULL;
@@ -780,9 +774,11 @@ static int ses_intf_add(struct device *cdev,
buf = NULL;
}
page2_not_supported:
- scomp = kcalloc(components, sizeof(struct ses_component), GFP_KERNEL);
- if (!scomp)
- goto err_free;
+ if (components > 0) {
+ scomp = kcalloc(components, sizeof(struct ses_component), GFP_KERNEL);
+ if (!scomp)
+ goto err_free;
+ }
edev = enclosure_register(cdev->parent, dev_name(&sdev->sdev_gendev),
components, &ses_enclosure_callbacks);
diff --git a/drivers/spi/spi-rockchip-sfc.c b/drivers/spi/spi-rockchip-sfc.c
index bd87d3c92dd3..69347b6bf60c 100644
--- a/drivers/spi/spi-rockchip-sfc.c
+++ b/drivers/spi/spi-rockchip-sfc.c
@@ -632,7 +632,7 @@ static int rockchip_sfc_probe(struct platform_device *pdev)
if (ret) {
dev_err(dev, "Failed to request irq\n");
- return ret;
+ goto err_irq;
}
ret = rockchip_sfc_init(sfc);
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 44b85a8d47f1..7bc14fb309a6 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -4456,6 +4456,11 @@ static int of_spi_notify(struct notifier_block *nb, unsigned long action,
return NOTIFY_OK;
}
+ /*
+ * Clear the flag before adding the device so that fw_devlink
+ * doesn't skip adding consumers to this device.
+ */
+ rd->dn->fwnode.flags &= ~FWNODE_FLAG_NOT_DEVICE;
spi = of_register_spi_device(ctlr, rd->dn);
put_device(&ctlr->dev);
diff --git a/drivers/tee/optee/call.c b/drivers/tee/optee/call.c
index 290b1bb0e9cd..df5fb5410b72 100644
--- a/drivers/tee/optee/call.c
+++ b/drivers/tee/optee/call.c
@@ -488,7 +488,7 @@ static bool is_normal_memory(pgprot_t p)
#elif defined(CONFIG_ARM64)
return (pgprot_val(p) & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL);
#else
-#error "Unuspported architecture"
+#error "Unsupported architecture"
#endif
}
diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c
index b1c6231defad..673cf0359494 100644
--- a/drivers/tee/tee_shm.c
+++ b/drivers/tee/tee_shm.c
@@ -32,7 +32,7 @@ static int shm_get_kernel_pages(unsigned long start, size_t page_count,
is_kmap_addr((void *)start)))
return -EINVAL;
- page = virt_to_page(start);
+ page = virt_to_page((void *)start);
for (n = 0; n < page_count; n++) {
pages[n] = page + n;
get_page(pages[n]);
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
index 90526f46c9b1..d71ee50e7878 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
@@ -153,7 +153,6 @@ static int sys_set_trip_temp(struct thermal_zone_device *tzd, int trip, int temp
cancel_delayed_work_sync(&pci_info->work);
proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_INT_ENABLE_0, 0);
proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_THRES_0, 0);
- thermal_zone_device_disable(tzd);
pci_info->stored_thres = 0;
return 0;
}
diff --git a/drivers/thermal/intel/intel_powerclamp.c b/drivers/thermal/intel/intel_powerclamp.c
index c7ba5680cd48..91fc7e239497 100644
--- a/drivers/thermal/intel/intel_powerclamp.c
+++ b/drivers/thermal/intel/intel_powerclamp.c
@@ -235,6 +235,12 @@ static int max_idle_set(const char *arg, const struct kernel_param *kp)
goto skip_limit_set;
}
+ if (!cpumask_available(idle_injection_cpu_mask)) {
+ ret = allocate_copy_idle_injection_mask(cpu_present_mask);
+ if (ret)
+ goto skip_limit_set;
+ }
+
if (check_invalid(idle_injection_cpu_mask, new_max_idle)) {
ret = -EINVAL;
goto skip_limit_set;
@@ -791,7 +797,8 @@ static int __init powerclamp_init(void)
return retval;
mutex_lock(&powerclamp_lock);
- retval = allocate_copy_idle_injection_mask(cpu_present_mask);
+ if (!cpumask_available(idle_injection_cpu_mask))
+ retval = allocate_copy_idle_injection_mask(cpu_present_mask);
mutex_unlock(&powerclamp_lock);
 	if (retval)
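
Both powerclamp hunks implement the same idea: allocate the CPU mask lazily, from whichever entry point runs first, and never reallocate once it exists. A plain-C stand-in for the cpumask_available()/allocation pair (purely illustrative):

#include <stdlib.h>

static unsigned long *idle_mask;    /* NULL until first allocation */

static int ensure_idle_mask(size_t nwords)
{
    if (idle_mask)                  /* cpumask_available() analogue */
        return 0;
    idle_mask = calloc(nwords, sizeof(*idle_mask));
    return idle_mask ? 0 : -1;      /* -ENOMEM in the driver */
}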
diff --git a/drivers/thermal/intel/therm_throt.c b/drivers/thermal/intel/therm_throt.c
index 2e22bb82b738..e69868e868eb 100644
--- a/drivers/thermal/intel/therm_throt.c
+++ b/drivers/thermal/intel/therm_throt.c
@@ -193,8 +193,67 @@ static const struct attribute_group thermal_attr_group = {
#define THERM_THROT_POLL_INTERVAL HZ
#define THERM_STATUS_PROCHOT_LOG BIT(1)
-#define THERM_STATUS_CLEAR_CORE_MASK (BIT(1) | BIT(3) | BIT(5) | BIT(7) | BIT(9) | BIT(11) | BIT(13) | BIT(15))
-#define THERM_STATUS_CLEAR_PKG_MASK (BIT(1) | BIT(3) | BIT(5) | BIT(7) | BIT(9) | BIT(11))
+static u64 therm_intr_core_clear_mask;
+static u64 therm_intr_pkg_clear_mask;
+
+static void thermal_intr_init_core_clear_mask(void)
+{
+ if (therm_intr_core_clear_mask)
+ return;
+
+ /*
+ * Reference: Intel SDM Volume 4
+ * "Table 2-2. IA-32 Architectural MSRs", MSR 0x19C
+ * IA32_THERM_STATUS.
+ */
+
+ /*
+ * Bits 1, 3, 5: valid when CPUID.01H:EDX[22] = 1. This driver does
+ * not enable interrupts when that bit is 0, since it checks for
+ * X86_FEATURE_ACPI.
+ */
+ therm_intr_core_clear_mask = (BIT(1) | BIT(3) | BIT(5));
+
+ /*
+ * Bit 7 and 9: Thermal Threshold #1 and #2 log
+ * If CPUID.01H:ECX[8] = 1
+ */
+ if (boot_cpu_has(X86_FEATURE_TM2))
+ therm_intr_core_clear_mask |= (BIT(7) | BIT(9));
+
+ /* Bit 11: Power Limitation log (R/WC0) If CPUID.06H:EAX[4] = 1 */
+ if (boot_cpu_has(X86_FEATURE_PLN))
+ therm_intr_core_clear_mask |= BIT(11);
+
+ /*
+ * Bit 13: Current Limit log (R/WC0) If CPUID.06H:EAX[7] = 1
+ * Bit 15: Cross Domain Limit log (R/WC0) If CPUID.06H:EAX[7] = 1
+ */
+ if (boot_cpu_has(X86_FEATURE_HWP))
+ therm_intr_core_clear_mask |= (BIT(13) | BIT(15));
+}
+
+static void thermal_intr_init_pkg_clear_mask(void)
+{
+ if (therm_intr_pkg_clear_mask)
+ return;
+
+ /*
+ * Reference: Intel SDM Volume 4
+ * "Table 2-2. IA-32 Architectural MSRs", MSR 0x1B1
+ * IA32_PACKAGE_THERM_STATUS.
+ */
+
+ /* All bits except BIT 26 depend on CPUID.06H: EAX[6] = 1 */
+ if (boot_cpu_has(X86_FEATURE_PTS))
+ therm_intr_pkg_clear_mask = (BIT(1) | BIT(3) | BIT(5) | BIT(7) | BIT(9) | BIT(11));
+
+ /*
+ * Intel SDM Volume 2A: Thermal and Power Management Leaf
+ * Bit 26: CPUID.06H: EAX[19] = 1
+ */
+ if (boot_cpu_has(X86_FEATURE_HFI))
+ therm_intr_pkg_clear_mask |= BIT(26);
+}
/*
* Clear the bits in package thermal status register for bit = 1
@@ -207,13 +266,10 @@ void thermal_clear_package_intr_status(int level, u64 bit_mask)
if (level == CORE_LEVEL) {
msr = MSR_IA32_THERM_STATUS;
- msr_val = THERM_STATUS_CLEAR_CORE_MASK;
+ msr_val = therm_intr_core_clear_mask;
} else {
msr = MSR_IA32_PACKAGE_THERM_STATUS;
- msr_val = THERM_STATUS_CLEAR_PKG_MASK;
- if (boot_cpu_has(X86_FEATURE_HFI))
- msr_val |= BIT(26);
-
+ msr_val = therm_intr_pkg_clear_mask;
}
msr_val &= ~bit_mask;
@@ -708,6 +764,9 @@ void intel_init_thermal(struct cpuinfo_x86 *c)
h = THERMAL_APIC_VECTOR | APIC_DM_FIXED | APIC_LVT_MASKED;
apic_write(APIC_LVTTHMR, h);
+ thermal_intr_init_core_clear_mask();
+ thermal_intr_init_pkg_clear_mask();
+
rdmsr(MSR_IA32_THERM_INTERRUPT, l, h);
if (cpu_has(c, X86_FEATURE_PLN) && !int_pln_enable)
wrmsr(MSR_IA32_THERM_INTERRUPT,
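
The therm_throt rework replaces two compile-time constants with masks built once from CPU feature bits. A userspace model of the core-mask construction, with plain ints standing in for the boot_cpu_has() checks:

#include <stdint.h>

#define BIT(n) (1ULL << (n))

static uint64_t core_clear_mask(int has_tm2, int has_pln, int has_hwp)
{
    uint64_t mask = BIT(1) | BIT(3) | BIT(5);   /* always-present logs */

    if (has_tm2)
        mask |= BIT(7) | BIT(9);    /* thermal threshold #1/#2 logs */
    if (has_pln)
        mask |= BIT(11);            /* power limitation log */
    if (has_hwp)
        mask |= BIT(13) | BIT(15);  /* current/cross-domain limit logs */

    return mask;
}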
diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c
index a4aba7b8bb8b..6c20c9f90a05 100644
--- a/drivers/thermal/thermal_sysfs.c
+++ b/drivers/thermal/thermal_sysfs.c
@@ -876,8 +876,6 @@ static void cooling_device_stats_setup(struct thermal_cooling_device *cdev)
unsigned long states = cdev->max_state + 1;
int var;
- lockdep_assert_held(&cdev->lock);
-
var = sizeof(*stats);
var += sizeof(*stats->time_in_state) * states;
var += sizeof(*stats->trans_table) * states * states;
@@ -903,8 +901,6 @@ out:
static void cooling_device_stats_destroy(struct thermal_cooling_device *cdev)
{
- lockdep_assert_held(&cdev->lock);
-
kfree(cdev->stats);
cdev->stats = NULL;
}
@@ -931,6 +927,8 @@ void thermal_cooling_device_destroy_sysfs(struct thermal_cooling_device *cdev)
void thermal_cooling_device_stats_reinit(struct thermal_cooling_device *cdev)
{
+ lockdep_assert_held(&cdev->lock);
+
cooling_device_stats_destroy(cdev);
cooling_device_stats_setup(cdev);
}
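
The thermal_sysfs hunks move the "must hold the lock" assertion out of the two helpers, which also run unlocked at probe/teardown, into the one public entry point that genuinely requires it. A pthread sketch of that layering (the flag-based lockdep stand-in is purely illustrative):

#include <assert.h>
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool lock_held;          /* stand-in for lockdep state */

static void stats_destroy(void) { /* may also run unlocked at teardown */ }
static void stats_setup(void)   { /* may also run unlocked at probe */ }

static void stats_reinit(void)  /* public API: caller must hold lock */
{
    assert(lock_held);          /* lockdep_assert_held() analogue */
    stats_destroy();
    stats_setup();
}

int main(void)
{
    pthread_mutex_lock(&lock);
    lock_held = true;
    stats_reinit();
    lock_held = false;
    pthread_mutex_unlock(&lock);
    return 0;
}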
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index fa43df05342b..3ba9c8b93ae6 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -1903,6 +1903,17 @@ EXPORT_SYMBOL_GPL(serial8250_modem_status);
static bool handle_rx_dma(struct uart_8250_port *up, unsigned int iir)
{
switch (iir & 0x3f) {
+ case UART_IIR_THRI:
+ /*
+ * Postpone the decision whether to use DMA until IIR_RDI or
+ * IIR_RX_TIMEOUT, because it is impossible to make an informed
+ * decision with IIR_THRI alone.
+ *
+ * This also fixes one known DMA Rx corruption issue where
+ * DR is asserted but DMA Rx only gets a corrupted zero byte
+ * (too early DR?).
+ */
+ return false;
case UART_IIR_RDI:
if (!up->dma->rx_running)
break;
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 56e6ba3250cd..074bfed57fc9 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -858,11 +858,17 @@ static unsigned int lpuart32_tx_empty(struct uart_port *port)
struct lpuart_port, port);
unsigned long stat = lpuart32_read(port, UARTSTAT);
unsigned long sfifo = lpuart32_read(port, UARTFIFO);
+ unsigned long ctrl = lpuart32_read(port, UARTCTRL);
if (sport->dma_tx_in_progress)
return 0;
- if (stat & UARTSTAT_TC && sfifo & UARTFIFO_TXEMPT)
+ /*
+ * LPUART Transmission Complete Flag may never be set while queuing a break
+ * character, so avoid checking for transmission complete when UARTCTRL_SBK
+ * is asserted.
+ */
+ if ((stat & UARTSTAT_TC && sfifo & UARTFIFO_TXEMPT) || ctrl & UARTCTRL_SBK)
return TIOCSER_TEMT;
return 0;
@@ -2942,7 +2948,7 @@ static bool lpuart_uport_is_active(struct lpuart_port *sport)
tty = tty_port_tty_get(port);
if (tty) {
tty_dev = tty->dev;
- may_wake = device_may_wakeup(tty_dev);
+ may_wake = tty_dev && device_may_wakeup(tty_dev);
tty_kref_put(tty);
}
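
The lpuart32_tx_empty() change reports "empty" either when the FIFO has really drained, or when a break is being queued (SBK set), since the TC flag may never assert in that case. A model of the final predicate; the bit positions below are illustrative, not the LPUART register layout:

#include <stdint.h>

#define STAT_TC      (1u << 6)   /* illustrative bit positions */
#define FIFO_TXEMPT  (1u << 7)
#define CTRL_SBK     (1u << 0)

static int tx_empty(uint32_t stat, uint32_t fifo, uint32_t ctrl)
{
    if ((stat & STAT_TC && fifo & FIFO_TXEMPT) || (ctrl & CTRL_SBK))
        return 1;                /* TIOCSER_TEMT in the driver */
    return 0;
}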
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 7bd080720929..caa09a0c48f4 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -31,6 +31,7 @@
#include <linux/ioport.h>
#include <linux/ktime.h>
#include <linux/major.h>
+#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/of.h>
@@ -2864,6 +2865,13 @@ static int sci_init_single(struct platform_device *dev,
sci_port->irqs[i] = platform_get_irq(dev, i);
}
+ /*
+ * The fourth interrupt on an SCI port is the transmit end interrupt,
+ * so shuffle the interrupts accordingly.
+ */
+ if (p->type == PORT_SCI)
+ swap(sci_port->irqs[SCIx_BRI_IRQ], sci_port->irqs[SCIx_TEI_IRQ]);
+
/* The SCI generates several interrupts. They can be muxed together or
* connected to different interrupt lines. In the muxed case only one
* interrupt resource is specified as there is only one interrupt ID.
@@ -2929,7 +2937,7 @@ static int sci_init_single(struct platform_device *dev,
port->flags = UPF_FIXED_PORT | UPF_BOOT_AUTOCONF | p->flags;
port->fifosize = sci_port->params->fifosize;
- if (port->type == PORT_SCI) {
+ if (port->type == PORT_SCI && !dev->dev.of_node) {
if (sci_port->reg_size >= 0x20)
port->regshift = 2;
else
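
The <linux/minmax.h> include added above exists only to pull in swap(), which exchanges the BRI and TEI IRQ slots in place. The equivalent operation written out for two plain ints:

static void swap_int(int *a, int *b)
{
    int t = *a;     /* swap(sci_port->irqs[BRI], sci_port->irqs[TEI]) */
    *a = *b;        /* does exactly this, type-generically           */
    *b = t;
}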
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 37e178a9ac47..70b112038792 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -1409,13 +1409,6 @@ static int ufshcd_devfreq_target(struct device *dev,
struct ufs_clk_info *clki;
unsigned long irq_flags;
- /*
- * Skip devfreq if UFS initialization is not finished.
- * Otherwise ufs could be in an inconsistent state.
- */
- if (!smp_load_acquire(&hba->logical_unit_scan_finished))
- return 0;
-
if (!ufshcd_is_clkscaling_supported(hba))
return -EINVAL;
@@ -8399,6 +8392,22 @@ static int ufshcd_add_lus(struct ufs_hba *hba)
if (ret)
goto out;
+ /* Initialize devfreq after UFS device is detected */
+ if (ufshcd_is_clkscaling_supported(hba)) {
+ memcpy(&hba->clk_scaling.saved_pwr_info.info,
+ &hba->pwr_info,
+ sizeof(struct ufs_pa_layer_attr));
+ hba->clk_scaling.saved_pwr_info.is_valid = true;
+ hba->clk_scaling.is_allowed = true;
+
+ ret = ufshcd_devfreq_init(hba);
+ if (ret)
+ goto out;
+
+ hba->clk_scaling.is_enabled = true;
+ ufshcd_init_clk_scaling_sysfs(hba);
+ }
+
ufs_bsg_probe(hba);
ufshpb_init(hba);
scsi_scan_host(hba->host);
@@ -8670,12 +8679,6 @@ out:
if (ret) {
pm_runtime_put_sync(hba->dev);
ufshcd_hba_exit(hba);
- } else {
- /*
- * Make sure that when reader code sees UFS initialization has finished,
- * all initialization steps have really been executed.
- */
- smp_store_release(&hba->logical_unit_scan_finished, true);
}
}
@@ -10316,30 +10319,12 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
*/
ufshcd_set_ufs_dev_active(hba);
- /* Initialize devfreq */
- if (ufshcd_is_clkscaling_supported(hba)) {
- memcpy(&hba->clk_scaling.saved_pwr_info.info,
- &hba->pwr_info,
- sizeof(struct ufs_pa_layer_attr));
- hba->clk_scaling.saved_pwr_info.is_valid = true;
- hba->clk_scaling.is_allowed = true;
-
- err = ufshcd_devfreq_init(hba);
- if (err)
- goto rpm_put_sync;
-
- hba->clk_scaling.is_enabled = true;
- ufshcd_init_clk_scaling_sysfs(hba);
- }
-
async_schedule(ufshcd_async_scan, hba);
ufs_sysfs_add_nodes(hba->dev);
device_enable_async_suspend(dev);
return 0;
-rpm_put_sync:
- pm_runtime_put_sync(dev);
free_tmf_queue:
blk_mq_destroy_queue(hba->tmf_queue);
blk_put_queue(hba->tmf_queue);
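
The ufshcd diff trades a memory-barrier flag for init ordering: devfreq is simply not created until the UFS device is fully detected, so its callbacks can never observe a half-initialized HBA and the acquire/release pair becomes unnecessary. A compact model of the "after" shape (all names illustrative):

#include <stdbool.h>

struct hba { bool scaling_supported; bool devfreq_ready; };

static int devfreq_init(struct hba *h)
{
    h->devfreq_ready = true;
    return 0;
}

static int add_lus(struct hba *h)   /* runs after device detection */
{
    if (h->scaling_supported)
        return devfreq_init(h);     /* was done earlier, behind a flag */
    return 0;
}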
diff --git a/drivers/usb/cdns3/cdnsp-ep0.c b/drivers/usb/cdns3/cdnsp-ep0.c
index d63d5d92f255..f317d3c84781 100644
--- a/drivers/usb/cdns3/cdnsp-ep0.c
+++ b/drivers/usb/cdns3/cdnsp-ep0.c
@@ -414,7 +414,7 @@ static int cdnsp_ep0_std_request(struct cdnsp_device *pdev,
void cdnsp_setup_analyze(struct cdnsp_device *pdev)
{
struct usb_ctrlrequest *ctrl = &pdev->setup;
- int ret = 0;
+ int ret = -EINVAL;
u16 len;
trace_cdnsp_ctrl_req(ctrl);
@@ -424,7 +424,6 @@ void cdnsp_setup_analyze(struct cdnsp_device *pdev)
if (pdev->gadget.state == USB_STATE_NOTATTACHED) {
dev_err(pdev->dev, "ERR: Setup detected in unattached state\n");
- ret = -EINVAL;
goto out;
}
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index a23ddbb81979..560793545362 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -49,6 +49,7 @@
#define PCI_DEVICE_ID_INTEL_RPLS 0x7a61
#define PCI_DEVICE_ID_INTEL_MTLM 0x7eb1
#define PCI_DEVICE_ID_INTEL_MTLP 0x7ec1
+#define PCI_DEVICE_ID_INTEL_MTLS 0x7f6f
#define PCI_DEVICE_ID_INTEL_MTL 0x7e7e
#define PCI_DEVICE_ID_INTEL_TGL 0x9a15
#define PCI_DEVICE_ID_AMD_MR 0x163a
@@ -474,6 +475,9 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTLP),
(kernel_ulong_t) &dwc3_pci_intel_swnode, },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTLS),
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
+
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTL),
(kernel_ulong_t) &dwc3_pci_intel_swnode, },
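
The dwc3-pci hunk is pure table data: one new Meteor Lake-S device ID routed to the existing Intel swnode. A minimal model of how such an ID table is consumed; the two device IDs are the ones in the diff, the rest is illustrative scaffolding:

#include <stddef.h>
#include <stdint.h>

struct id_entry { uint16_t vendor, device; const void *driver_data; };

static const char intel_swnode[] = "dwc3_pci_intel_swnode";

static const struct id_entry id_table[] = {
    { 0x8086, 0x7ec1, intel_swnode },   /* MTLP */
    { 0x8086, 0x7f6f, intel_swnode },   /* MTLS -- the new entry */
    { 0 }                               /* sentinel */
};

static const void *match_id(uint16_t vendor, uint16_t device)
{
    for (size_t i = 0; id_table[i].vendor; i++)
        if (id_table[i].vendor == vendor && id_table[i].device == device)
            return id_table[i].driver_data;
    return NULL;
}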
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index ddfc537c7526..56cdfb2e4211 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -1251,7 +1251,7 @@ static ssize_t ffs_epfile_read_iter(struct kiocb *kiocb, struct iov_iter *to)
p->kiocb = kiocb;
if (p->aio) {
p->to_free = dup_iter(&p->data, to, GFP_KERNEL);
- if (!p->to_free) {
+ if (!iter_is_ubuf(&p->data) && !p->to_free) {
kfree(p);
return -ENOMEM;
}
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index d605bc2e7e8f..28249d0bf062 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -614,7 +614,7 @@ ep_read_iter(struct kiocb *iocb, struct iov_iter *to)
if (!priv)
goto fail;
priv->to_free = dup_iter(&priv->to, to, GFP_KERNEL);
- if (!priv->to_free) {
+ if (!iter_is_ubuf(&priv->to) && !priv->to_free) {
kfree(priv);
goto fail;
}
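
Both gadget hunks encode the same rule: dup_iter() legitimately returns NULL for a single-segment user iterator, because there is no segment array to copy, so NULL only means allocation failure when the iter is not a ubuf. A sketch of the corrected predicate with stand-in types:

#include <stdbool.h>
#include <stddef.h>

struct iter { bool is_ubuf; void *segments; };

/* stand-in for dup_iter(): nothing to duplicate for a ubuf */
static void *dup_segments(const struct iter *it)
{
    return it->is_ubuf ? NULL : it->segments;
}

static int setup_aio(const struct iter *it)
{
    void *to_free = dup_segments(it);

    if (!it->is_ubuf && !to_free)
        return -12;             /* -ENOMEM: a real allocation failure */
    return 0;                   /* NULL plus ubuf is also success */
}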
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index fb988e4ea924..6db07ca419c3 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -771,12 +771,11 @@ static struct pci_driver xhci_pci_driver = {
/* suspend and resume implemented later */
.shutdown = usb_hcd_pci_shutdown,
- .driver = {
#ifdef CONFIG_PM
- .pm = &usb_hcd_pci_pm_ops,
-#endif
- .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ .driver = {
+ .pm = &usb_hcd_pci_pm_ops
},
+#endif
};
static int __init xhci_pci_init(void)
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
index 1ff22f675930..a88c39e525c2 100644
--- a/drivers/usb/host/xhci-tegra.c
+++ b/drivers/usb/host/xhci-tegra.c
@@ -1360,6 +1360,9 @@ static void tegra_xhci_id_work(struct work_struct *work)
mutex_unlock(&tegra->lock);
+ tegra->otg_usb3_port = tegra_xusb_padctl_get_usb3_companion(tegra->padctl,
+ tegra->otg_usb2_port);
+
if (tegra->host_mode) {
/* switch to host mode */
if (tegra->otg_usb3_port >= 0) {
@@ -1474,9 +1477,6 @@ static int tegra_xhci_id_notify(struct notifier_block *nb,
}
tegra->otg_usb2_port = tegra_xusb_get_usb2_port(tegra, usbphy);
- tegra->otg_usb3_port = tegra_xusb_padctl_get_usb3_companion(
- tegra->padctl,
- tegra->otg_usb2_port);
tegra->host_mode = (usbphy->last_event == USB_EVENT_ID) ? true : false;
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 6183ce8574b1..6307bae9cddf 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -9,6 +9,7 @@
*/
#include <linux/pci.h>
+#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/log2.h>
@@ -228,6 +229,7 @@ int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us)
static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
{
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
+ struct iommu_domain *domain;
int err, i;
u64 val;
u32 intrs;
@@ -246,7 +248,9 @@ static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
* an iommu. Doing anything when there is no iommu is definitely
* unsafe...
*/
- if (!(xhci->quirks & XHCI_ZERO_64B_REGS) || !device_iommu_mapped(dev))
+ domain = iommu_get_domain_for_dev(dev);
+ if (!(xhci->quirks & XHCI_ZERO_64B_REGS) || !domain ||
+ domain->type == IOMMU_DOMAIN_IDENTITY)
return;
xhci_info(xhci, "Zeroing 64bit base registers, expecting fault\n");
@@ -4438,6 +4442,7 @@ static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci,
if (!virt_dev || max_exit_latency == virt_dev->current_mel) {
spin_unlock_irqrestore(&xhci->lock, flags);
+ xhci_free_command(xhci, command);
return 0;
}
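
The xhci_zero_64b_regs() gate now asks a sharper question than "is there an IOMMU": zeroing is only safe behind a translating domain, not an identity (passthrough) one where stray DMA would hit RAM directly. A model of the decision, with enum values standing in for the iommu API's domain types:

enum domain_type { DOMAIN_NONE, DOMAIN_IDENTITY, DOMAIN_DMA };

static int may_zero_64b_regs(int has_quirk, enum domain_type d)
{
    if (!has_quirk)
        return 0;
    if (d == DOMAIN_NONE || d == DOMAIN_IDENTITY)
        return 0;   /* direct-mapped: expected fault would be real DMA */
    return 1;
}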
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 832ad592b7ef..cdea1bff3b70 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -120,6 +120,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demonstration module */
{ USB_DEVICE(0x10C4, 0x8281) }, /* Nanotec Plug & Drive */
{ USB_DEVICE(0x10C4, 0x8293) }, /* Telegesis ETRX2USB */
+ { USB_DEVICE(0x10C4, 0x82AA) }, /* Silicon Labs IFS-USB-DATACABLE used with Quint UPS */
{ USB_DEVICE(0x10C4, 0x82EF) }, /* CESINEL FALCO 6105 AC Power Supply */
{ USB_DEVICE(0x10C4, 0x82F1) }, /* CESINEL MEDCAL EFD Earth Fault Detector */
{ USB_DEVICE(0x10C4, 0x82F2) }, /* CESINEL MEDCAL ST Network Analyzer */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index e6d8d9b35ad0..f31cc3c76329 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1198,6 +1198,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, 0x0900, 0xff, 0, 0), /* RM500U-CN */
+ .driver_info = ZLP },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200U, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200S_CN, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200T, 0xff, 0, 0) },
@@ -1300,6 +1302,14 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1075, 0xff), /* Telit FN990 (PCIe) */
.driver_info = RSVD(0) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1080, 0xff), /* Telit FE990 (rmnet) */
+ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1081, 0xff), /* Telit FE990 (MBIM) */
+ .driver_info = NCTRL(0) | RSVD(1) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1082, 0xff), /* Telit FE990 (RNDIS) */
+ .driver_info = NCTRL(2) | RSVD(3) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1083, 0xff), /* Telit FE990 (ECM) */
+ .driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c
index 662cd043b50e..8f3e884222ad 100644
--- a/drivers/usb/typec/altmodes/displayport.c
+++ b/drivers/usb/typec/altmodes/displayport.c
@@ -112,8 +112,12 @@ static int dp_altmode_configure(struct dp_altmode *dp, u8 con)
if (dp->data.status & DP_STATUS_PREFER_MULTI_FUNC &&
pin_assign & DP_PIN_ASSIGN_MULTI_FUNC_MASK)
pin_assign &= DP_PIN_ASSIGN_MULTI_FUNC_MASK;
- else if (pin_assign & DP_PIN_ASSIGN_DP_ONLY_MASK)
+ else if (pin_assign & DP_PIN_ASSIGN_DP_ONLY_MASK) {
pin_assign &= DP_PIN_ASSIGN_DP_ONLY_MASK;
+ /* Default to pin assign C if available */
+ if (pin_assign & BIT(DP_PIN_ASSIGN_C))
+ pin_assign = BIT(DP_PIN_ASSIGN_C);
+ }
if (!pin_assign)
return -EINVAL;
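
The displayport hunk narrows a capability mask to one preferred bit: first restrict to the DP-only group, then collapse to pin assignment C when the partner offers it. The generic shape, with illustrative mask values rather than the typec altmode constants:

#include <stdint.h>

#define PIN_C        (1u << 2)
#define DP_ONLY_MASK 0x0cu       /* stand-in for the DP-only group */

static uint32_t pick_pin_assignment(uint32_t cap)
{
    uint32_t pin = cap & DP_ONLY_MASK;

    if (pin & PIN_C)
        pin = PIN_C;             /* prefer C if available */
    return pin;                  /* 0 means no valid assignment */
}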
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index 520646ae7fa0..195963b82b63 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -2467,10 +2467,11 @@ static int setup_driver(struct mlx5_vdpa_dev *mvdev)
err = 0;
goto out;
}
+ mlx5_vdpa_add_debugfs(ndev);
err = setup_virtqueues(mvdev);
if (err) {
mlx5_vdpa_warn(mvdev, "setup_virtqueues\n");
- goto out;
+ goto err_setup;
}
err = create_rqt(ndev);
@@ -2500,6 +2501,8 @@ err_tir:
destroy_rqt(ndev);
err_rqt:
teardown_virtqueues(ndev);
+err_setup:
+ mlx5_vdpa_remove_debugfs(ndev->debugfs);
out:
return err;
}
@@ -2513,6 +2516,8 @@ static void teardown_driver(struct mlx5_vdpa_net *ndev)
if (!ndev->setup)
return;
+ mlx5_vdpa_remove_debugfs(ndev->debugfs);
+ ndev->debugfs = NULL;
teardown_steering(ndev);
destroy_tir(ndev);
destroy_rqt(ndev);
@@ -3261,7 +3266,6 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
if (err)
goto err_reg;
- mlx5_vdpa_add_debugfs(ndev);
mgtdev->ndev = ndev;
return 0;
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_net.c b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
index 862f405362de..dfe2ce341803 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
@@ -466,16 +466,21 @@ static int vdpasim_net_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
vdpasim_net_setup_config(simdev, config);
- ret = _vdpa_register_device(&simdev->vdpa, VDPASIM_NET_VQ_NUM);
- if (ret)
- goto reg_err;
-
net = sim_to_net(simdev);
u64_stats_init(&net->tx_stats.syncp);
u64_stats_init(&net->rx_stats.syncp);
u64_stats_init(&net->cq_stats.syncp);
+ /*
+ * All initialization must be completed before this call, since it
+ * connects the device to the vDPA bus and requests can start
+ * arriving as soon as it returns.
+ */
+ ret = _vdpa_register_device(&simdev->vdpa, VDPASIM_NET_VQ_NUM);
+ if (ret)
+ goto reg_err;
+
return 0;
reg_err:
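
The vdpa_sim hunk is an ordering fix: finish all internal setup (the u64_stats_init() calls) before registration, because registering publishes the device on the bus and requests may arrive immediately. A tiny model of the publish-last rule (struct and names illustrative):

#include <stdbool.h>

struct dev { bool stats_ready; bool registered; };

static int register_dev(struct dev *d)
{
    d->registered = true;   /* device is now visible to the bus */
    return 0;
}

static int dev_add(struct dev *d)
{
    d->stats_ready = true;  /* init everything first ...            */
    return register_dev(d); /* ... then publish, never the reverse  */
}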
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index b244e7c0f514..32d0be968103 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -125,7 +125,6 @@ struct vhost_scsi_tpg {
struct se_portal_group se_tpg;
/* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
struct vhost_scsi *vhost_scsi;
- struct list_head tmf_queue;
};
struct vhost_scsi_tport {
@@ -206,10 +205,8 @@ struct vhost_scsi {
struct vhost_scsi_tmf {
struct vhost_work vwork;
- struct vhost_scsi_tpg *tpg;
struct vhost_scsi *vhost;
struct vhost_scsi_virtqueue *svq;
- struct list_head queue_entry;
struct se_cmd se_cmd;
u8 scsi_resp;
@@ -352,12 +349,9 @@ static void vhost_scsi_release_cmd_res(struct se_cmd *se_cmd)
static void vhost_scsi_release_tmf_res(struct vhost_scsi_tmf *tmf)
{
- struct vhost_scsi_tpg *tpg = tmf->tpg;
struct vhost_scsi_inflight *inflight = tmf->inflight;
- mutex_lock(&tpg->tv_tpg_mutex);
- list_add_tail(&tpg->tmf_queue, &tmf->queue_entry);
- mutex_unlock(&tpg->tv_tpg_mutex);
+ kfree(tmf);
vhost_scsi_put_inflight(inflight);
}
@@ -1194,19 +1188,11 @@ vhost_scsi_handle_tmf(struct vhost_scsi *vs, struct vhost_scsi_tpg *tpg,
goto send_reject;
}
- mutex_lock(&tpg->tv_tpg_mutex);
- if (list_empty(&tpg->tmf_queue)) {
- pr_err("Missing reserve TMF. Could not handle LUN RESET.\n");
- mutex_unlock(&tpg->tv_tpg_mutex);
+ tmf = kzalloc(sizeof(*tmf), GFP_KERNEL);
+ if (!tmf)
goto send_reject;
- }
-
- tmf = list_first_entry(&tpg->tmf_queue, struct vhost_scsi_tmf,
- queue_entry);
- list_del_init(&tmf->queue_entry);
- mutex_unlock(&tpg->tv_tpg_mutex);
- tmf->tpg = tpg;
+ vhost_work_init(&tmf->vwork, vhost_scsi_tmf_resp_work);
tmf->vhost = vs;
tmf->svq = svq;
tmf->resp_iov = vq->iov[vc->out];
@@ -1658,7 +1644,10 @@ undepend:
for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
tpg = vs_tpg[i];
if (tpg) {
+ mutex_lock(&tpg->tv_tpg_mutex);
+ tpg->vhost_scsi = NULL;
tpg->tv_tpg_vhost_count--;
+ mutex_unlock(&tpg->tv_tpg_mutex);
target_undepend_item(&tpg->se_tpg.tpg_group.cg_item);
}
}
@@ -2032,19 +2021,11 @@ static int vhost_scsi_port_link(struct se_portal_group *se_tpg,
{
struct vhost_scsi_tpg *tpg = container_of(se_tpg,
struct vhost_scsi_tpg, se_tpg);
- struct vhost_scsi_tmf *tmf;
-
- tmf = kzalloc(sizeof(*tmf), GFP_KERNEL);
- if (!tmf)
- return -ENOMEM;
- INIT_LIST_HEAD(&tmf->queue_entry);
- vhost_work_init(&tmf->vwork, vhost_scsi_tmf_resp_work);
mutex_lock(&vhost_scsi_mutex);
mutex_lock(&tpg->tv_tpg_mutex);
tpg->tv_tpg_port_count++;
- list_add_tail(&tmf->queue_entry, &tpg->tmf_queue);
mutex_unlock(&tpg->tv_tpg_mutex);
vhost_scsi_hotplug(tpg, lun);
@@ -2059,16 +2040,11 @@ static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg,
{
struct vhost_scsi_tpg *tpg = container_of(se_tpg,
struct vhost_scsi_tpg, se_tpg);
- struct vhost_scsi_tmf *tmf;
mutex_lock(&vhost_scsi_mutex);
mutex_lock(&tpg->tv_tpg_mutex);
tpg->tv_tpg_port_count--;
- tmf = list_first_entry(&tpg->tmf_queue, struct vhost_scsi_tmf,
- queue_entry);
- list_del(&tmf->queue_entry);
- kfree(tmf);
mutex_unlock(&tpg->tv_tpg_mutex);
vhost_scsi_hotunplug(tpg, lun);
@@ -2329,7 +2305,6 @@ vhost_scsi_make_tpg(struct se_wwn *wwn, const char *name)
}
mutex_init(&tpg->tv_tpg_mutex);
INIT_LIST_HEAD(&tpg->tv_tpg_list);
- INIT_LIST_HEAD(&tpg->tmf_queue);
tpg->tport = tport;
tpg->tport_tpgt = tpgt;
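
The vhost-scsi rework drops the one-element per-tpg reserve list, which made concurrent LUN RESETs fail with "Missing reserve TMF", in favor of plain per-request allocation. The minimal shape of the new lifecycle, as a userspace sketch:

#include <stdlib.h>

struct tmf { int resp; };

static struct tmf *tmf_get(void)
{
    return calloc(1, sizeof(struct tmf));   /* kzalloc() analogue;
                                             * NULL -> send_reject */
}

static void tmf_put(struct tmf *t)
{
    free(t);    /* kfree() in vhost_scsi_release_tmf_res() */
}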
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index 0a2c47df01f4..eb565a10e5cd 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -823,7 +823,7 @@ static int set_con2fb_map(int unit, int newidx, int user)
int oldidx = con2fb_map[unit];
struct fb_info *info = fbcon_registered_fb[newidx];
struct fb_info *oldinfo = NULL;
- int found, err = 0, show_logo;
+ int err = 0, show_logo;
WARN_CONSOLE_UNLOCKED();
@@ -841,26 +841,26 @@ static int set_con2fb_map(int unit, int newidx, int user)
if (oldidx != -1)
oldinfo = fbcon_registered_fb[oldidx];
- found = search_fb_in_map(newidx);
-
- if (!err && !found) {
+ if (!search_fb_in_map(newidx)) {
err = con2fb_acquire_newinfo(vc, info, unit);
- if (!err)
- con2fb_map[unit] = newidx;
+ if (err)
+ return err;
+
+ fbcon_add_cursor_work(info);
}
+ con2fb_map[unit] = newidx;
+
/*
* If old fb is not mapped to any of the consoles,
* fbcon should release it.
*/
- if (!err && oldinfo && !search_fb_in_map(oldidx))
+ if (oldinfo && !search_fb_in_map(oldidx))
con2fb_release_oldinfo(vc, oldinfo, info);
show_logo = (fg_console == 0 && !user &&
logo_shown != FBCON_LOGO_DONTSHOW);
- if (!found)
- fbcon_add_cursor_work(info);
con2fb_map_boot[unit] = newidx;
con2fb_init_display(vc, info, unit, show_logo);
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index 875541ff185b..3fd95a79e4c3 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -1116,6 +1116,8 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
case FBIOPUT_VSCREENINFO:
if (copy_from_user(&var, argp, sizeof(var)))
return -EFAULT;
+ /* only for kernel-internal use */
+ var.activate &= ~FB_ACTIVATE_KD_TEXT;
console_lock();
lock_fb_info(info);
ret = fbcon_modechange_possible(info, &var);
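
The fbmem hardening follows a common ioctl pattern: a flag reserved for kernel-internal callers is stripped from anything arriving over the user/kernel boundary before the request is acted on. A one-function model; the bit position is illustrative, not fb.h's value:

#include <stdint.h>

#define ACTIVATE_KD_TEXT (1u << 8)   /* illustrative bit position */

static uint32_t sanitize_activate(uint32_t user_flags)
{
    /* never honored when set from userspace */
    return user_flags & ~ACTIVATE_KD_TEXT;
}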